yfs-源码剖析(1)--锁服务

文章目录
  1. 介绍
  2. lock_protocol.h
    2.1. lock
    2.2. lock_protocol
  3. lock_client
  4. lock_server
  5. 参考资料

  介绍

    yfs 需要一个锁服务来协调文件系统结构的更新。

    其中包括两个模块,分别是锁客户端和锁服务器。

    他们之间的 RPC 交互流程如下:

    1. 客户端发送请求从锁服务器请求锁。
    2. 锁服务器在一个时间点只能向一个客户端发送锁。
    3. 客户端不需要锁的时候,会向服务器发送释放请求。
    4. 释放锁后,服务端会把锁给其他正在请求的客户端。

    lock_protocol.h

    lock

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    // One lock's in-memory state on the server side: its identifier,
    // whether it is currently held, and a condition variable that
    // acquire() waits on until the lock becomes FREE.
    class lock {
    public:
    enum lock_status {FREE, LOCKED};
    // Identifier of this lock (the key clients use to name it).
    lock_protocol::lockid_t m_lid;
    // Current state: FREE or LOCKED.
    int m_state;
    // Waiters block here (see lock_server::acquire) until notified by release.
    std::condition_variable m_cv;

    // Constructor: record the id and the initial state.
    lock(lock_protocol::lockid_t lid, int state);
    };


    // Out-of-line definition of the constructor declared above.
    lock::lock(lock_protocol::lockid_t lid, int state) : m_lid(lid), m_state(state)
    {
    }

    lock_protocol

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    // Wire protocol shared by lock_client and lock_server: status codes,
    // the lock id type, and the RPC procedure numbers.
    class lock_protocol {
    public:
    // Status codes returned by the RPC handlers.
    enum xxstatus { OK, RETRY, RPCERR, NOENT, IOERR };
    typedef int status;
    // Locks are named by 64-bit unsigned integers.
    typedef unsigned long long lockid_t;
    // RPC procedure numbers registered with the RPC layer.
    enum rpc_numbers {
    acquire = 0x7001,
    release,
    stat
    };
    };

    lock_client

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    // Client-side stub: forwards acquire/release/stat requests to the
    // lock server over RPC and returns the server's responses.
    class lock_client {
    protected:
    rpcc *cl;
    public:
    lock_client(std::string d);
    virtual ~lock_client() {};
    virtual lock_protocol::status acquire(lock_protocol::lockid_t); // acquire the lock with this id
    virtual lock_protocol::status release(lock_protocol::lockid_t); // release the lock with this id
    virtual lock_protocol::status stat(lock_protocol::lockid_t); // query server statistics
    };

    通过 lock_client 类发送 RPC 至 lock_server, 以获取来自 lock_server 的响应。

    lock_client 的实现如下:

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    23
    24
    25
    26
    27
    28
    29
    30
    31
    32
    33
    34
    35
    36
    // Construct a lock client whose RPCs go to the server at `dst`
    // (a host:port string parsed by make_sockaddr from rpc.h).
    lock_client::lock_client(std::string dst)
    {
        sockaddr_in dstsock;
        make_sockaddr(dst.c_str(), &dstsock);  // helper from rpc.h
        cl = new rpcc(dstsock);
        if (cl->bind() < 0) {
            // Fix: the original message "lock_client: call bind" did not say
            // that bind FAILED, which is the only case this branch handles.
            printf("lock_client: bind failed\n");
        }
    }

    // Ask the server for its statistics counter; the RPC reply value is
    // returned to the caller.
    int
    lock_client::stat(lock_protocol::lockid_t lid)
    {
        int reply;
        const int rpc_status = cl->call(lock_protocol::stat, cl->id(), lid, reply);
        VERIFY(rpc_status == lock_protocol::OK);
        return reply;
    }

    // Request lock `lid` from the server; the RPC blocks server-side until
    // the lock is granted.
    lock_protocol::status
    lock_client::acquire(lock_protocol::lockid_t lid)
    {
        int reply;
        const int rpc_status = cl->call(lock_protocol::acquire, cl->id(), lid, reply);
        VERIFY(rpc_status == lock_protocol::OK);
        return reply;
    }

    // Tell the server this client no longer needs lock `lid`.
    lock_protocol::status
    lock_client::release(lock_protocol::lockid_t lid)
    {
        int reply;
        const int rpc_status = cl->call(lock_protocol::release, cl->id(), lid, reply);
        VERIFY(rpc_status == lock_protocol::OK);
        return reply;
    }

    client 端的函数都是固定的套路代码:发起 RPC、校验返回值并把结果交给调用者;真正的加锁逻辑在 server 端的对应函数中实现。

    lock_server

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    23
    24
    25
    26
    #ifndef lock_server_h
    #define lock_server_h

    #include <map>
    #include <memory>
    #include <mutex>
    #include <string>
    #include "lock_protocol.h"
    #include "lock_client.h"
    #include "rpc.h"

    // Single-machine lock server: grants each lock to at most one client
    // at a time; clients are identified by the `clt` RPC argument.
    class lock_server {
    protected:
        int nacquire;        // statistics counter returned by stat()
        std::mutex m_mutex;  // guards m_lockMap and each lock's m_state
        // All locks ever requested, keyed by id; entries are heap-allocated
        // and owned by this server.
        std::map<lock_protocol::lockid_t, lock*> m_lockMap;

    public:
        lock_server();
        // Fix: the original empty destructor leaked every heap-allocated
        // lock object stored in m_lockMap.
        ~lock_server() {
            for (auto& entry : m_lockMap) {
                delete entry.second;
            }
        }
        lock_protocol::status stat(int clt, lock_protocol::lockid_t lid, int &);
        lock_protocol::status acquire(int clt, lock_protocol::lockid_t lid, int &);
        lock_protocol::status release(int clt, lock_protocol::lockid_t lid, int &);
    };
    #endif
    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    23
    24
    25
    26
    27
    28
    29
    30
    31
    32
    33
    34
    35
    36
    37
    38
    39
    40
    41
    42
    43
    44
    45
    46
    47
    48
    49
    50
    51
    52
    53
    54
    55
    56
    57
    58
    59
    60
    61
    62
    63
    64
    65
    66
    67
    68
    69
    70
    71
    72
    73
    #include "lock_server.h"
    #include <sstream>
    #include <stdio.h>
    #include <unistd.h>
    #include <arpa/inet.h>

    // Constructor of the lock struct shown earlier: record the id and the
    // initial FREE/LOCKED state.
    lock::lock(lock_protocol::lockid_t lid, int state) : m_lid(lid), m_state(state)
    {
    }

    // lock_server constructor: no locks exist yet; the statistics counter
    // starts at zero.
    lock_server::lock_server():
    nacquire (0)
    {
    }

    // Report the server's acquire counter to client `clt` through the
    // out-parameter `r`. Always succeeds.
    // NOTE(review): nothing in the visible code ever increments nacquire —
    // confirm whether acquire() is meant to update it.
    lock_protocol::status
    lock_server::stat(int clt, lock_protocol::lockid_t lid, int &r)
    {
        printf("stat request from clt %d\n", clt);
        r = nacquire;
        return lock_protocol::OK;
    }

    // Grant lock `lid` to client `clt`. If another client holds the lock,
    // block on its condition variable until release() marks it FREE.
    lock_protocol::status
    lock_server::acquire(int clt, lock_protocol::lockid_t lid, int &r)
    {
        std::unique_lock<std::mutex> guard(m_mutex);

        auto found = m_lockMap.find(lid);
        if (found == m_lockMap.end()) {
            // First request for this id: create the lock already LOCKED
            // so this client holds it immediately.
            m_lockMap.insert(std::make_pair(lid, new lock(lid, lock::LOCKED)));
        } else {
            lock *lk = found->second;
            // Standard condition-variable loop: re-check the predicate
            // after every wakeup, then take ownership.
            while (lk->m_state != lock::FREE) {
                lk->m_cv.wait(guard);
            }
            lk->m_state = lock::LOCKED;
        }

        return lock_protocol::OK;
    }

    // Release lock `lid` on behalf of client `clt` and wake all waiters.
    // Returns IOERR when the lock id is unknown.
    lock_protocol::status
    lock_server::release(int clt, lock_protocol::lockid_t lid, int &r)
    {
        lock_protocol::status ret = lock_protocol::OK;

        std::unique_lock<std::mutex> lck(m_mutex);

        auto iter = m_lockMap.find(lid);
        if (iter != m_lockMap.end())
        {
            iter->second->m_state = lock::FREE;
            // Wake every thread blocked in acquire() waiting for this lock.
            iter->second->m_cv.notify_all();
        }
        else
        {
            // Client released a lock the server has never handed out.
            ret = lock_protocol::IOERR;
        }

        // Bug fix: the original called m_mutex.unlock() here while `lck`
        // still owned the mutex, so lck's destructor unlocked it a second
        // time — undefined behavior. The unique_lock releases the mutex
        // automatically when it goes out of scope.
        return ret;
    }

    参考资料