server.cc
#include <unistd.h>

#include <atomic>
#include <csignal>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <map>
#include <thread>

#include <torch/script.h>
#include "./src/craft/raft.h"
#include "craft/high_availability.h"
using namespace std::chrono;
static craft::Raft *rft_p = nullptr;
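// Monitoring task recorded when a fatal signal is caught: it attaches the
// Raft node's base_json() state and tags the entry with action = "coredump".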
class CoreDumpTask : public MonitorTask {
 public:
  CoreDumpTask(craft::Raft *rft_p) : m_rft_p_(rft_p) { is_tmp = true; }
  std::string describe() const noexcept override { return "coredump"; }
  nlohmann::json get_host_data_() override { return {}; }
  nlohmann::json get_pid_data_() override {
    if (m_rft_p_ == nullptr) {
      return {};
    }
    nlohmann::json data = m_rft_p_->base_json();
    data["action"] = "coredump";
    return data;
  }

 private:
  craft::Raft *m_rft_p_{};
};
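// Fatal-signal handler: records a one-off CoreDumpTask and flushes the
// monitor so the node's state reaches the monitoring backend before exit.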
void coredump_handle(int signum) {
  if (rft_p == nullptr) {
    return;
  }
  rft_p->m_monitor_->record_batch<MonitorTask>({new CoreDumpTask(rft_p)});
  rft_p->m_monitor_->flush();
  printf("coredump flushed (signal %d)\n", signum);
}
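// Example state machine: an in-memory key/value store. The AbstractPersist
// hooks are left as stubs; a real implementation would read and write
// kv_datas_ through the snapshot file named below.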
class KVServer : public craft::AbstractPersist {
 public:
  KVServer(std::string path, std::string snapFileName)
      : AbstractPersist(std::move(path), std::move(snapFileName)) {}
  void deserialization(const char *filename) override {
    // Load data from the snapshot file into this object.
    /*
     * some IO operation ...
     */
  }
  void serialization() override {
    // Serialize this object's data to the snapshot file.
    /*
     * some IO operation ...
     */
  }
  void addPair(std::pair<std::string, std::string> data) {
    /* some operation */
  }

 private:
  std::map<std::string, std::string> kv_datas_;
};
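// Brings up a single Raft node: start the libgo scheduler, resolve the data
// directory from RAFT_HOME_PATH, install the crash handlers above, then
// launch the node and block. Applied entries arrive on msgCh (see the
// commented-out consumer loop below).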
void run() {
  // Start the libgo coroutine scheduler.
  std::thread([] { co_sched.Start(0, 0); }).detach();
  // Set the log level.
  spdlog::set_level(spdlog::level::debug);
  // Resolve the snapshot / persistence path.
  const char *homePath = std::getenv("RAFT_HOME_PATH");
  if (homePath == nullptr) {
    spdlog::error(
        "RAFT_HOME_PATH is not set. Please run 'source setenv.sh' first");
    exit(1);
  }
  std::string abs_path = std::string(homePath) + "/.data";
  // Snapshot file name.
  std::string snapFileName = "KVServer.snap";
  KVServer kv(abs_path, snapFileName);
  co_chan<ApplyMsg> msgCh(100000);
  rft_p = new craft::Raft(&kv, &msgCh);
  // struct sigaction sa;
  // sa.sa_handler = &coredump_handle;
  // sigemptyset(&sa.sa_mask);
  // sa.sa_flags = SA_RESTART | SA_NOCLDSTOP | SA_RESETHAND | SA_NODEFER;
  // if (sigaction(SIGCHLD, &sa, 0) == -1) {
  //   perror("sigaction failed");
  //   exit(1);
  // }
  signal(SIGSEGV, coredump_handle);  // catch segmentation faults
  signal(SIGFPE, coredump_handle);   // catch floating-point exceptions
  signal(SIGINT, coredump_handle);   // catch interrupts (Ctrl-C)
  signal(SIGABRT, coredump_handle);  // catch aborts
  rft_p->setLogLevel(spdlog::level::info);
  rft_p->launch();
  // Commented-out throughput test: drain applied entries from msgCh.
  // auto start = high_resolution_clock::now();
  // std::atomic<long long int> i = 0;
  // for (int k = 0; k < 8; k++) {
  //   std::thread([&] {
  //     while (true) {
  //       i++;
  //       ApplyMsg msg;
  //       msgCh >> msg;
  //       auto end = high_resolution_clock::now();
  //       spdlog::info("i = [{}]", i++);
  //       // spdlog::info(" get Apply msg [{},{},{}]", msg.commandValid,
  //       //              msg.command.content, msg.commandIndex);
  //       // raft.saveSnapShot(msg.commandIndex);
  //     }
  //   }).detach();
  // }
  sleep(INT32_MAX);
}
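// main() currently just benchmarks the TorchScript state-critic model used by
// the Raft node; the commented-out calls at the bottom show how run() can be
// supervised by HighAvai for automatic restarts.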
int main(int argc, char **argv) {
  torch::jit::script::Module raft_state_critic_model;
  raft_state_critic_model = torch::jit::load(
      "/home/cdy/code/projects/cRaft/src/train/model/"
      "state_critic_cxx_model_scripted.pt");
  raft_state_critic_model.to(torch::kCPU);
  for (int i = 0; i < 100; i++) {
    // Time each forward pass (reported in microseconds).
    auto start = std::chrono::high_resolution_clock::now();
    torch::Tensor input_tensor = torch::rand({5, 5, 98});
    std::vector<torch::jit::IValue> inputs;
    inputs.push_back(input_tensor);
    torch::Tensor output =
        raft_state_critic_model.forward(inputs).toTensor();
    auto end = std::chrono::high_resolution_clock::now();
    auto elapsed_us =
        std::chrono::duration_cast<std::chrono::microseconds>(end - start)
            .count();
    std::cout << "Model output: " << output[0] << " time: " << elapsed_us
              << std::endl;
  }
  // std::cout << "Model output: " << output[0] << std::endl;
  // torch::Device device(torch::kCUDA, 0);
  // raft_state_critic_model.to(device);
  // run();
  // HighAvai *high_avai = HighAvai::getInstance(run, 2);
  // high_avai->setRestartCount(5 /* default count = 5; */);
  // high_avai->start(argc, argv);
  return 0;
}