rpc : track allocated buffers (llama/7411)

* rpc : track allocated buffers

ref: #7407

* rpc : pack rpc_tensor tightly
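
The packing change matters because rpc_tensor is copied byte-for-byte between client and server, so both ends must agree on its exact in-memory layout. A minimal sketch of what #pragma pack(push, 1) does to a struct's padding and size (the fields below are made up, loosely modeled on rpc_tensor, not the real definition):

#include <cstdint>
#include <cstdio>

// Without packing the compiler pads members to their natural alignment,
// so sizeof can exceed the sum of the field sizes.
struct unpacked_example {
    uint64_t id;    // 8 bytes
    uint32_t type;  // 4 bytes, followed by 4 bytes of padding
    uint64_t data;  // 8 bytes
};

#pragma pack(push, 1)
// With pack(1) the members are laid out back to back with no padding,
// which is what you want for a struct sent over a socket as raw bytes.
struct packed_example {
    uint64_t id;
    uint32_t type;
    uint64_t data;
};
#pragma pack(pop)

int main() {
    std::printf("unpacked: %zu bytes, packed: %zu bytes\n",
                sizeof(unpacked_example), sizeof(packed_example)); // typically 24 vs 20
}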
ggml-rpc.cpp  CHANGED  (+176 -53)
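
The buffer-tracking change below boils down to one idea: the server remembers every ggml_backend_buffer_t it has handed out and refuses to operate on any remote pointer it does not recognize, instead of blindly reinterpret_cast-ing whatever the client sends. A standalone sketch of that pattern, with illustrative types and names rather than the actual ggml-rpc API:

#include <cstdint>
#include <cstdio>
#include <unordered_set>

// Stand-in for a backend buffer handle.
struct fake_buffer { uint64_t size; };

class tracking_server {
public:
    // Allocate a buffer and remember its handle before exposing it to the client.
    uint64_t alloc_buffer(uint64_t size) {
        fake_buffer * buf = new fake_buffer{size};
        buffers.insert(buf);
        return reinterpret_cast<uint64_t>(buf);
    }

    // Only act on handles that were actually handed out earlier.
    bool free_buffer(uint64_t remote_ptr) {
        fake_buffer * buf = reinterpret_cast<fake_buffer *>(remote_ptr);
        if (buffers.find(buf) == buffers.end()) {
            std::fprintf(stderr, "buffer not found: %llx\n", (unsigned long long) remote_ptr);
            return false;
        }
        buffers.erase(buf);
        delete buf;
        return true;
    }

    // Release anything the client never freed.
    ~tracking_server() {
        for (fake_buffer * buf : buffers) {
            delete buf;
        }
    }

private:
    std::unordered_set<fake_buffer *> buffers;
};

int main() {
    tracking_server srv;
    uint64_t h = srv.alloc_buffer(1024);
    srv.free_buffer(h);      // accepted: the server allocated this handle
    srv.free_buffer(0xdead); // rejected: unknown pointer from the client
}

The diff applies the same pattern to every command that receives a remote_ptr (BUFFER_GET_BASE, FREE_BUFFER, BUFFER_CLEAR) and to tensors whose buffer field is deserialized from the wire.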
@@ -56,6 +56,7 @@ struct socket_t {
 };

 // ggml_tensor is serialized into rpc_tensor
+#pragma pack(push, 1)
 struct rpc_tensor {
     uint64_t id;
     uint32_t type;
@@ -71,6 +72,7 @@ struct rpc_tensor {
     uint64_t data;
     char name[GGML_MAX_NAME];
 };
+#pragma pack(pop)

 // RPC commands
 enum rpc_cmd {
@@ -340,23 +342,6 @@ static rpc_tensor serialize_tensor(const ggml_tensor * tensor) {
     return result;
 }

-static ggml_tensor * deserialize_tensor(struct ggml_context * ctx, const rpc_tensor * tensor) {
-    ggml_tensor * result = ggml_new_tensor_4d(ctx, (ggml_type) tensor->type,
-        tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3]);
-    for (uint32_t i = 0; i < GGML_MAX_DIMS; i++) {
-        result->nb[i] = tensor->nb[i];
-    }
-    result->buffer = reinterpret_cast<ggml_backend_buffer_t>(tensor->buffer);
-    result->op = (ggml_op) tensor->op;
-    for (uint32_t i = 0; i < GGML_MAX_OP_PARAMS / sizeof(int32_t); i++) {
-        result->op_params[i] = tensor->op_params[i];
-    }
-    result->flags = tensor->flags;
-    result->data = reinterpret_cast<void *>(tensor->data);
-    ggml_set_name(result, tensor->name);
-    return result;
-}
-
 GGML_CALL static void ggml_backend_rpc_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
     UNUSED(buffer);
     if (ggml_is_quantized(tensor->type)) {
@@ -465,13 +450,15 @@ GGML_CALL static ggml_backend_buffer_t ggml_backend_rpc_buffer_type_alloc_buffer
     memcpy(&remote_ptr, output.data(), sizeof(remote_ptr));
     size_t remote_size;
     memcpy(&remote_size, output.data() + sizeof(uint64_t), sizeof(remote_size));
-
-
-
-
-
-
-
+    if (remote_ptr != 0) {
+        ggml_backend_buffer_t buffer = ggml_backend_buffer_init(buft,
+            ggml_backend_rpc_buffer_interface,
+            new ggml_backend_rpc_buffer_context{buft_ctx->sock, {}, remote_ptr, "RPC"},
+            remote_size);
+        return buffer;
+    } else {
+        return nullptr;
+    }
 }

 static size_t get_alignment(const std::shared_ptr<socket_t> & sock) {
@@ -658,7 +645,7 @@ GGML_CALL ggml_backend_t ggml_backend_rpc_init(const char * endpoint) {
         }
     }
 #endif
-
+    fprintf(stderr, "Connecting to %s\n", endpoint);
     std::string host;
     int port;
     if (!parse_endpoint(endpoint, host, port)) {
@@ -731,22 +718,61 @@ GGML_API GGML_CALL void ggml_backend_rpc_get_device_memory(const char * endpoint

 // RPC server-side implementation

-
+class rpc_server {
+public:
+    rpc_server(ggml_backend_t backend) : backend(backend) {}
+    ~rpc_server();
+
+    bool alloc_buffer(const std::vector<uint8_t> & input, std::vector<uint8_t> & output);
+    void get_alignment(std::vector<uint8_t> & output);
+    void get_max_size(std::vector<uint8_t> & output);
+    bool buffer_get_base(const std::vector<uint8_t> & input, std::vector<uint8_t> & output);
+    bool free_buffer(const std::vector<uint8_t> & input);
+    bool buffer_clear(const std::vector<uint8_t> & input);
+    bool set_tensor(const std::vector<uint8_t> & input);
+    bool get_tensor(const std::vector<uint8_t> & input, std::vector<uint8_t> & output);
+    bool copy_tensor(const std::vector<uint8_t> & input, std::vector<uint8_t> & output);
+    bool graph_compute(const std::vector<uint8_t> & input, std::vector<uint8_t> & output);
+
+private:
+    ggml_tensor * deserialize_tensor(struct ggml_context * ctx, const rpc_tensor * tensor);
+    ggml_tensor * create_node(uint64_t id,
+                              struct ggml_context * ctx,
+                              const std::unordered_map<uint64_t, const rpc_tensor*> & tensor_ptrs,
+                              std::unordered_map<uint64_t, struct ggml_tensor*> & tensor_map);
+
+
+    ggml_backend_t backend;
+    std::unordered_set<ggml_backend_buffer_t> buffers;
+};
+
+bool rpc_server::alloc_buffer(const std::vector<uint8_t> & input, std::vector<uint8_t> & output) {
     // input serialization format: | size (8 bytes) |
+    if (input.size() != sizeof(uint64_t)) {
+        return false;
+    }
     uint64_t size;
     memcpy(&size, input.data(), sizeof(size));
     ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(backend);
     ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, size);
-    uint64_t remote_ptr =
-    uint64_t remote_size =
-
+    uint64_t remote_ptr = 0;
+    uint64_t remote_size = 0;
+    if (buffer != nullptr) {
+        remote_ptr = reinterpret_cast<uint64_t>(buffer);
+        remote_size = buffer->size;
+        GGML_PRINT_DEBUG("[%s] size: %" PRIu64 " -> remote_ptr: %" PRIx64 ", remote_size: %" PRIu64 "\n", __func__, size, remote_ptr, remote_size);
+        buffers.insert(buffer);
+    } else {
+        GGML_PRINT_DEBUG("[%s] size: %" PRIu64 " -> failed\n", __func__, size);
+    }
     // output serialization format: | remote_ptr (8 bytes) | remote_size (8 bytes) |
     output.resize(2*sizeof(uint64_t), 0);
     memcpy(output.data(), &remote_ptr, sizeof(remote_ptr));
     memcpy(output.data() + sizeof(uint64_t), &remote_size, sizeof(remote_size));
+    return true;
 }

-
+void rpc_server::get_alignment(std::vector<uint8_t> & output) {
     ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(backend);
     size_t alignment = ggml_backend_buft_get_alignment(buft);
     GGML_PRINT_DEBUG("[%s] alignment: %lu\n", __func__, alignment);
@@ -755,7 +781,7 @@ static void rpc_get_alignment(ggml_backend_t backend, std::vector<uint8_t> & out
     memcpy(output.data(), &alignment, sizeof(alignment));
 }

-
+void rpc_server::get_max_size(std::vector<uint8_t> & output) {
     ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(backend);
     size_t max_size = ggml_backend_buft_get_max_size(buft);
     GGML_PRINT_DEBUG("[%s] max_size: %lu\n", __func__, max_size);
@@ -764,41 +790,90 @@ static void rpc_get_max_size(ggml_backend_t backend, std::vector<uint8_t> & outp
     memcpy(output.data(), &max_size, sizeof(max_size));
 }

-
+bool rpc_server::buffer_get_base(const std::vector<uint8_t> & input, std::vector<uint8_t> & output) {
     // input serialization format: | remote_ptr (8 bytes) |
+    if (input.size() != sizeof(uint64_t)) {
+        return false;
+    }
     uint64_t remote_ptr;
     memcpy(&remote_ptr, input.data(), sizeof(remote_ptr));
     GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 "\n", __func__, remote_ptr);
     ggml_backend_buffer_t buffer = reinterpret_cast<ggml_backend_buffer_t>(remote_ptr);
+    if (buffers.find(buffer) == buffers.end()) {
+        GGML_PRINT_DEBUG("[%s] buffer not found\n", __func__);
+        return false;
+    }
     void * base = ggml_backend_buffer_get_base(buffer);
     // output serialization format: | base_ptr (8 bytes) |
     uint64_t base_ptr = reinterpret_cast<uint64_t>(base);
     output.resize(sizeof(uint64_t), 0);
     memcpy(output.data(), &base_ptr, sizeof(base_ptr));
+    return true;
 }

-
+bool rpc_server::free_buffer(const std::vector<uint8_t> & input) {
     // input serialization format: | remote_ptr (8 bytes) |
+    if (input.size() != sizeof(uint64_t)) {
+        return false;
+    }
     uint64_t remote_ptr;
     memcpy(&remote_ptr, input.data(), sizeof(remote_ptr));
     GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 "\n", __func__, remote_ptr);
     ggml_backend_buffer_t buffer = reinterpret_cast<ggml_backend_buffer_t>(remote_ptr);
+    if (buffers.find(buffer) == buffers.end()) {
+        GGML_PRINT_DEBUG("[%s] buffer not found\n", __func__);
+        return false;
+    }
     ggml_backend_buffer_free(buffer);
+    buffers.erase(buffer);
+    return true;
 }

-
+bool rpc_server::buffer_clear(const std::vector<uint8_t> & input) {
     // input serialization format: | remote_ptr (8 bytes) | value (1 byte) |
+    if (input.size() != sizeof(uint64_t) + sizeof(uint8_t)) {
+        return false;
+    }
     uint64_t remote_ptr;
     memcpy(&remote_ptr, input.data(), sizeof(remote_ptr));
     uint8_t value;
     memcpy(&value, input.data() + sizeof(uint64_t), sizeof(value));
     GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 ", value: %u\n", __func__, remote_ptr, value);
     ggml_backend_buffer_t buffer = reinterpret_cast<ggml_backend_buffer_t>(remote_ptr);
+    if (buffers.find(buffer) == buffers.end()) {
+        GGML_PRINT_DEBUG("[%s] buffer not found\n", __func__);
+        return false;
+    }
     ggml_backend_buffer_clear(buffer, value);
+    return true;
 }

-
+ggml_tensor * rpc_server::deserialize_tensor(struct ggml_context * ctx, const rpc_tensor * tensor) {
+    ggml_tensor * result = ggml_new_tensor_4d(ctx, (ggml_type) tensor->type,
+        tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3]);
+    for (uint32_t i = 0; i < GGML_MAX_DIMS; i++) {
+        result->nb[i] = tensor->nb[i];
+    }
+    result->buffer = reinterpret_cast<ggml_backend_buffer_t>(tensor->buffer);
+    if (result->buffer && buffers.find(result->buffer) == buffers.end()) {
+        return nullptr;
+    }
+    result->op = (ggml_op) tensor->op;
+    for (uint32_t i = 0; i < GGML_MAX_OP_PARAMS / sizeof(int32_t); i++) {
+        result->op_params[i] = tensor->op_params[i];
+    }
+    result->flags = tensor->flags;
+    result->data = reinterpret_cast<void *>(tensor->data);
+    ggml_set_name(result, tensor->name);
+    return result;
+}
+
+
+bool rpc_server::set_tensor(const std::vector<uint8_t> & input) {
     // serialization format: | rpc_tensor | offset (8 bytes) | data (size bytes) |
+    if (input.size() < sizeof(rpc_tensor) + sizeof(uint64_t)) {
+        return false;
+    }
     const rpc_tensor * in_tensor = (const rpc_tensor *)input.data();
     uint64_t offset;
     memcpy(&offset, input.data() + sizeof(rpc_tensor), sizeof(offset));
@@ -811,14 +886,23 @@ static void rpc_set_tensor(const std::vector<uint8_t> & input) {
     };
     struct ggml_context * ctx = ggml_init(params);
     ggml_tensor * tensor = deserialize_tensor(ctx, in_tensor);
+    if (tensor == nullptr) {
+        GGML_PRINT_DEBUG("[%s] error deserializing tensor\n", __func__);
+        ggml_free(ctx);
+        return false;
+    }
     GGML_PRINT_DEBUG("[%s] buffer: %p, data: %p, offset: %" PRIu64 ", size: %zu\n", __func__, (void*)tensor->buffer, tensor->data, offset, size);
     const void * data = input.data() + sizeof(rpc_tensor) + sizeof(offset);
     ggml_backend_tensor_set(tensor, data, offset, size);
     ggml_free(ctx);
+    return true;
 }

-
+bool rpc_server::get_tensor(const std::vector<uint8_t> & input, std::vector<uint8_t> & output) {
     // serialization format: | rpc_tensor | offset (8 bytes) | size (8 bytes) |
+    if (input.size() != sizeof(rpc_tensor) + 2*sizeof(uint64_t)) {
+        return false;
+    }
     const rpc_tensor * in_tensor = (const rpc_tensor *)input.data();
     uint64_t offset;
     memcpy(&offset, input.data() + sizeof(rpc_tensor), sizeof(offset));
@@ -832,15 +916,24 @@ static void rpc_get_tensor(const std::vector<uint8_t> & input, std::vector<uint8
     };
     struct ggml_context * ctx = ggml_init(params);
     ggml_tensor * tensor = deserialize_tensor(ctx, in_tensor);
+    if (tensor == nullptr) {
+        GGML_PRINT_DEBUG("[%s] error deserializing tensor\n", __func__);
+        ggml_free(ctx);
+        return false;
+    }
     GGML_PRINT_DEBUG("[%s] buffer: %p, data: %p, offset: %" PRIu64 ", size: %" PRIu64 "\n", __func__, (void*)tensor->buffer, tensor->data, offset, size);
     // output serialization format: | data (size bytes) |
     output.resize(size, 0);
     ggml_backend_tensor_get(tensor, output.data(), offset, size);
     ggml_free(ctx);
+    return true;
 }

-
+bool rpc_server::copy_tensor(const std::vector<uint8_t> & input, std::vector<uint8_t> & output) {
     // serialization format: | rpc_tensor src | rpc_tensor dst |
+    if (input.size() != 2*sizeof(rpc_tensor)) {
+        return false;
+    }
     const rpc_tensor * rpc_src = (const rpc_tensor *)input.data();
     const rpc_tensor * rpc_dst = (const rpc_tensor *)(input.data() + sizeof(rpc_src));

@@ -852,18 +945,24 @@ static void rpc_copy_tensor(const std::vector<uint8_t> & input, std::vector<uint
     struct ggml_context * ctx = ggml_init(params);
     ggml_tensor * src = deserialize_tensor(ctx, rpc_src);
     ggml_tensor * dst = deserialize_tensor(ctx, rpc_dst);
+    if (src == nullptr || dst == nullptr) {
+        GGML_PRINT_DEBUG("[%s] error deserializing tensors\n", __func__);
+        ggml_free(ctx);
+        return false;
+    }
     GGML_PRINT_DEBUG("[%s] src->buffer: %p, dst->buffer: %p\n", __func__, (void*)src->buffer, (void*)dst->buffer);
     bool result = ggml_backend_buffer_copy_tensor(src, dst);
     // output serialization format: | result (1 byte) |
     output.resize(1, 0);
     output[0] = result;
     ggml_free(ctx);
+    return true;
 }

-
-
-
-
+ggml_tensor * rpc_server::create_node(uint64_t id,
+                                      struct ggml_context * ctx,
+                                      const std::unordered_map<uint64_t, const rpc_tensor*> & tensor_ptrs,
+                                      std::unordered_map<uint64_t, struct ggml_tensor*> & tensor_map) {
     if (id == 0) {
         return nullptr;
     }
@@ -872,6 +971,9 @@ static struct ggml_tensor * create_node(uint64_t id,
     }
     const rpc_tensor * tensor = tensor_ptrs.at(id);
     struct ggml_tensor * result = deserialize_tensor(ctx, tensor);
+    if (result == nullptr) {
+        return nullptr;
+    }
     tensor_map[id] = result;
     for (int i = 0; i < GGML_MAX_SRC; i++) {
         result->src[i] = create_node(tensor->src[i], ctx, tensor_ptrs, tensor_map);
@@ -881,14 +983,23 @@ static struct ggml_tensor * create_node(uint64_t id,
     return result;
 }

-
+bool rpc_server::graph_compute(const std::vector<uint8_t> & input, std::vector<uint8_t> & output) {
     // serialization format:
     // | n_nodes (4 bytes) | nodes (n_nodes * sizeof(uint64_t) | n_tensors (4 bytes) | tensors (n_tensors * sizeof(rpc_tensor)) |
+    if (input.size() < sizeof(uint32_t)) {
+        return false;
+    }
     uint32_t n_nodes;
     memcpy(&n_nodes, input.data(), sizeof(n_nodes));
+    if (input.size() < sizeof(uint32_t) + n_nodes*sizeof(uint64_t) + sizeof(uint32_t)) {
+        return false;
+    }
     const uint64_t * nodes = (const uint64_t *)(input.data() + sizeof(n_nodes));
     uint32_t n_tensors;
     memcpy(&n_tensors, input.data() + sizeof(n_nodes) + n_nodes*sizeof(uint64_t), sizeof(n_tensors));
+    if (input.size() < sizeof(uint32_t) + n_nodes*sizeof(uint64_t) + sizeof(uint32_t) + n_tensors*sizeof(rpc_tensor)) {
+        return false;
+    }
     const rpc_tensor * tensors = (const rpc_tensor *)(input.data() + sizeof(n_nodes) + n_nodes*sizeof(uint64_t) + sizeof(n_tensors));
     GGML_PRINT_DEBUG("[%s] n_nodes: %u, n_tensors: %u\n", __func__, n_nodes, n_tensors);

@@ -914,9 +1025,17 @@ static void rpc_graph_compute(ggml_backend_t backend, const std::vector<uint8_t>
     output.resize(1, 0);
     output[0] = status;
     ggml_free(ctx);
+    return true;
+}
+
+rpc_server::~rpc_server() {
+    for (auto buffer : buffers) {
+        ggml_backend_buffer_free(buffer);
+    }
 }

 static void rpc_serve_client(ggml_backend_t backend, sockfd_t sockfd, size_t free_mem, size_t total_mem) {
+    rpc_server server(backend);
     while (true) {
         uint8_t cmd;
         if (!recv_data(sockfd, &cmd, 1)) {
@@ -932,45 +1051,46 @@ static void rpc_serve_client(ggml_backend_t backend, sockfd_t sockfd, size_t fre
         if (!recv_data(sockfd, input.data(), input_size)) {
             break;
         }
+        bool ok = true;
         switch (cmd) {
             case ALLOC_BUFFER: {
-
+                ok = server.alloc_buffer(input, output);
                 break;
             }
             case GET_ALIGNMENT: {
-
+                server.get_alignment(output);
                 break;
             }
             case GET_MAX_SIZE: {
-
+                server.get_max_size(output);
                 break;
            }
             case BUFFER_GET_BASE: {
-
+                ok = server.buffer_get_base(input, output);
                 break;
             }
             case FREE_BUFFER: {
-
+                ok = server.free_buffer(input);
                 break;
             }
             case BUFFER_CLEAR: {
-
+                ok = server.buffer_clear(input);
                 break;
             }
             case SET_TENSOR: {
-
+                ok = server.set_tensor(input);
                 break;
             }
             case GET_TENSOR: {
-
+                ok = server.get_tensor(input, output);
                 break;
             }
             case COPY_TENSOR: {
-
+                ok = server.copy_tensor(input, output);
                 break;
             }
             case GRAPH_COMPUTE: {
-
+                ok = server.graph_compute(input, output);
                 break;
             }
             case GET_DEVICE_MEMORY: {
@@ -982,9 +1102,12 @@ static void rpc_serve_client(ggml_backend_t backend, sockfd_t sockfd, size_t fre
             }
             default: {
                 fprintf(stderr, "Unknown command: %d\n", cmd);
-
+                ok = false;
             }
         }
+        if (!ok) {
+            break;
+        }
         uint64_t output_size = output.size();
         if (!send_data(sockfd, &output_size, sizeof(output_size))) {
             break;