Commit 418769d · David Huang committed
Parent(s): 20644bf

Add `--no-op-offload` to improve `-ot` pp perf in MoE models like llama4 400B (llama/13386)

Files changed:
- ggml/include/ggml-backend.h (+2, -2)
- ggml/src/ggml-backend.cpp (+6, -2)
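The change threads one new boolean through the scheduler API so the op-offload heuristic can be switched off by the caller. As a purely hypothetical sketch (not the actual llama.cpp option parsing), an application could derive the value passed to the new parameter from a `--no-op-offload` command-line flag with a simple argv scan:

#include <stdbool.h>
#include <string.h>

// Hypothetical helper: scan argv for --no-op-offload and return the value to
// pass as the new op_offload argument of ggml_backend_sched_new().
static bool parse_op_offload(int argc, char ** argv) {
    bool op_offload = true; // default: offloading stays enabled, as before this change
    for (int i = 1; i < argc; i++) {
        if (strcmp(argv[i], "--no-op-offload") == 0) {
            op_offload = false;
        }
    }
    return op_offload;
}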
ggml/include/ggml-backend.h
CHANGED

@@ -248,7 +248,7 @@ extern "C" {
         // preferrably to run on the same backend as the buffer
         ggml_backend_buffer_set_usage(buf_weights, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
 
-        sched = ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, NULL, num_backends, GGML_DEFAULT_GRAPH_SIZE, false);
+        sched = ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, NULL, num_backends, GGML_DEFAULT_GRAPH_SIZE, false, true);
 
         // initialize buffers from a max size graph (optional)
         reserve_graph = build_graph(sched, max_batch_size);
@@ -289,7 +289,7 @@ extern "C" {
     typedef bool (*ggml_backend_sched_eval_callback)(struct ggml_tensor * t, bool ask, void * user_data);
 
     // Initialize a backend scheduler, backends with low index are given priority over backends with high index
-    GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size, bool parallel);
+    GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size, bool parallel, bool op_offload);
     GGML_API void ggml_backend_sched_free(ggml_backend_sched_t sched);
 
     // Initialize backend buffers from a measure graph
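The declaration now takes op_offload as a trailing argument, and the usage example in the header passes true to preserve the old behavior. A minimal sketch of a call site, assuming backend_gpu and backend_cpu were created elsewhere with the appropriate backend init functions, could look like this:

#include "ggml-backend.h"

// Sketch only: backend handles are assumed to come from the caller's setup code.
static ggml_backend_sched_t make_sched(ggml_backend_t backend_gpu,
                                       ggml_backend_t backend_cpu,
                                       bool op_offload) {
    ggml_backend_t backends[] = { backend_gpu, backend_cpu };
    const int n_backends = (int) (sizeof(backends) / sizeof(backends[0]));

    // Trailing argument is the new op_offload flag:
    //   true  -> keep the previous behavior (ops reading host weight buffers may
    //            be moved to a higher-priority backend)
    //   false -> never offload such ops (what --no-op-offload requests)
    return ggml_backend_sched_new(backends, NULL, n_backends,
                                  GGML_DEFAULT_GRAPH_SIZE,
                                  /*parallel  =*/ false,
                                  /*op_offload=*/ op_offload);
}

The scheduler is still released with ggml_backend_sched_free(sched); only the constructor signature changed.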
ggml/src/ggml-backend.cpp
CHANGED

@@ -674,6 +674,8 @@ struct ggml_backend_sched {
     char * context_buffer;
     size_t context_buffer_size;
 
+    bool op_offload;
+
     int debug;
 };
 
@@ -766,7 +768,7 @@ static int ggml_backend_sched_backend_id_from_cur(ggml_backend_sched_t sched, st
         if (tensor->op != GGML_OP_ROPE && src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) {
             int src_backend_id = ggml_backend_sched_backend_from_buffer(sched, src, tensor);
             // check if a backend with higher prio wants to offload the op
-            if (src_backend_id == sched->n_backends - 1 && ggml_backend_buffer_is_host(src->buffer)) {
+            if (sched->op_offload && src_backend_id == sched->n_backends - 1 && ggml_backend_buffer_is_host(src->buffer)) {
                 for (int b = 0; b < src_backend_id; b++) {
                     if (ggml_backend_supports_op(sched->backends[b], tensor) && ggml_backend_offload_op(sched->backends[b], tensor)) {
                         SET_CAUSE(tensor, "1.off");
@@ -1452,7 +1454,8 @@ ggml_backend_sched_t ggml_backend_sched_new(
         ggml_backend_buffer_type_t * bufts,
         int n_backends,
         size_t graph_size,
-        bool parallel) {
+        bool parallel,
+        bool op_offload) {
     GGML_ASSERT(n_backends > 0);
     GGML_ASSERT(n_backends <= GGML_SCHED_MAX_BACKENDS);
     GGML_ASSERT(ggml_backend_dev_type(ggml_backend_get_device(backends[n_backends - 1])) == GGML_BACKEND_DEVICE_TYPE_CPU);
@@ -1497,6 +1500,7 @@ ggml_backend_sched_t ggml_backend_sched_new(
     }
 
     sched->galloc = ggml_gallocr_new_n(sched->bufts, n_backends);
+    sched->op_offload = op_offload;
 
     ggml_backend_sched_reset(sched);
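The behavioral change is entirely in the gating condition: an op whose weights sit in a host buffer owned by the last (CPU) backend is only considered for offload to a higher-priority backend when sched->op_offload is set. A self-contained sketch of just that predicate, using hypothetical stand-in types rather than the real ggml structures, makes the three conditions explicit:

#include <stdbool.h>

// Hypothetical stand-ins for illustration; the real check lives in
// ggml_backend_sched_backend_id_from_cur() and uses the ggml types.
struct sched_view {
    int  n_backends;  // the CPU backend is always the last (lowest-priority) entry
    bool op_offload;  // the field added by this commit
};

// Returns true when the scheduler may search lower-index (higher-priority)
// backends that support and want to take the op, i.e. the
// "for (int b = 0; b < src_backend_id; b++)" loop in the diff above.
static bool may_offload_weight_op(const struct sched_view * sched,
                                  int src_backend_id, bool src_buffer_is_host) {
    return sched->op_offload                        // can now be disabled outright
        && src_backend_id == sched->n_backends - 1  // weights currently assigned to the CPU backend
        && src_buffer_is_host;                      // and stored in a host buffer
}

With op_offload set to false, such ops stay on the backend that owns the weight buffer, which the commit title reports as improving prompt-processing performance when tensors are pinned with `-ot` in large MoE models.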