examples: refine tensor dump in examples/benchmark/benchmark-matmult.cpp
zhou.weiguo committed Jun 13, 2024
1 parent 3695a2b commit b7a9d40
Showing 1 changed file with 3 additions and 14 deletions.
17 changes: 3 additions & 14 deletions examples/benchmark/benchmark-matmult.cpp
@@ -1,5 +1,6 @@
 #include "common.h"
 #include "ggml.h"
+#include "ggml-impl.h"
 
 #include <locale.h>
 #include <assert.h>
@@ -38,18 +39,6 @@ typedef struct {
     int8_t qs[QK8_0]; // quants
 } block_q8_0;
 
-static inline float ggml_compute_fp16_to_fp32(uint16_t h) {
-#if defined(__ARM_NEON)
-    __fp16 tmp;
-    memcpy(&tmp, &h, sizeof(uint16_t));
-    return (float) tmp;
-#else
-    uint16_t tmp;
-    memcpy(&tmp, &h, sizeof(uint16_t));
-    return (float) tmp;
-#endif
-}
-
 static float tensor_sum_elements(const ggml_tensor * tensor) {
     double sum = 0;
     float floatvalue = 0;
@@ -75,7 +64,7 @@ static float tensor_sum_elements(const ggml_tensor * tensor) {
         for (int j = 0; j < tensor->ne[1]; j++) {
             for (int k = 0; k < tensor->ne[0]; k++) {
                 shortvalue = ((unsigned short *) tensor->data)[j * tensor->ne[0] + k];
-                floatvalue = ggml_compute_fp16_to_fp32(shortvalue);
+                floatvalue = GGML_FP16_TO_FP32(shortvalue);
                 sum += floatvalue;
             }
         }
@@ -87,7 +76,7 @@ static float tensor_sum_elements(const ggml_tensor * tensor) {
         for (int j = 0; j < tensor->ne[1]; j++) {
             blocks = tensor->ne[0] / QK8_0;
             for (int i = 0; i < blocks; i++) {
-                floatvalue = ggml_compute_fp16_to_fp32(quant_datas[j * blocks + i].d);
+                floatvalue = GGML_FP16_TO_FP32(quant_datas[j * blocks + i].d);
                 for (int k = 0; k < QK8_0; k++) {
                     sum += (quant_datas[j * blocks + i].qs[k] * floatvalue);
                 }
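Note on the change: the helper removed above was only correct on ARM, where __fp16 gives a hardware half-to-float conversion; the generic #else branch copies the raw half-precision bits into a uint16_t and casts that integer to float, so half 1.0 (bit pattern 0x3C00) would dump as 15360.0. GGML_FP16_TO_FP32 from ggml-impl.h dispatches to an architecture-appropriate conversion instead. For illustration only, a minimal sketch of what a correct portable scalar fallback involves (an assumption-laden sketch, not the macro's actual implementation):

    #include <stdint.h>
    #include <string.h>

    // Illustrative scalar IEEE-754 binary16 -> binary32 conversion.
    // Handles normals, subnormals, infinities and NaNs by rebuilding the
    // float bit pattern (half bias 15, float bias 127).
    static float fp16_bits_to_fp32(uint16_t h) {
        const uint32_t sign = (uint32_t)(h & 0x8000) << 16;
        uint32_t exp  = (h >> 10) & 0x1f;
        uint32_t mant =  h        & 0x3ff;
        uint32_t bits;

        if (exp == 0x1f) {
            bits = sign | 0x7f800000u | (mant << 13);          // inf / NaN
        } else if (exp != 0) {
            bits = sign | ((exp + 112u) << 23) | (mant << 13); // normal: rebias 15 -> 127
        } else if (mant == 0) {
            bits = sign;                                       // signed zero
        } else {
            // half subnormal: renormalize into a float normal
            uint32_t shift = 0;
            while ((mant & 0x400u) == 0) { mant <<= 1; shift++; }
            mant &= 0x3ffu;
            bits = sign | ((113u - shift) << 23) | (mant << 13);
        }

        float f;
        memcpy(&f, &bits, sizeof f);  // bit-cast without strict-aliasing issues
        return f;
    }

With this, fp16_bits_to_fp32(0x3C00) yields 1.0f, whereas the removed generic branch returned 15360.0f for the same input.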

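The Q8_0 branch in the last hunk relies on the block layout declared near the top of the diff: each block_q8_0 carries one scale for QK8_0 int8 quants, and the use of GGML_FP16_TO_FP32 on the .d field shows that the scale is stored as fp16 bits, so element k of a block dequantizes as d * qs[k]. A self-contained sketch of the same row sum (hypothetical helper names; fp16_bits_to_fp32 is the conversion sketched above):

    #include <stdint.h>

    #define QK8_0 32

    typedef struct {
        uint16_t d;          // fp16 bits of the block scale (ggml_fp16_t in ggml)
        int8_t   qs[QK8_0];  // 32 signed 8-bit quants
    } block_q8_0;

    float fp16_bits_to_fp32(uint16_t h);  // from the sketch above

    // Hypothetical helper: sum all dequantized elements of one Q8_0 row
    // of ne0 elements, mirroring the loop in the diff.
    static double sum_q8_0_row(const block_q8_0 * row, int64_t ne0) {
        double sum = 0.0;
        const int64_t blocks = ne0 / QK8_0;
        for (int64_t i = 0; i < blocks; i++) {
            const float d = fp16_bits_to_fp32(row[i].d);
            for (int k = 0; k < QK8_0; k++) {
                sum += d * row[i].qs[k];  // dequantize: scale * quant
            }
        }
        return sum;
    }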