activation.h
#ifndef VALLEX_CPP_ACTIVATION_H
#define VALLEX_CPP_ACTIVATION_H
#include "common.h"
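
// C++ port of the Python MultiheadAttention module. The inline /*...*/ comments
// below preserve the original Python parameter names and default values; the
// NN::Module template arguments mirror the forward() signature declared below.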
class MultiheadAttention : public NN::Module<
        ggml_tensor *,
        /* x: Tensor,*/ ggml_tensor *,
        /*key_padding_mask: Optional[Tensor] = None,*/ ggml_tensor *,
        /*need_weights: bool = True,*/ bool,
        /*attn_mask: Optional[Tensor] = None,*/ ggml_tensor *,
        /*average_attn_weights: bool = True,*/ bool,
        /*past_kv = None,*/ ggml_tensor *,
        /*use_cache = False*/ bool,
        /* k_cache */ ggml_tensor *,
        /* v_cache */ ggml_tensor *
> {
public:
    // Constructor parameters follow the original Python __init__ signature;
    // the commented-out `device` argument is carried over from the source.
    MultiheadAttention(
            int64_t embed_dim,
            int64_t num_heads,
            float dropout,
            bool bias,
            bool add_bias_kv,
            bool add_zero_attn,
            int64_t kdim,
            int64_t vdim,
            bool batch_first,
            ggml_tensor *linear1_cls,
            ggml_tensor *linear2_cls,
            /*ggml_tensor* device=,*/
            ggml_type dtype
    );
    size_t compute_params_mem_size(ggml_type wtype) override;
    void init_params(struct ggml_context *ctx, ggml_type wtype) override;
    void mapping_tensor(std::map<std::string, struct ggml_tensor *> &tensors, std::string prefix) override;
    // Forward pass; overrides the NN::Module interface.
    struct ggml_tensor *forward(
            vallex_compute_context *ctx,
            /* x: Tensor,*/ ggml_tensor *x,
            /*key_padding_mask: Optional[Tensor] = None,*/ ggml_tensor *key_padding_mask,
            /*need_weights: bool = True,*/ bool need_weights,
            /*attn_mask: Optional[Tensor] = None,*/ ggml_tensor *attn_mask,
            /*average_attn_weights: bool = True,*/ bool average_attn_weights,
            /*past_kv = None,*/ ggml_tensor *past_kv,
            /*use_cache = False*/ bool use_cache,
            ggml_tensor *k_cache,
            ggml_tensor *v_cache
    ) override;
    // Inference entry point with the same signature as forward(); it is not
    // part of the NN::Module interface (note the absence of `override`).
    struct ggml_tensor *infer(
            vallex_compute_context *ctx,
            /* x: Tensor,*/ ggml_tensor *x,
            /*key_padding_mask: Optional[Tensor] = None,*/ ggml_tensor *key_padding_mask,
            /*need_weights: bool = True,*/ bool need_weights,
            /*attn_mask: Optional[Tensor] = None,*/ ggml_tensor *attn_mask,
            /*average_attn_weights: bool = True,*/ bool average_attn_weights,
            /*past_kv = None,*/ ggml_tensor *past_kv,
            /*use_cache = False*/ bool use_cache,
            ggml_tensor *k_cache,
            ggml_tensor *v_cache
    );
    struct ggml_tensor *in_proj_weight;  // input projection weight (Q/K/V, torch-style naming)
    struct ggml_tensor *in_proj_bias;    // input projection bias
    struct ggml_tensor *out_proj_weight; // output projection weight
    struct ggml_tensor *out_proj_bias;   // output projection bias
    int64_t embed_dim;                   // total embedding dimension
    int64_t num_heads;                   // number of attention heads
};
#endif //VALLEX_CPP_ACTIVATION_H
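
// Usage sketch (illustrative only): the dimension values, tensor handles, and
// the nullptrs standing in for the Python `None` defaults are assumptions, not
// values taken from this repository.
//
//   MultiheadAttention attn(
//           /*embed_dim=*/1024, /*num_heads=*/16, /*dropout=*/0.0f,
//           /*bias=*/true, /*add_bias_kv=*/false, /*add_zero_attn=*/false,
//           /*kdim=*/1024, /*vdim=*/1024, /*batch_first=*/true,
//           /*linear1_cls=*/nullptr, /*linear2_cls=*/nullptr,
//           /*dtype=*/GGML_TYPE_F32);
//
//   // `ctx` is a vallex_compute_context and `x` a ggml_tensor built elsewhere.
//   ggml_tensor *out = attn.forward(ctx, x,
//           /*key_padding_mask=*/nullptr, /*need_weights=*/false,
//           /*attn_mask=*/nullptr, /*average_attn_weights=*/true,
//           /*past_kv=*/nullptr, /*use_cache=*/false,
//           /*k_cache=*/nullptr, /*v_cache=*/nullptr);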