example_config.toml
# https://github.com/toml-lang/toml
# <target_name>.<component>.<part>
# The string 'None' is used to represent Python's None value.
# Convert it back to None if you write an extension for utensor_cgen
# (a short loading sketch follows the config below).
[utensor.backend]
legacy-api = true

[utensor.backend.tensor_alloc_planner]
max_pool_size = 1048576
include_inputs = false
out_fname = "None"
enabled = true

[utensor.backend.legacy_code_generator]
src_fname = "None"
params_dir = "data"
embed_params_dir = "/fs/data"
model_dir = "models"
debug_cmt = false

[utensor.backend.legacy_graph_lower]

[utensor.backend.rearch_code_generator]
src_fname = "None"
header_fname = "None"
params_dir = "data"
model_dir = "models"
meta_data_pool_size = "auto"
ram_data_pool_size = "auto"

[utensor.backend.rearch_graph_lower]

[utensor.backend.pipeline_transformer]
save_graph = false
transform_methods = [ "dropout(name_pattern=r'(dropout[_\\w\\d]*)/.*')", "linear_reorder", "quantize", "conv_pool", "inline", "biasAdd", "remove_id_op", "fake_gather_v2", "refcnt",]

[utensor.backend.tensor_alloc_planner.aesthetic_kwargs]
split_on_large_graph = true
num_tensors_per_split = 20
figsize = "None"
fontsize = 12
lw = 12
rand_seed = 1111

[utensor.backend.tensor_alloc_planner.dtype_size_map]
float = 4
double = 8
uint8 = 1
int = 4
long = 8
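A plain TOML parser will hand the 'None' placeholders above to Python as ordinary strings. Below is a minimal sketch of how an extension might load this file and map those placeholders back to Python's None; the `none_aware` helper and the use of the third-party `toml` package are assumptions for illustration, not part of utensor_cgen's API.

```python
# Hypothetical helper: load example_config.toml and turn the string
# 'None' back into Python's None, per the comment at the top of the file.
import toml  # assumes the third-party 'toml' package is installed


def none_aware(value):
    """Recursively replace the string 'None' with Python's None."""
    if value == "None":
        return None
    if isinstance(value, dict):
        return {k: none_aware(v) for k, v in value.items()}
    if isinstance(value, list):
        return [none_aware(v) for v in value]
    return value


config = none_aware(toml.load("example_config.toml"))

# src_fname is now None rather than the string 'None'
print(config["utensor"]["backend"]["legacy_code_generator"]["src_fname"])
```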