# proto-file: deeplab2/config.proto
# proto-message: ExperimentOptions
#
# Panoptic-DeepLab with ResNet-50 and output stride 32.
#
############### PLEASE READ THIS BEFORE USING THIS CONFIG ###############
# Before using this config, you need to update the following fields:
# - train_dataset_options.file_pattern: Update the path to the
# training set. e.g., your_dataset/train*.tfrecord
# - eval_dataset_options.file_pattern: Update the path to the
# validation set, e.g., your_dataset/eval*.tfrecord
#########################################################################
#
# References:
# For ResNet, see
# - Kaiming He, et al. "Deep Residual Learning for Image Recognition."
# In CVPR, 2016.
# For Panoptic-DeepLab, see
# - Bowen Cheng, et al. "Panoptic-DeepLab: A Simple, Strong, and Fast Baseline
# for Bottom-Up Panoptic Segmentation." In CVPR, 2020.
# Use a unique experiment_name for each experiment.
experiment_name: "deeplabv3+_n+1_640x640_coco"
model_options {
# Update the path to the initial checkpoint (e.g., ImageNet
# pretrained checkpoint).
initial_checkpoint: "ignore"
backbone {
name: "resnet50"
output_stride: 32
}
decoder {
feature_key: "res5"
decoder_channels: 256
aspp_channels: 256
atrous_rates: 3
atrous_rates: 6
atrous_rates: 9
}
panoptic_deeplab {
low_level {
feature_key: "res3"
channels_project: 64
}
low_level {
feature_key: "res2"
channels_project: 32
}
instance {
enable: false
}
semantic_head {
output_channels: 134
head_channels: 256
}
}
}
trainer_options {
save_checkpoints_steps: 1000
save_summaries_steps: 100
steps_per_loop: 100
loss_options {
semantic_loss {
name: "softmax_cross_entropy"
weight: 1.0
top_k_percent: 1.0
#top_k_percent: 0.2
}
}
solver_options {
base_learning_rate: 0.0005
#training_number_of_steps: 400000
training_number_of_steps: 200000
warmup_steps: 2000
}
}
train_dataset_options {
dataset: "coco_panoptic"
# Update the path to training set.
#file_pattern: "/tf/notebooks/EGIC/res/data/coco2017/train*.tfrecord"
file_pattern: "/content/EGIC/res/data/coco2017/train*.tfrecord"
# Adjust the batch_size accordingly to better fit your GPU/TPU memory.
# Also see Q1 in g3doc/faq.md.
# batch_size: 64
batch_size: 16
crop_size: 256
crop_size: 256
min_resize_value: 640
max_resize_value: 640
augmentations {
min_scale_factor: 0.5
max_scale_factor: 1.5
scale_factor_step_size: 0.1
autoaugment_policy_name: "simple_classification_policy_magnitude_scale_0.2"
}
increase_small_instance_weights: true
small_instance_weight: 3.0
}
eval_dataset_options {
dataset: "coco_panoptic"
# Update the path to validation set.
# just for demonstration purpose
# !!! CAUTION: should be set to val* !!!
#file_pattern: "/tf/notebooks/EGIC/res/data/coco2017/train*.tfrecord"
file_pattern: "/content/EGIC/res/data/coco2017/train*.tfrecord"
batch_size: 1
crop_size: 640
crop_size: 640
min_resize_value: 640
max_resize_value: 640
# Add options to make the evaluation loss comparable to the training loss.
increase_small_instance_weights: true
small_instance_weight: 3.0
}
evaluator_options {
continuous_eval_timeout: -1
save_predictions: true
save_raw_predictions: false
}