diff --git a/docs/zh/examples/viv.md b/docs/zh/examples/viv.md
index ee7aad38d..5172a0f84 100644
--- a/docs/zh/examples/viv.md
+++ b/docs/zh/examples/viv.md
@@ -2,6 +2,12 @@
 
 Quick experience on AI Studio
 
+=== "Model training command"
+
+    ``` sh
+    python viv.py
+    ```
+
 ## 1. Background
 
 Vortex-induced vibration (VIV) is a fluid-structure interaction phenomenon that mainly occurs when a fluid flows past a cylinder or pipe. It plays an important role in ocean engineering and wind engineering.
@@ -41,9 +47,9 @@ $$
 
 In the equation above, $g$ is the MLP model itself, expressed in PaddleScience code as follows:
 
-``` py linenums="28"
+``` py linenums="31"
 --8<--
-examples/fsi/viv.py:28:29
+examples/fsi/viv.py:31:32
 --8<--
 ```
 
@@ -56,9 +62,9 @@ examples/fsi/viv.py:28:29
 
 Since this case uses the VIV equation, the built-in `VIV` equation provided by PaddleScience can be used directly.
 
-``` py linenums="31"
+``` py linenums="34"
 --8<--
-examples/fsi/viv.py:31:32
+examples/fsi/viv.py:34:35
 --8<--
 ```
 
@@ -76,9 +82,9 @@ examples/fsi/viv.py:31:32
 
 Before defining the constraint, data-loading settings such as the file path need to be specified for the supervised constraint.
 
-``` py linenums="34"
+``` py linenums="37"
 --8<--
-examples/fsi/viv.py:34:50
+examples/fsi/viv.py:37:52
 --8<--
 ```
 
@@ -86,9 +92,9 @@ examples/fsi/viv.py:34:50
 
 Since training is performed in a supervised manner, the supervised constraint `SupervisedConstraint` is used here:
 
-``` py linenums="51"
+``` py linenums="54"
 --8<--
-examples/fsi/viv.py:51:57
+examples/fsi/viv.py:54:60
 --8<--
 ```
 
@@ -102,9 +108,9 @@ examples/fsi/viv.py:51:57
 
 After the supervised constraint is built, it is wrapped into a dictionary keyed by the name assigned above, so that it can be accessed conveniently later.
 
-``` py linenums="58"
+``` py linenums="61"
 --8<--
-examples/fsi/viv.py:58:61
+examples/fsi/viv.py:61:64
 --8<--
 ```
 
@@ -112,9 +118,9 @@ examples/fsi/viv.py:58:61
 
 Next, we need to specify the number of training epochs and the learning rate. Based on experimental experience, we train for 100,000 epochs and evaluate the model accuracy every 1,000 epochs.
 
-``` py linenums="63"
+``` yaml linenums="38"
 --8<--
-examples/fsi/viv.py:63:65
+examples/fsi/conf/viv.yaml:38:51
 --8<--
 ```
 
@@ -122,9 +128,9 @@ examples/fsi/viv.py:63:65
 
 The optimizer is called during training to update the model parameters; here the commonly used `Adam` optimizer is chosen together with a `Step` learning-rate decay schedule.
 
-``` py linenums="67"
+``` py linenums="66"
 --8<--
-examples/fsi/viv.py:67:71
+examples/fsi/viv.py:66:68
 --8<--
 ```
 
@@ -136,9 +142,9 @@ examples/fsi/viv.py:67:71
 
 During training, the current model is usually evaluated on a validation (test) set at fixed epoch intervals, so `ppsci.validate.SupervisedValidator` is used to build the evaluator.
 
-``` py linenums="73"
+``` py linenums="70"
 --8<--
-examples/fsi/viv.py:73:95
+examples/fsi/viv.py:70:92
 --8<--
 ```
 
@@ -152,9 +158,9 @@ examples/fsi/viv.py:73:95
 
 The data to visualize are the $t-\eta$ and $t-f$ relationship plots. Assuming the coordinate of each time instant $t$ is $t_i$, the corresponding network output is $\eta_i$ and the lift force is $f_i$, so we only need to save all the $(t_i, \eta_i, f_i)$ produced during evaluation as images. The code is as follows:
 
-``` py linenums="97"
+``` py linenums="94"
 --8<--
-examples/fsi/viv.py:97:116
+examples/fsi/viv.py:94:113
 --8<--
 ```
 
@@ -162,9 +168,9 @@ examples/fsi/viv.py:97:116
 
 After completing the above settings, simply pass the instantiated objects to `ppsci.solver.Solver` in order, and then start training, evaluation, and visualization.
 
-``` py linenums="118"
+``` py linenums="115"
 --8<--
-examples/fsi/viv.py:118:
+examples/fsi/viv.py:115:136
 --8<--
 ```
 
diff --git a/examples/fsi/conf/viv.yaml b/examples/fsi/conf/viv.yaml
new file mode 100644
index 000000000..996e24f2c
--- /dev/null
+++ b/examples/fsi/conf/viv.yaml
@@ -0,0 +1,58 @@
+hydra:
+  run:
+    # dynamic output directory according to running time and override name
+    dir: outputs_VIV/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname}
+  job:
+    name: ${mode} # name of logfile
+    chdir: false # keep current working directory unchanged
+    config:
+      override_dirname:
+        exclude_keys:
+          - TRAIN.checkpoint_path
+          - TRAIN.pretrained_model_path
+          - EVAL.pretrained_model_path
+          - mode
+          - output_dir
+          - log_freq
+  sweep:
+    # output directory for multirun
+    dir: ${hydra.run.dir}
+    subdir: ./
+
+# general settings
+mode: train # running mode: train/eval
+seed: 42
+output_dir: ${hydra:run.dir}
+log_freq: 20
+
+VIV_DATA_PATH: "./VIV_Training_Neta100.mat"
+
+# model settings
+MODEL:
+  input_keys: ["t_f"]
+  output_keys: ["eta"]
+  num_layers: 5
+  hidden_size: 50
+  activation: "tanh"
"tanh" + +# training settings +TRAIN: + epochs: 100000 + iters_per_epoch: 1 + save_freq: 1 + eval_during_train: true + eval_freq: 1000 + batch_size: 100 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 0.001 + step_size: 20000 + gamma: 0.9 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + batch_size: 32 diff --git a/examples/fsi/viv.py b/examples/fsi/viv.py index 90f4ad1e8..eb77f6b0f 100644 --- a/examples/fsi/viv.py +++ b/examples/fsi/viv.py @@ -12,42 +12,45 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os + +import hydra +from omegaconf import DictConfig + import ppsci -from ppsci.utils import config from ppsci.utils import logger -if __name__ == "__main__": - args = config.parse_args() + +def train(cfg: DictConfig): # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(42) + ppsci.utils.misc.set_random_seed(cfg.seed) + # set output directory - OUTPUT_DIR = "./output_viv" if args.output_dir is None else args.output_dir - # initialize logger - logger.init_logger("ppsci", f"{OUTPUT_DIR}/train.log", "info") + logger.init_logger("ppsci", os.path.join(cfg.output_dir, "train.log"), "info") # set model - model = ppsci.arch.MLP(("t_f",), ("eta",), 5, 50, "tanh") + model = ppsci.arch.MLP(**cfg.MODEL) # set equation equation = {"VIV": ppsci.equation.Vibration(2, -4, 0)} # set dataloader config - ITERS_PER_EPOCH = 1 train_dataloader_cfg = { "dataset": { "name": "MatDataset", - "file_path": "./VIV_Training_Neta100.mat", + "file_path": cfg.VIV_DATA_PATH, "input_keys": ("t_f",), "label_keys": ("eta", "f"), "weight_dict": {"eta": 100}, }, - "batch_size": 100, + "batch_size": cfg.TRAIN.batch_size, "sampler": { "name": "BatchSampler", "drop_last": False, "shuffle": True, }, } + # set constraint sup_constraint = ppsci.constraint.SupervisedConstraint( train_dataloader_cfg, @@ -60,25 +63,19 @@ sup_constraint.name: sup_constraint, } - # set training hyper-parameters - EPOCHS = 100000 if args.epochs is None else args.epochs - EVAL_FREQ = 1000 - # set optimizer - lr_scheduler = ppsci.optimizer.lr_scheduler.Step( - EPOCHS, ITERS_PER_EPOCH, 0.001, step_size=20000, gamma=0.9 - )() + lr_scheduler = ppsci.optimizer.lr_scheduler.Step(**cfg.TRAIN.lr_scheduler)() optimizer = ppsci.optimizer.Adam(lr_scheduler)((model,) + tuple(equation.values())) # set validator valid_dataloader_cfg = { "dataset": { "name": "MatDataset", - "file_path": "./VIV_Training_Neta100.mat", + "file_path": cfg.VIV_DATA_PATH, "input_keys": ("t_f",), "label_keys": ("eta", "f"), }, - "batch_size": 32, + "batch_size": cfg.EVAL.batch_size, "sampler": { "name": "BatchSampler", "drop_last": False, @@ -96,7 +93,7 @@ # set visualizer(optional) visu_mat = ppsci.utils.reader.load_mat_file( - "./VIV_Training_Neta100.mat", + cfg.VIV_DATA_PATH, ("t_f", "eta_gt", "f_gt"), alias_dict={"eta_gt": "eta", "f_gt": "f"}, ) @@ -119,17 +116,18 @@ solver = ppsci.solver.Solver( model, constraint, - OUTPUT_DIR, + cfg.output_dir, optimizer, lr_scheduler, - EPOCHS, - ITERS_PER_EPOCH, - eval_during_train=True, - eval_freq=EVAL_FREQ, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + eval_freq=cfg.TRAIN.eval_freq, equation=equation, validator=validator, visualizer=visualizer, ) + # train model solver.train() # evaluate after finished training @@ -137,17 +135,91 @@ # visualize prediction after finished training 
     solver.visualize()
-    # directly evaluate model from pretrained_model_path(optional)
-    logger.init_logger("ppsci", f"{OUTPUT_DIR}/eval.log", "info")
+
+
+def evaluate(cfg: DictConfig):
+    # set random seed for reproducibility
+    ppsci.utils.misc.set_random_seed(cfg.seed)
+
+    # set output directory
+    logger.init_logger("ppsci", os.path.join(cfg.output_dir, "eval.log"), "info")
+
+    # set model
+    model = ppsci.arch.MLP(**cfg.MODEL)
+
+    # set equation
+    equation = {"VIV": ppsci.equation.Vibration(2, -4, 0)}
+
+    # set validator
+    valid_dataloader_cfg = {
+        "dataset": {
+            "name": "MatDataset",
+            "file_path": cfg.VIV_DATA_PATH,
+            "input_keys": ("t_f",),
+            "label_keys": ("eta", "f"),
+        },
+        "batch_size": cfg.EVAL.batch_size,
+        "sampler": {
+            "name": "BatchSampler",
+            "drop_last": False,
+            "shuffle": False,
+        },
+    }
+    eta_mse_validator = ppsci.validate.SupervisedValidator(
+        valid_dataloader_cfg,
+        ppsci.loss.MSELoss("mean"),
+        {"eta": lambda out: out["eta"], **equation["VIV"].equations},
+        metric={"MSE": ppsci.metric.MSE()},
+        name="eta_mse",
+    )
+    validator = {eta_mse_validator.name: eta_mse_validator}
+
+    # set visualizer(optional)
+    visu_mat = ppsci.utils.reader.load_mat_file(
+        cfg.VIV_DATA_PATH,
+        ("t_f", "eta_gt", "f_gt"),
+        alias_dict={"eta_gt": "eta", "f_gt": "f"},
+    )
+
+    visualizer = {
+        "visualize_u": ppsci.visualize.VisualizerScatter1D(
+            visu_mat,
+            ("t_f",),
+            {
+                r"$\eta$": lambda d: d["eta"],  # plot with latex title
+                r"$\eta_{gt}$": lambda d: d["eta_gt"],  # plot with latex title
+                r"$f$": equation["VIV"].equations["f"],  # plot with latex title
+                r"$f_{gt}$": lambda d: d["f_gt"],  # plot with latex title
+            },
+            num_timestamps=1,
+            prefix="viv_pred",
+        )
+    }
+
+    # initialize solver
     solver = ppsci.solver.Solver(
         model,
-        constraint,
-        OUTPUT_DIR,
+        output_dir=cfg.output_dir,
         equation=equation,
         validator=validator,
         visualizer=visualizer,
-        pretrained_model_path=f"{OUTPUT_DIR}/checkpoints/latest",
+        pretrained_model_path=cfg.EVAL.pretrained_model_path,
     )
+
+    # evaluate
     solver.eval()
-    # visualize prediction from pretrained_model_path(optional)
+    # visualize prediction
     solver.visualize()
+
+
+@hydra.main(version_base=None, config_path="./conf", config_name="viv.yaml")
+def main(cfg: DictConfig):
+    if cfg.mode == "train":
+        train(cfg)
+    elif cfg.mode == "eval":
+        evaluate(cfg)
+    else:
+        raise ValueError(f"cfg.mode should be in ['train', 'eval'], but got '{cfg.mode}'")
+
+
+if __name__ == "__main__":
+    main()
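
For reference, a minimal usage sketch of the Hydra-based entry point introduced above. The `mode`, `TRAIN.*`, and `EVAL.*` keys come from `examples/fsi/conf/viv.yaml`; the checkpoint path in the last command is a hypothetical placeholder, not a path produced by this diff.

``` sh
# train with the defaults from conf/viv.yaml (mode defaults to "train")
python viv.py

# override hyperparameters from the command line via standard Hydra key=value syntax
python viv.py TRAIN.epochs=200000 TRAIN.lr_scheduler.learning_rate=5e-4

# evaluate and visualize with a previously trained model
# (the checkpoint path below is a placeholder; point it at your own run's output)
python viv.py mode=eval EVAL.pretrained_model_path=./outputs_VIV/your_run/checkpoints/latest
```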