@article{lv2022visual,
title={Visual Subtitle Feature Enhanced Video Outline Generation},
author={Lv, Qi and Cao, Ziqiang and Xie, Wenrui and Wang, Derui and Wang, Jingwen and Hu, Zhiyong and Zhang, Tangkun and Ba, Yuan and Li, Yuanhang and Cao, Min and others},
journal={arXiv preprint arXiv:2208.11307},
year={2022}
}
@inproceedings{zhu2022configure,
title={ConFiguRe: Exploring Discourse-level Chinese Figures of Speech},
author={Zhu, Dawei and Zhan, Qiusi and Zhou, Zhejian and Song, Yifan and Zhang, Jiebin and Li, Sujian},
booktitle={Proceedings of the 29th International Conference on Computational Linguistics},
pages={3374--3385},
year={2022}
}
@inproceedings{xia2022transition,
title={A Transition-based Method for Complex Question Understanding},
author={Xia, Yu and Jiang, Wenbin and Lyu, Yajuan and Li, Sujian},
booktitle={Proceedings of the 29th International Conference on Computational Linguistics},
pages={4203--4211},
year={2022}
}
@inproceedings{wang2022learning,
title={Learning Robust Representations for Continual Relation Extraction via Adversarial Class Augmentation},
author={Wang, Peiyi and Song, Yifan and Liu, Tianyu and Lin, Binghuai and Cao, Yunbo and Li, Sujian and Sui, Zhifang},
booktitle={Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing},
year={2022}
}
@inproceedings{li2022inttower,
title={IntTower: the Next Generation of Two-Tower Model for Pre-Ranking System},
author={Li, Xiangyang and Chen, Bo and Guo, HuiFeng and Li, Jingjie and Zhu, Chenxu and Long, Xiang and Li, Sujian and Wang, Yichao and Guo, Wei and Mao, Longxia and others},
booktitle={Proceedings of the 31st ACM International Conference on Information \& Knowledge Management},
pages={3292--3301},
year={2022}
}
@inproceedings{wu2022precisely,
title={Precisely the Point: Adversarial Augmentations for Faithful and Informative Text Generation},
author={Wu, Wenhao and Li, Wei and Liu, Jiachen and Xiao, Xinyan and Li, Sujian and Lyu, Yajuan},
booktitle={Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing},
year={2022}
}
@inproceedings{wu2022frsum,
title={FRSUM: Towards Faithful Abstractive Summarization via Enhancing Factual Robustness},
author={Wu, Wenhao and Li, Wei and Liu, Jiachen and Xiao, Xinyan and Cao, Ziqiang and Li, Sujian and Wu, Hua},
booktitle={Findings of the Association for Computational Linguistics: EMNLP 2022},
year={2022}
}
@inproceedings{hou2022promoting,
title={Promoting Pre-trained LM with Linguistic Features on Automatic Readability Assessment},
author={Hou, Shudi and Rao, Simin and Xia, Yu and Li, Sujian},
booktitle={Proceedings of the 2nd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 12th International Joint Conference on Natural Language Processing},
pages={430--436},
year={2022}
}
@article{wu2022wecheck,
title={WeCheck: Strong Factual Consistency Checker via Weakly Supervised Learning},
author={Wu, Wenhao and Li, Wei and Xiao, Xinyan and Liu, Jiachen and Li, Sujian and Lv, Yajuan},
journal={arXiv preprint arXiv:2212.10057},
year={2022}
}