diff --git a/content/publication/2024-emotion-analysis-survey/index.md b/content/publication/2024-emotion-analysis-survey/index.md index ed21999..f8cbb5a 100644 --- a/content/publication/2024-emotion-analysis-survey/index.md +++ b/content/publication/2024-emotion-analysis-survey/index.md @@ -1,8 +1,8 @@ --- # Documentation: https://sourcethemes.com/academic/docs/managing-content/ -title: "Angry Men, Sad Women: Large Language Models Reflect Gendered Stereotypes in Emotion Attribution" -authors: ["Flor Miriam Plaza-del-Arco","Amanda Cercas Curry", "Alba Curry", "Gavin Abercrombie", "Dirk Hovy"] +title: "Emotion Analysis in NLP: Trends, Gaps and Roadmap for Future Directions" +authors: ["Flor Miriam Plaza-del-Arco", "Alba Curry","Amanda Cercas Curry","Dirk Hovy"] date: 2024-03-28 doi: "" @@ -19,12 +19,12 @@ publication_types: ["3"] publication: "arXiv" publication_short: "arXiv" -abstract: "Large language models (LLMs) reflect societal norms and biases, especially about gender. While societal biases and stereotypes have been extensively researched in various NLP applications, there is a surprising gap for emotion analysis. However, emotion and gender are closely linked in societal discourse. E.g., women are often thought of as more empathetic, while men's anger is more socially accepted. To fill this gap, we present the first comprehensive study of gendered emotion attribution in five state-of-the-art LLMs (open- and closed-source). We investigate whether emotions are gendered, and whether these variations are based on societal stereotypes. We prompt the models to adopt a gendered persona and attribute emotions to an event like 'When I had a serious argument with a dear person'. We then analyze the emotions generated by the models in relation to the gender-event pairs. We find that all models consistently exhibit gendered emotions, influenced by gender stereotypes. These findings are in line with established research in psychology and gender studies. 
Our study sheds light on the complex societal interplay between language, gender, and emotion. The reproduction of emotion stereotypes in LLMs allows us to use those models to study the topic in detail, but raises questions about the predictive use of those same LLMs for emotion applications." +abstract: "Emotions are a central aspect of communication. Consequently, emotion analysis (EA) is a rapidly growing field in natural language processing (NLP). However, there is no consensus on scope, direction, or methods. In this paper, we conduct a thorough review of 154 relevant NLP publications from the last decade. Based on this review, we address four different questions: (1) How are EA tasks defined in NLP? (2) What are the most prominent emotion frameworks and which emotions are modeled? (3) Is the subjectivity of emotions considered in terms of demographics and cultural factors? and (4) What are the primary NLP applications for EA? We take stock of trends in EA and tasks, emotion frameworks used, existing datasets, methods, and applications. We then discuss four lacunae: (1) the absence of demographic and cultural aspects does not account for the variation in how emotions are perceived, but instead assumes they are universally experienced in the same manner; (2) the poor fit of emotion categories from the two main emotion theories to the task; (3) the lack of standardized EA terminology hinders gap identification, comparison, and future goals; and (4) the absence of interdisciplinary research isolates EA from insights in other fields. Our work will enable more focused research into EA and a more holistic approach to modeling emotions in NLP." # Summary. An optional shortened abstract. 
summary: "" -tags: ["Emotion attribution","Gender Bias","Large Language Models"] +tags: ["Emotion analysis","Survey","Natural Language Processing"] categories: [] featured: false @@ -36,7 +36,7 @@ featured: false # icon_pack: fab # icon: twitter -url_pdf: https://arxiv.org/pdf/2403.03121.pdf +url_pdf: https://arxiv.org/pdf/2403.01222.pdf url_code: url_dataset: url_poster: @@ -49,7 +49,7 @@ url_video: # To use, add an image named `featured.jpg/png` to your page's folder. # Focal points: Smart, Center, TopLeft, Top, TopRight, Left, Right, BottomLeft, Bottom, BottomRight. image: - caption: 'Stereotypical model biases in gendered emotion attribution' + caption: 'Variation in emotion annotation based on demographics.' focal_point: "Center" preview_only: false @@ -65,5 +65,5 @@ projects: [integrator] # Simply enter your slide deck's filename without extension. # E.g. `slides: "example"` references `content/slides/example/index.md`. # Otherwise, set `slides: ""`. slides: "" --- diff --git a/content/publication/2024-emotion-gender-stereotypes/index.md b/content/publication/2024-emotion-gender-stereotypes/index.md index 75cbbad..ed21999 100644 --- a/content/publication/2024-emotion-gender-stereotypes/index.md +++ b/content/publication/2024-emotion-gender-stereotypes/index.md @@ -19,12 +19,12 @@ publication_types: ["3"] publication: "arXiv" publication_short: "arXiv" -abstract: "Emotions are a central aspect of communication. Consequently, emotion analysis (EA) is a rapidly growing field in natural language processing (NLP). However, there is no consensus on scope, direction, or methods. In this paper, we conduct a thorough review of 154 relevant NLP publications from the last decade. Based on this review, we address four different questions: (1) How are EA tasks defined in NLP? (2) What are the most prominent emotion frameworks and which emotions are modeled? (3) Is the subjectivity of emotions considered in terms of demographics and cultural factors? 
and (4) What are the primary NLP applications for EA? We take stock of trends in EA and tasks, emotion frameworks used, existing datasets, methods, and applications. We then discuss four lacunae: (1) the absence of demographic and cultural aspects does not account for the variation in how emotions are perceived, but instead assumes they are universally experienced in the same manner; (2) the poor fit of emotion categories from the two main emotion theories to the task; (3) the lack of standardized EA terminology hinders gap identification, comparison, and future goals; and (4) the absence of interdisciplinary research isolates EA from insights in other fields. Our work will enable more focused research into EA and a more holistic approach to modeling emotions in NLP." +abstract: "Large language models (LLMs) reflect societal norms and biases, especially about gender. While societal biases and stereotypes have been extensively researched in various NLP applications, there is a surprising gap for emotion analysis. However, emotion and gender are closely linked in societal discourse. E.g., women are often thought of as more empathetic, while men's anger is more socially accepted. To fill this gap, we present the first comprehensive study of gendered emotion attribution in five state-of-the-art LLMs (open- and closed-source). We investigate whether emotions are gendered, and whether these variations are based on societal stereotypes. We prompt the models to adopt a gendered persona and attribute emotions to an event like 'When I had a serious argument with a dear person'. We then analyze the emotions generated by the models in relation to the gender-event pairs. We find that all models consistently exhibit gendered emotions, influenced by gender stereotypes. These findings are in line with established research in psychology and gender studies. Our study sheds light on the complex societal interplay between language, gender, and emotion. 
The reproduction of emotion stereotypes in LLMs allows us to use those models to study the topic in detail, but raises questions about the predictive use of those same LLMs for emotion applications." # Summary. An optional shortened abstract. summary: "" -tags: ["Emotion analysis","Survey","Natural Language Processing"] +tags: ["Emotion attribution","Gender Bias","Large Language Models"] categories: [] featured: false @@ -36,7 +36,7 @@ featured: false # icon_pack: fab # icon: twitter -url_pdf: https://arxiv.org/pdf/2403.01222.pdf +url_pdf: https://arxiv.org/pdf/2403.03121.pdf url_code: url_dataset: url_poster: @@ -49,7 +49,7 @@ url_video: # To use, add an image named `featured.jpg/png` to your page's folder. # Focal points: Smart, Center, TopLeft, Top, TopRight, Left, Right, BottomLeft, Bottom, BottomRight. image: - caption: '' + caption: 'Stereotypical model biases in gendered emotion attribution' focal_point: "Center" preview_only: false @@ -65,5 +65,5 @@ projects: [integrator] # Simply enter your slide deck's filename without extension. # E.g. `slides: "example"` references `content/slides/example/index.md`. # Otherwise, set `slides: ""`. -slides: "Variation in emotion annotation based on demographics." +slides: "" ---