# File: preprocessing/config/questionnaires/effect_on_behavior_toward_people.yaml
# Snapshot metadata (from file listing): 2025-12-15 13:47:28 +01:00, 48 lines, 1.9 KiB, YAML
---
# Questionnaire config: perceived effect of AI interaction on behavior
# toward people. Two parallel Likert scales — one phrased in the present
# tense for AI users, one in the conditional for non-users.
questionnaire: "effect_on_behavior_toward_people"
scales:
  # Scale for respondents who use AI (present-tense wording).
  - name: "effect_on_behavior_toward_people_user"
    label: "Perceived Effect of AI Interaction on Behavior Toward People"
    items:
      - id: "effectpeople_User_1"
        text: "The way I act toward AI affects how I interact with people."
        inverse: false
      - id: "effectpeople_User_2"
        text: "Being polite to AI makes me more polite to others."
        inverse: false
      - id: "effectpeople_User_3"
        text: "I talk to other people the same way I talk to AI."
        inverse: false
      - id: "effectpeople_User_4"
        text: "I treat other people the same way I treat AI."
        inverse: false
    score_range: [1, 5]
    format: "Likert"
    calculation: "mean"
    response_options: "1 = Strongly disagree, 2 = Disagree, 3 = Neutral, 4 = Agree, 5 = Strongly agree"
    output: "effect_on_behavior_toward_people_user"
    reference: "self"
    retain_single_items: true
  # Scale for respondents who do not use AI (conditional-tense wording).
  # NOTE(review): retain_single_items is set on the user scale but absent
  # here — confirm with the preprocessing consumer that this asymmetry is
  # intentional before adding it.
  - name: "effect_on_behavior_toward_people_no_user"
    label: "Perceived Potential Effect of AI Interaction on Behavior Toward People (Non-Users)"
    items:
      - id: "effectpeople_noUser_1"
        text: "The way I act toward AI would affect how I interact with people."
        inverse: false
      - id: "effectpeople_noUser_2"
        text: "Being polite to AI would make me more polite to others."
        inverse: false
      - id: "effectpeople_noUser_3"
        text: "I would talk to other people the same way I talk to AI."
        inverse: false
      - id: "effectpeople_noUser_4"
        text: "I would treat other people the same way I treat AI."
        inverse: false
    score_range: [1, 5]
    format: "Likert"
    calculation: "mean"
    response_options: "1 = Strongly disagree, 2 = Disagree, 3 = Neutral, 4 = Agree, 5 = Strongly agree"
    output: "effect_on_behavior_toward_people_no_user"
    reference: "self"