questionnaire: "perceived_moral_agency"
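
# Scoring note (assumption, not stated in this file): items flagged "inverse: true"
# are presumably reverse-coded before averaging, i.e. 6 - raw response given the
# 1-5 score_range, so higher scale means indicate greater perceived moral agency.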
scales:
  - name: "perceived_moral_agency_favorite_ai"
    label: "Perceived Moral Agency of Favorite AI"
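    # "(piped fav AI)" is a piping placeholder, presumably replaced at runtime
    # with the name of the AI the participant selected as their favorite.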
    items:
      - id: "moralagency_User_fav_1"
        text: "(piped fav AI) has a sense for what is right and wrong."
        inverse: false
      - id: "moralagency_User_fav_2"
        text: "(piped fav AI) can think through whether an action is moral."
        inverse: false
      - id: "moralagency_User_fav_3"
        text: "(piped fav AI) might feel obligated to behave in a moral way."
        inverse: false
      - id: "moralagency_User_fav_4"
        text: "(piped fav AI) is capable of being rational about good and evil."
        inverse: false
      - id: "moralagency_User_fav_5"
        text: "(piped fav AI) behaves according to moral rules."
        inverse: false
      - id: "moralagency_User_fav_6"
        text: "(piped fav AI) would refrain from doing things that have painful repercussions."
        inverse: false
      - id: "moralagency_User_fav_7"
        text: "(piped fav AI) can only behave how it is programmed to behave."
        inverse: true
      - id: "moralagency_User_fav_8"
        text: "(piped fav AI)'s actions are the result of its programming."
        inverse: true
      - id: "moralagency_User_fav_9"
        text: "(piped fav AI) can only do what humans tell it to do."
        inverse: true
      - id: "moralagency_User_fav_10"
        text: "(piped fav AI) would never do anything it was not programmed to do."
        inverse: true
    score_range: [1, 5]
    format: "Likert"
    calculation: "mean"
    response_options: "1 = Strongly disagree, 2 = Disagree, 3 = Neutral, 4 = Agree, 5 = Strongly agree"
    output: "perceived_moral_agency_favorite_ai"
    reference: "self"

- name: "perceived_moral_agency_no_user"
|
|
label: "Perceived Moral Agency of AI in General"
|
|
items:
|
|
- id: "moralagency_noUser_1"
|
|
text: "AI has a sense for what is right and wrong."
|
|
inverse: false
|
|
- id: "moralagency_noUser_2"
|
|
text: "AI can think through whether an action is moral."
|
|
inverse: false
|
|
- id: "moralagency_noUser_3"
|
|
text: "AI might feel obligated to behave in a moral way."
|
|
inverse: false
|
|
- id: "moralagency_noUser_4"
|
|
text: "AI is capable of being rational about good and evil."
|
|
inverse: false
|
|
- id: "moralagency_noUser_5"
|
|
text: "AI behaves according to moral rules."
|
|
inverse: false
|
|
- id: "moralagency_noUser_6"
|
|
text: "AI would refrain from doing things that have painful repercussions."
|
|
inverse: false
|
|
- id: "moralagency_noUser_7"
|
|
text: "AI can only behave how it is programmed to behave."
|
|
inverse: true
|
|
- id: "moralagency_noUser_8"
|
|
text: "AI's actions are the result of its programming."
|
|
inverse: true
|
|
- id: "moralagency_noUser_9"
|
|
text: "AI can only do what humans tell it to do."
|
|
inverse: true
|
|
- id: "moralagency_noUser_10"
|
|
text: "AI would never do anything it was not programmed to do."
|
|
inverse: true
|
|
score_range: [1, 5]
|
|
format: "Likert"
|
|
calculation: "mean"
|
|
response_options: "1 = Strongly disagree, 2 = Disagree, 3 = Neutral, 4 = Agree, 5 = Strongly agree"
|
|
output: "perceived_moral_agency_no_user"
|
|
reference: "self"
|
|
|
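
# Illustrative scoring walk-through (hypothetical responses, assuming reverse
# coding of inverse items as 6 - raw value):
#   raw responses:                    [4, 4, 3, 4, 4, 3, 2, 2, 3, 2]
#   after reverse-coding items 7-10:  [4, 4, 3, 4, 4, 3, 4, 4, 3, 4]
#   scale score (mean):               3.7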