class DoctorAgent:
    def __init__(self, name, abstraction_type):
        self.name = name
        self.abstraction_type = abstraction_type
        self.communication_type = 0
        self.guess_type = 0
        self.round_check = 10  # type2 counts the number of rounds passed; it performs specific actions every 10 rounds
        self.knowledgebase = [["K_X_doctor_Y", 1]]  # initialized with the knowledge "K_X_doctor_Y"
        self.beliefbase = []
        self.abs_0 = "K_X_doctor_Y"
        self.abs_1 = "L_X_good_communication_Y"
        self.abs_2 = "L_X_expert_Y"
        self.abs_3 = "L_X_L_Y_good_communication_X"
        self.abs_4 = "L_X_L_Y_good_capabilities_X"
        self.abs_5 = "L_Y_good_communication_X"
        self.abs_6 = "L_Y_good_capabilities_X"
        self.relevant_knowledge = [self.abs_0]
        self.relevant_beliefs = [self.abs_1, self.abs_2, self.abs_3, self.abs_4, self.abs_5, self.abs_6]
        self.trust_x_y = 0
        self.trust_y_x = 0
        self.change_flag = 0
        self.relevant_change_flag = 0
        self.abstraction_count = 0
        self.deliberation_count = [0, 0, 0, 0]  # [discuss, agree, persuade, consult]
        self.revision_count = 0
        self.addition_count = 0
        self.change_count = 0
        self.relevant_change_count = 0
        self.inconsistent_abstraction_count = 0
        self.current_deliberation = "consult"  # beliefbase is empty initially, abstractions not set
        self.calibration_count = 0
        self.tom_consistency_count = 0

    def init_all_self_beliefs(self, value):
        # set every self abstraction-related belief to the given value in the beliefbase
        self.beliefbase = [[self.abs_1, value], [self.abs_2, value], [self.abs_3, value], [self.abs_4, value]]
        self.abstraction()
        if value == 1:
            self.current_deliberation = "discuss"
    def add_belief(self, belief, round):
        revision_flag = 0
        if belief not in self.beliefbase:  # only act if this exact belief is not already in the beliefbase
            opposite_value = (belief[1] + 1) % 2
            if [belief[0], opposite_value] in self.beliefbase:
                # if the opposite belief is in the beliefbase, revise it with the new value
                belief_index = self.beliefbase.index([belief[0], opposite_value])
                self.beliefbase[belief_index][1] = belief[1]
                revision_flag = 1  # agent remembers belief revisions
                self.revision_count += 1
            else:  # if the belief does not exist at all, add it to the beliefbase
                self.beliefbase.append(belief)
                self.addition_count += 1
            self.change_count += 1
            if self.abstraction_type == 1:  # type1 checks abstractions after every new change in the beliefbase
                self.abstraction()
            elif self.abstraction_type == 3 and revision_flag == 1:  # type3 checks abstractions after every revision (not after additions)
                self.abstraction()
            elif self.abstraction_type == 5:  # type5 remembers any change in the beliefbase
                self.change_flag = 1
            elif self.abstraction_type == 6 and belief[0] in self.relevant_beliefs:  # type6 remembers relevant changes in the beliefbase
                self.relevant_change_count += 1
                self.relevant_change_flag = 1
        if self.abstraction_type == 2 and round % self.round_check == 0:  # type2 checks abstractions every self.round_check rounds
            self.abstraction()
    def abstraction(self):
        # Means: L_X (Doctor(Y) and Expert(Y) and GoodCommunication(Y) -> Trust(X, Y))
        if [self.abs_0, 1] in self.knowledgebase and [self.abs_1, 1] in self.beliefbase and [self.abs_2, 1] in self.beliefbase:
            self.trust_x_y = 1
        else:
            self.trust_x_y = 0
        # Means: L_X (L_Y (GoodCommunication(X)) and L_Y (GoodCapabilities(X)) -> Trust(Y, X))
        if [self.abs_3, 1] in self.beliefbase and [self.abs_4, 1] in self.beliefbase:
            self.trust_y_x = 1
        else:
            self.trust_y_x = 0
        self.abstraction_count += 1
    def deliberation(self, round):
        if self.abstraction_type == 4:  # type4 checks abstractions only just before deliberation
            self.abstraction()
        elif self.abstraction_type == 5 and self.change_flag == 1:  # type5 checks abstractions just before deliberation, and only if a change in the beliefbase was observed
            self.abstraction()
            self.change_flag = 0  # type5 resets change_flag after checking abstractions
        elif self.abstraction_type == 6 and self.relevant_change_flag == 1:  # type6 checks abstractions just before deliberation, and only if a relevant (abstraction-related) change in the beliefbase was observed
            self.abstraction()
            self.relevant_change_flag = 0  # type6 resets relevant_change_flag after checking abstractions
        elif self.abstraction_type == 2 and round % self.round_check == 0:  # type2 checks abstractions every self.round_check rounds
            self.abstraction()
        if self.trust_x_y == 1 and self.trust_y_x == 1:
            self.current_deliberation = "discuss"
            self.deliberation_count[0] += 1
        elif self.trust_x_y == 1 and self.trust_y_x == 0:
            self.current_deliberation = "agree"
            self.deliberation_count[1] += 1
        elif self.trust_x_y == 0 and self.trust_y_x == 1:
            self.current_deliberation = "persuade"
            self.deliberation_count[2] += 1
        else:
            self.current_deliberation = "consult"
            self.deliberation_count[3] += 1
        # abstraction consistency check
        inc1 = ([self.abs_0, 1] in self.knowledgebase and [self.abs_1, 1] in self.beliefbase and [self.abs_2, 1] in self.beliefbase) and self.trust_x_y == 0
        inc2 = ([self.abs_0, 1] not in self.knowledgebase or [self.abs_1, 1] not in self.beliefbase or [self.abs_2, 1] not in self.beliefbase) and self.trust_x_y == 1
        inc3 = ([self.abs_3, 1] in self.beliefbase and [self.abs_4, 1] in self.beliefbase) and self.trust_y_x == 0
        inc4 = ([self.abs_3, 1] not in self.beliefbase or [self.abs_4, 1] not in self.beliefbase) and self.trust_y_x == 1
        if inc1 or inc2 or inc3 or inc4:
            self.inconsistent_abstraction_count += 1
    def alert_reaction(self):
        # ask about abstraction-related beliefs in case of an unexpected deliberation.
        # This function is designed for the second experiment, where self.trust_x_y is always 0,
        # so an action can either be "persuade" or "consult",
        # yet the agent asks for the same abstraction-related beliefs either way.
        if self.current_deliberation == "consult":
            # shouldbe_deliberation = "persuade"
            # shouldbe_trust_y_x = 1
            return [self.relevant_beliefs[4], self.relevant_beliefs[5]]
        else:
            # shouldbe_deliberation = "consult"
            # shouldbe_trust_x_y = 1
            return [self.relevant_beliefs[4], self.relevant_beliefs[5]]
    def calibrate(self):
        # convey abstraction-related beliefs to change/calibrate the other's beliefs in case of an unexpected deliberation.
        # This function is designed for the third experiment, where self.trust_x_y is always 1,
        # so an action can either be "discuss" or "agree",
        # yet the agent conveys the same abstraction-related beliefs either way.
        self.calibration_count += 1
        if self.current_deliberation == "discuss":
            # in our case, "discuss" is the preferred action, but mutual trust is required for that;
            # since the agent already trusts the human, it should also ensure the human's trust in itself
            return [[self.relevant_beliefs[4], 1], [self.relevant_beliefs[5], 1]]
        else:
            return [[self.relevant_beliefs[4], 1], [self.relevant_beliefs[5], 1]]
    def acquire_single_belief(self, belief, number):
        # adopt the value of a single belief communicated by the other agent
        # (only if communication is enabled and the value actually differs)
        if self.communication_type == 1 and self.beliefbase[number][1] != belief[1]:
            self.beliefbase[number][1] = belief[1]
            self.relevant_change_flag = 1

    def guess_single_belief(self, belief_number, belief_value):
        # adopt a guessed value for a single belief
        # (only if guessing is enabled and the value actually differs)
        if self.guess_type == 1 and self.beliefbase[belief_number][1] != belief_value:
            self.beliefbase[belief_number][1] = belief_value
            self.relevant_change_flag = 1
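
# --- Hypothetical usage sketch (not part of the original agent code) ---
# A minimal driver showing how DoctorAgent might be exercised over a number of rounds.
# The round count and the belief schedule below are assumptions made purely for
# illustration; they are not the experiment configuration of the original source.
if __name__ == "__main__":
    agent = DoctorAgent("X", abstraction_type=1)  # type1: re-check abstractions after every beliefbase change
    agent.init_all_self_beliefs(0)  # start with all abstraction-related beliefs set to 0

    # Assumed belief schedule: the agent gradually comes to trust Y (abs_1, abs_2)
    # and to believe that Y trusts it back (abs_3, abs_4).
    incoming_beliefs = {
        3: [agent.abs_1, 1],   # round 3: Y communicates well
        5: [agent.abs_2, 1],   # round 5: Y is an expert -> trust_x_y should become 1
        8: [agent.abs_3, 1],   # round 8: Y believes X communicates well
        12: [agent.abs_4, 1],  # round 12: Y believes X has good capabilities -> trust_y_x should become 1
    }

    for current_round in range(1, 16):
        if current_round in incoming_beliefs:
            agent.add_belief(incoming_beliefs[current_round], current_round)
        agent.deliberation(current_round)
        print(current_round, agent.current_deliberation, agent.trust_x_y, agent.trust_y_x)

    print("deliberation counts [discuss, agree, persuade, consult]:", agent.deliberation_count)
    print("revisions:", agent.revision_count, "additions:", agent.addition_count)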