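#Simulation script for the two doctor-agent abstraction experiments reported in the paper:
#Experiment 1 compares six abstraction-update strategies; Experiment 2 compares four
#communication strategies between an agent doctor and its human doctor partner.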
from doctoragent import DoctorAgent
from doctorhuman import DoctorHuman
import random
import copy
from prettytable import PrettyTable
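#interface of DoctorAgent/DoctorHuman as used below (a summary inferred from usage in this
#script, not the authoritative definitions in doctoragent.py/doctorhuman.py):
# DoctorAgent(name, abstraction_type): .add_belief(belief, round), .deliberation(round),
#   .guess_single_belief(index, value), .acquire_single_belief(belief, index), .alert_reaction(),
#   .name, .abstraction_count, .inconsistent_abstraction_count, .guess_type,
#   .communication_type, .current_deliberation
# DoctorHuman(name): .init_human(v1, v2), .add_belief(belief, round),
#   .deliberation_critic(deliberation), .tell_belief_values(beliefs), .beliefbase,
#   .unexpected_agent_action_alert, .unexpected_agent_actions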
random_seeds = [505, 12345, 911, 6895789, 4786012, 271234, 10, 787878, 1724683, 12345678]
random_seed = random_seeds[0]
print("Randomization Seed: " + str(random_seed))
random.seed(random_seed) #seeds used for paper: 505, 12345, 911, 6895789, 4786012, 271234, 10, 787878, 1724683, 12345678
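#to reproduce all ten runs from the paper, the experiments below could be wrapped in a loop
#over random_seeds (a hypothetical refactor; each iteration would need to re-seed and
#re-initialize the agents):
#  for random_seed in random_seeds:
#      random.seed(random_seed)
#      ...re-run both experiments...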
#first experiment
simulation1_duration = 10000
event_counts_1 = [0, 0, 0] #three types of events: ordinary belief addition/revision, abstraction-related belief addition/revision, deliberation (action decision and agent interaction moment)
abs_1 = "L_X_good_communication_Y"
abs_2 = "L_X_expert_Y"
abs_3 = "L_X_L_Y_good_communication_X"
abs_4 = "L_X_L_Y_good_capabilities_X"
abstraction_beliefs = [abs_1, abs_2, abs_3, abs_4]
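#belief naming convention (as inferred from the formulas and from Experiment 2 below):
#"L_X_phi" reads "X believes phi"; nesting as in "L_X_L_Y_phi" expresses a second-order
#(theory-of-mind) belief, i.e., X believes that Y believes phi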
#agents 1-6 are initialized below with the given abstraction types; every agent type runs the abstraction process differently:
agent1 = DoctorAgent("Agent_1", 1) #after every change (i.e., addition and revision) in the belief base
agent2 = DoctorAgent("Agent_2", 2) #after every 10 rounds
agent3 = DoctorAgent("Agent_3", 3) #after every revision in the belief base (excludes additions)
agent4 = DoctorAgent("Agent_4", 4) #only just before a deliberation
agent5 = DoctorAgent("Agent_5", 5) #only just before a deliberation and if there is a change in the belief base
agent6 = DoctorAgent("Agent_6", 6) #only just before a deliberation and if there is an abstraction-related change in the beliefbase
all_agents_1 = [agent1, agent2, agent3, agent4, agent5, agent6]
for round_sim1 in range(simulation1_duration):
    random_event_1 = random.randint(1, 3) #there are three types of events, every event happening with equal probability
    if random_event_1 == 1: #ordinary belief can be added to/revised in the beliefbase
        random_belief = [str(random.randint(0, 100)) + "_b", random.randint(0, 1)]
        #a random belief is chosen among the 101 beliefs numbered 0-100, denoted as "number_b", and randomly assigned 1 or 0 ("believe"/"not believe")
        for agent_i in range(len(all_agents_1)): #every agent adds/revises the belief
            all_agents_1[agent_i].add_belief(copy.deepcopy(random_belief), round_sim1)
#print("ordinary belief added/revised")
    elif random_event_1 == 2: #abstraction-related belief can be added to/revised in the beliefbase
        random_abs = random.randint(0, len(abstraction_beliefs) - 1)
        #a random belief is chosen among the abstraction-related beliefs
        random_abs_belief = [abstraction_beliefs[random_abs], random.randint(0, 1)] #randomly assigned to 0 or 1
        for agent_j in range(len(all_agents_1)): #every agent adds/revises the belief
            all_agents_1[agent_j].add_belief(copy.deepcopy(random_abs_belief), round_sim1)
#print("abstraction-related belief added/revised")
    else: #random_event_1 == 3, deliberation is to be done (i.e., a conflict happens, the agent needs to decide what to do)
        for agent_k in range(len(all_agents_1)): #every agent deliberates
            all_agents_1[agent_k].deliberation(round_sim1)
event_counts_1[random_event_1 - 1] += 1
#results part
print("Experiment 1")
print("All events (simulation duration): " + str(sum(event_counts_1)))
print("Ordinary belief addition/revision: " + str(event_counts_1[0]))
print("Abstraction-related belief addition/revision: " +
str(event_counts_1[1]))
print("Deliberation moment: " + str(event_counts_1[2]))
print("")
experiment1_results = PrettyTable(["Agent Type", "# Abstraction Updates", "# Consistent Abstractions", "m1"])
for agent_m in range(len(all_agents_1)):
    experiment1_results.add_row([
        all_agents_1[agent_m].name,
        all_agents_1[agent_m].abstraction_count,
        event_counts_1[2] - all_agents_1[agent_m].inconsistent_abstraction_count,
        "%.2f" % ((event_counts_1[2] - all_agents_1[agent_m].inconsistent_abstraction_count) / all_agents_1[agent_m].abstraction_count)
    ])
print(experiment1_results)
#################################################################################################
#second experiment
simulation2_duration = 10000
event_counts_2 = [0, 0, 0]
abss_1 = "L_X_L_Y_good_communication_X"
abss_2 = "L_X_L_Y_good_capabilities_X"
abss_3 = "L_Y_good_communication_X"
abss_4 = "L_Y_good_capabilities_X"
abstraction_beliefss = [abss_1, abss_2, abss_3, abss_4]
#four agents with type 6 abstraction process are initialized below with different communication types
agent_6 = DoctorAgent("Agent_6", 6) #agent_6's communication parameter is set to zero as it does not communicate with the human doctor
agent_7 = DoctorAgent("Agent_7", 6) #agent_7's communication parameter is also set to zero as it does not communicate with the human doctor, but it can randomly guess one of doctor's abstraction-related beliefs
agent_8 = DoctorAgent("Agent_8", 6) #agent_8 asks human doctor about one of doctor's abstraction-related beliefs when it performs an unexpected action during a deliberation
agent_9 = DoctorAgent("Agent_9", 6) #agent_8 asks human doctor about one of doctor's abstraction-related beliefs when it performs an unexpected action during a deliberation
agent_7.guess_type = 1
agent_8.communication_type = 1
agent_9.communication_type = 1
#initialization of agents: abstraction-related beliefs are both assigned to 0
agent_6.add_belief([abss_1, 0], 0) #agent_6 initialized with no perceived trust from its human partner,
agent_6.add_belief([abss_2, 0], 0) #meaning both abstraction-related beliefs are assigned to 0
agent_7.add_belief([abss_1, 0], 0) #agent_7 initialized likewise
agent_7.add_belief([abss_2, 0], 0)
agent_8.add_belief([abss_1, 0], 0) #agent_8 initialized likewise
agent_8.add_belief([abss_2, 0], 0)
agent_9.add_belief([abss_1, 0], 0) #agent_9 initialized likewise
agent_9.add_belief([abss_2, 0], 0)
human_1 = DoctorHuman("Human_1") #instantiation of four human doctors that are capable of adding/revising their beliefs, holding abstractions, and performing deliberations accordingly
human_2 = DoctorHuman("Human_2") #a human doctor can communicate with its agent doctor partner to tell whether a certain action decision of the agent is expected or not
human_3 = DoctorHuman("Human_3") #it can also answer questions about its agent partner's beliefs
human_4 = DoctorHuman("Human_4")
#initialization of human doctors: abstraction-related beliefs are both assigned to 0
human_1.init_human(0, 0)
human_2.init_human(0, 0)
human_3.init_human(0, 0)
human_4.init_human(0, 0)
all_agents_2 = [agent_6, agent_7, agent_8, agent_9]
all_humans = [human_1, human_2, human_3, human_4]
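#agents and humans are paired by index: agent_6 works with human_1, agent_7 with human_2, and so on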
for round_sim2 in range(simulation2_duration):
    random_event_2 = random.randint(1, 3) #there are three types of events, every event happening with equal probability
#print("")
if random_event_2 == 1: #ordinary belief can be added to/revised in the beliefbase
        random_belief_i = [str(random.randint(0, 100)) + "_b", random.randint(0, 1)]
        #a random belief is chosen among the 101 beliefs numbered 0-100, randomly assigned to 0 or 1
for agent_ii in range(len(all_agents_2)): #every agent adds/revises the belief
all_agents_2[agent_ii].add_belief(copy.deepcopy(random_belief_i), round_sim2)
#print("ordinary belief added/revised")
for human in range(len(all_humans)): #every human adds/revises the belief
all_humans[human].add_belief(copy.deepcopy(random_belief_i), round_sim2)
elif random_event_2 == 2: #abstraction-related belief can be added to/revised in beliefbase
        random_abs_i = random.randint(0, len(abstraction_beliefss) - 3)
        #a random higher-order belief (index 0 or 1) is chosen among the abstraction-related beliefs for the agents
random_abs_belief_i = [abstraction_beliefss[random_abs_i], random.randint(0, 1)] #randomly assigned to 0 or 1
for agent_jj in range(len(all_agents_2)): #every agent adds/revises the belief
all_agents_2[agent_jj].add_belief(copy.deepcopy(random_abs_belief_i), round_sim2)
#print("abstraction-related belief added/revised")
random_abs_belief_ii = [abstraction_beliefss[random_abs_i + 2], random.randint(0, 1)]
for human in range(len(all_humans)):
all_humans[human].add_belief(copy.deepcopy(random_abs_belief_ii), round_sim2)
#lower-order abstraction-related belief (random_abs_belief_ii) can be added to/revised in humans' beliefbases
#50-50 chance being the same value with the corresponding higher-order abstraction-related belief (random_abs_belief_i)
    else: #random_event_2 == 3, deliberation is to be done (i.e., a conflict happens, the agent needs to decide what to do)
for agent_kk in range(len(all_agents_2)):
all_agents_2[agent_kk].deliberation(round_sim2) #every agent does deliberation
all_humans[agent_kk].deliberation_critic(all_agents_2[agent_kk].current_deliberation) #every human doctor evaluates its agent doctor partner's action decision
if (all_humans[1].unexpected_agent_action_alert == 1 and all_agents_2[1].guess_type == 1): #if the human has not expected the guessing agent's action (agent 7):
guess_number = random.randint(0, 1) #agent randomly chooses which abstraction-related belief to guess
all_agents_2[1].guess_single_belief(guess_number, random.randint(0, 1)) #agent randomly guesses the abstraction-related belief's value
if (all_humans[2].unexpected_agent_action_alert == 1 and all_agents_2[2].communication_type == 1): #if the human has not expected the singly-communicating agent's action (agent 8):
abs_rel_bel_number = random.randint(0, 1) #agent randomly chooses which abstraction-related belief to acquire from human
all_agents_2[2].acquire_single_belief(copy.deepcopy(all_humans[2].beliefbase[abs_rel_bel_number]), abs_rel_bel_number) #agent acquires the chosen abstraction-related belief
if (all_humans[3].unexpected_agent_action_alert == 1 and all_agents_2[3].communication_type == 1):
#if the human has not expected the fully-communicating agent's action (agent 9):
            asked_beliefs = all_agents_2[3].alert_reaction() #the agent asks the human doctor about its abstraction-related beliefs,
            answered_beliefs = all_humans[3].tell_belief_values(asked_beliefs) #the human tells the values of those abstraction-related beliefs,
for answered_belief in range(len(answered_beliefs)):
all_agents_2[3].add_belief(["L_X_" + answered_beliefs[answered_belief][0], answered_beliefs[answered_belief][1]], round_sim2)
#the agent adds/revises the abstraction-related beliefs (and updates the associated abstraction accordingly)
event_counts_2[random_event_2 - 1] += 1
#results part
print("")
print("Experiment 2")
print("All events (simulation duration): " + str(sum(event_counts_2)))
print("Ordinary belief addition/revision: " + str(event_counts_2[0]))
print("Abstraction-related belief addition/revision: " + str(event_counts_2[1]))
print("Deliberation moment: " + str(event_counts_2[2]))
print("")
experiment2_results = PrettyTable(["Agent Type", "# Abstraction Updates", "# ToM-Consistent Deliberations", "m2"])
for agent_n in range(len(all_agents_2)):
    experiment2_results.add_row([
        all_agents_2[agent_n].name,
        all_agents_2[agent_n].abstraction_count,
        event_counts_2[2] - all_humans[agent_n].unexpected_agent_actions,
        "%.2f" % ((event_counts_2[2] - all_humans[agent_n].unexpected_agent_actions) / all_agents_2[agent_n].abstraction_count)
    ])
print(experiment2_results)