from doctoragent import DoctorAgent
from doctorhuman import DoctorHuman
import random
import copy
from prettytable import PrettyTable
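
# DoctorAgent and DoctorHuman are defined in doctoragent.py and doctorhuman.py
# (not included in this file). As used below, the assumed interface is:
#   DoctorAgent: init_all_self_beliefs(trust), communication_type, round_check,
#                deliberation(round), current_deliberation, calibrate(),
#                calibration_count, name
#   DoctorHuman: init_human(abs_belief_1, abs_belief_2), add_belief(belief, round),
#                deliberation_critic2(deliberation), unexpected_agent_action_alert,
#                unexpected_agent_actions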

random_seeds = [505, 12345, 911, 6895789, 4786012, 271234, 10, 787878, 1724683, 12345678]  # seeds used for the paper
random_seed = random_seeds[9]
print("Randomization Seed: " + str(random_seed))

random.seed(random_seed)

# third experiment:
# doctor agents' beliefs do not change during this experiment; only doctor humans' beliefs change

simulation3_duration = 10000
event_counts_3 = [0, 0, 0]  # three event types: ordinary belief addition/revision, abstraction-related belief addition/revision, and deliberation (an action decision and agent interaction moment)

absss_1 = "L_Y_good_communication_X"
absss_2 = "L_Y_good_capabilities_X"
abstraction_beliefss = [absss_1, absss_2]
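# note: beliefs are represented throughout as [name, value] pairs (value is 0 or 1);
# the two lower-order abstraction-related beliefs above concern the agent's communication skills and capabilities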

# two agents with type-6 abstraction behaviour, initialized with the abstraction types given below
agent_10 = DoctorAgent("Agent_10", 6)  # agent_10 demonstrates its capabilities and communication skills to the human doctor when it performs an unexpected action during a deliberation
agent_11 = DoctorAgent("Agent_11", 6)  # agent_11 demonstrates its capabilities and communication skills to the human doctor after every 10 rounds
agent_10.init_all_self_beliefs(1)  # agent_10's trust in its human partner is set
agent_11.init_all_self_beliefs(1)  # agent_11's trust in its human partner is set
agent_10.communication_type = 1  # because agent_10's communication parameter is set to 1, it can change the human doctor's abstractions via demonstrations
agent_11.communication_type = 1  # because agent_11's communication parameter is set to 1, it can change the human doctor's abstractions via demonstrations

human_5 = DoctorHuman("Human_5")  # two human doctors, capable of adding/revising their beliefs, holding abstractions, and performing deliberations accordingly
human_6 = DoctorHuman("Human_6")  # each can communicate its satisfaction with its agent partner's action decisions and also answer its agent partner's questions
human_5.init_human(0, 0)  # initial belief configuration of human_5 (both abstraction-related beliefs set to 0)
human_6.init_human(0, 0)  # initial belief configuration of human_6 (both abstraction-related beliefs set to 0)
all_agents_3 = [agent_10, agent_11]
all_humanss = [human_5, human_6]

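# main simulation loop: each round, one of the three event types is drawn uniformly at random;
# independently of the drawn event, agent_11 calibrates on every round that is a multiple of its round_check value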
for round_sim3 in range(simulation3_duration):
  random_event_3 = random.randint(1, 3)  # three event types, each occurring with equal probability
  if random_event_3 == 1:  # an ordinary belief is added to/revised in the belief base
    random_belief_ii = [str(random.randint(0, 100)) + "_b", random.randint(0, 1)]  # a random ordinary belief (ids 0-100) is chosen and randomly assigned 0 or 1
    for human in all_humanss:  # every human adds/revises the belief
      human.add_belief(copy.deepcopy(random_belief_ii), round_sim3)

  elif random_event_3 == 2:  # an abstraction-related belief is added to/revised in the belief base
    random_abs_ii = random.randint(0, len(abstraction_beliefss) - 1)  # a random abstraction-related belief is chosen
    random_abs_belief_ii = [abstraction_beliefss[random_abs_ii], random.randint(0, 1)]  # randomly assigned 0 or 1
    for human in all_humanss:  # the lower-order abstraction-related belief is added to/revised in every human's belief base
      human.add_belief(copy.deepcopy(random_abs_belief_ii), round_sim3)

  else:  # random_event_3 == 3, a deliberation takes place (because of a diagnostic conflict, each agent needs to decide what to do)
    for agent, human in zip(all_agents_3, all_humanss):
      agent.deliberation(round_sim3)  # every agent deliberates
      human.deliberation_critic2(agent.current_deliberation)  # every human evaluates its partner agent's deliberation
    if (all_humanss[0].unexpected_agent_action_alert == 1 and all_agents_3[0].communication_type == 1):  # if the human did not expect agent_10's action:
      # agent_10 understands this is caused by the human doctor's abstractions and performs the necessary action to calibrate them
      beliefs_to_change = all_agents_3[0].calibrate()
      all_humanss[0].add_belief(beliefs_to_change[0], round_sim3)  # the human revises their own abstraction-related beliefs accordingly
      all_humanss[0].add_belief(beliefs_to_change[1], round_sim3)

  if (round_sim3 % all_agents_3[1].round_check == 0 and all_agents_3[1].communication_type == 1):  # if the current round is a multiple of agent_11's round_check value:
    # agent_11 performs the necessary action to calibrate the human's abstractions
    beliefss_to_change = all_agents_3[1].calibrate()
    all_humanss[1].add_belief(beliefss_to_change[0], round_sim3)
    all_humanss[1].add_belief(beliefss_to_change[1], round_sim3)
  
  event_counts_3[random_event_3 - 1] += 1

# results
print("")
print("Experiment 3")
print("All events (simulation duration): " + str(sum(event_counts_3)))
print("Ordinary belief addition/revision: " + str(event_counts_3[0]))
print("Abstraction-related belief addition/revision: " + str(event_counts_3[1]))
print("Deliberation moment: " + str(event_counts_3[2]))
print("")

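# m3 = (# expected deliberations) / (# calibrations): the number of deliberations matching the
# human's expectations that each calibration corresponds to, per agent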
experiment3_results = PrettyTable(["Agent Type", "# Calibrations", "# Expected Deliberations", "m3"])
for agent, human in zip(all_agents_3, all_humanss):
  expected_deliberations = event_counts_3[2] - human.unexpected_agent_actions
  m3 = expected_deliberations / agent.calibration_count if agent.calibration_count else float("inf")  # guard against a run with zero calibrations
  experiment3_results.add_row([agent.name, agent.calibration_count, expected_deliberations, "%.2f" % m3])
print(experiment3_results)