Exemplo n.º 1
0
def main(graph_name):
   """Calibrate an infection-with-memory spreading model against real adoption data.

   Reads a shift/doctor bipartite-like graph from the GML file *graph_name*,
   then grid-searches three parameters (immunization probability, per-contact
   infection probability, and dose size) by running Niter stochastic
   simulations per parameter combination over the first `cutting_day` days
   (training window). Each simulated adoption curve is compared to the actual
   adoption curve; per-combination distance statistics are accumulated and the
   best parameter sets are selected and printed (two ranking criteria).

   Side effects: writes histogram files and ranked-results files under
   ../Results/, and prints progress and the optimum parameters to stdout.

   NOTE(review): written for Python 2 (print statements) and an old networkx
   API (G.node[...]); relies on project modules
   compare_real_evol_vs_simus_to_be_called, histograma_gral_negv_posit and
   histograma_bines_gral, which are assumed imported at module level.
   """
 


   cutting_day=175  # day index separating training from testing data



 


   G = nx.read_gml(graph_name)


   all_team="NO"   # as adopters or not

   list_id_weekends_T3=look_for_T3_weekends(G)  # T3 doesn't share fellows in the weekend  (but they are the exception)


   dir_real_data='../Results/'
   Nbins=20   # for the histogram of sum of distances


   delta_end=3.  # >= than + or -  dr difference at the end of the evolution (NO realization ends up closer than this!!!! if 2, i get and empty list!!!)

   Niter=1000
  
   fixed_param=""#FIXED_Pimm0_"    # or ""  # for the Results file that contains the sorted list of best parameters


######################################################################################
#  I read the file of the actual evolution of the idea spreading in the hospital:   ##
######################################################################################



  
   filename_actual_evol="../Results/Actual_evolution_adopters_from_inference.dat"
  


   file1=open(filename_actual_evol,'r')         ## i read the file:  list_dates_and_names_current_adopters.txt  (created with: extract_real_evolution_number_adopters.py)
   list_lines_file=file1.readlines()
            

   # one adopter count per line (tab-separated: date \t count); build the
   # real time series of number of adopters
   list_actual_evol=[]  
   for line in list_lines_file:      # [1:]:   # i exclude the first row   
     
      num_adopters= float(line.split("\t")[1])          
      list_actual_evol.append(num_adopters)



   list_actual_evol_training=list_actual_evol[:cutting_day]
  # list_actual_evol_testing=list_actual_evol[(cutting_day-1):]  # i dont need this one
   
  
##################################################################

   # parameter-sweep ranges; max bounds slightly above 1.0 so the float
   # while-loops below include the 1.0 endpoint despite accumulation error
   prob_min=0.10
   prob_max=1.01
   delta_prob=0.1
   
   

   prob_Immune_min=0.0    
   prob_Immune_max=1.001
   delta_prob_Immune=0.1

# threshold is not personal, and set randomly to a value (0,1)
 
                   # of a single encounter with an infected  (it cant be zero or it doesnt make sense!)
   dose_min=0.05              #infect_threshold_min
   dose_max=1.001         #######infect_threshold_min/10.
   delta_dose=0.05           ##infect_threshold_min/10.


   dir="../Results/weight_shifts/infection/"       

   dict_filenames_tot_distance={}   # i will save the filename as key and the tot distance from that curve to the original one
   dict_filenames_prod_distances={}   

  

   # triple nested sweep: prob_Immune (outer) -> prob_infection -> dose (inner)
   prob_Immune=prob_Immune_min
   while prob_Immune<= prob_Immune_max:
        
      print "prom Immune:",prob_Immune        

      prob_infection=prob_min
      while prob_infection<= prob_max:
                 
            print "  p:",prob_infection                              
            
            dose=dose_min
            while dose <= dose_max:
               
               print "  dose:",dose





               # parameters are encoded in the filename; they are parsed back
               # out of it by string splitting after the ranking step below
               output_file2=dir+"Average_time_evolution_Infection_memory_training_p"+str(prob_infection)+"_"+"Immune"+str(prob_Immune)+"_threshold_from_distrib_dose"+str(dose)+"_"+str(Niter)+"iter_day"+str(cutting_day)+"_A_F_inferred.dat"
             #  file2 = open(output_file2,'wt')                                       
              # file2.close()
               



      
               list_lists_t_evolutions=[]     # i create the empty list of list for the Niter temporal evolutions
               
               list_dist_fixed_parameters=[]
               list_dist_at_ending_point_fixed_parameters=[]
               list_dist_abs_at_ending_point_fixed_parameters=[]
              
               
               for iter in range(Niter):
            
              #    print "     iter:",iter


               
                  

            ########### set I.C.

                  # initial conditions: everyone Susceptible with zero
                  # accumulated dose and a random personal threshold, except
                  # the two seed adopters (Wunderink, Weiss) who start Infected
                  list_I=[]  #list infected doctors
                  max_order=0
                  for n in G.nodes():
                     G.node[n]["status"]="S"  # all nodes are Susceptible
                     G.node[n]["infec_value"]=0. 
                     G.node[n]["personal_threshold"]=random.random()  # for a dr to become infected

                     if G.node[n]['type']=="shift":                        
                        if  G.node[n]['order']>max_order:
                           max_order=G.node[n]['order'] # to get the last shift-order for the time loop
                     else:
                        if G.node[n]['label']=="Wunderink"  or G.node[n]["label"]=="Weiss":           
                           G.node[n]["status"]="I"                       
                           G.node[n]["infec_value"]=G.node[n]["personal_threshold"]+ 1.
                           list_I.append(G.node[n]['label'])
          

            
           
                  list_single_t_evolution=[]
                  list_single_t_evolution.append(2.0)  # I always start with TWO infected doctors!!


                  for n in G.nodes():   # i make some DOCTORs IMMUNE  (anyone except Weiss and Wunderink)
                     if (G.node[n]['type']=="A") or ( G.node[n]['type']=="F"):
                        if G.node[n]['label']!="Wunderink"  and G.node[n]["label"]!="Weiss": 
                           rand=random.random()
                           if rand< prob_Immune:
                              G.node[n]["status"]="Immune"
                              


        
  
                  ################# the dynamics starts: 
            
                  t=1
                  while t< cutting_day:  # loop over shifts, in order           
                     for n in G.nodes():
                        if G.node[n]['type']=="shift" and G.node[n]['order']==t:
                           shift_length=int(G.node[n]['shift_length'])

                           if shift_length==2 and n not in list_id_weekends_T3:
                              shift_length=1   # because during weekends, the fellow does rounds one day with Att1 and the other day with Att2.  (weekend shifts for T3 are two day long, with no sharing fellows)

                           flag_possible_infection=0
                           for doctor in G.neighbors(n): #first i check if any doctor is infected in this shift
                              if G.node[doctor]["status"]=="I":
                                 flag_possible_infection=1
                                

                           if flag_possible_infection:
                              for doctor in G.neighbors(n): # then the doctors in that shift, gets infected with prob_infection

                                 for i in range(shift_length):   # i repeat the infection process several times, to account for shift length
                                    if G.node[doctor]["status"]=="S":
                                       rand=random.random()
                                       if rand<prob_infection:  # with prob p the infection occurs
                                          
                                          G.node[doctor]["infec_value"]+=dose  # and bumps the infection_value of that susceptible dr
                                          
                                          if G.node[doctor]["infec_value"]>= G.node[doctor]["personal_threshold"]:  # the threshold for infection is personal
                                             
                                             G.node[doctor]["status"]="I"
                                             
                                        
                                                
                                             list_I.append(G.node[doctor]["label"])
                                                

                     list_single_t_evolution.append(float(len(list_I)))

                     t+=1   
                     ######## end t loop





          


                  list_lists_t_evolutions.append(list_single_t_evolution)
             
 
                  #print "actual:",len(list_actual_evol_training),"  simu:",len(list_single_t_evolution)
                  list_dist_fixed_parameters.append(compare_real_evol_vs_simus_to_be_called.compare_two_curves( list_actual_evol_training,list_single_t_evolution))
                  
                  list_dist_abs_at_ending_point_fixed_parameters.append( abs(list_single_t_evolution[-1]-list_actual_evol_training[-1]) )   # i save the distance at the ending point between the current simu and actual evol
                  list_dist_at_ending_point_fixed_parameters.append( list_single_t_evolution[-1]-list_actual_evol_training[-1])    # i save the distance at the ending point between the current simu and actual evol

                           
                  ######## end loop Niter for the training phase
      

       


       
       
               # summary statistics over the Niter realizations for this
               # (prob_Immune, prob_infection, dose) combination:
               # [mean trajectory distance, its SD, mean |end-point distance|]
               list_pair_dist_std_delta_end=[]
               
               list_pair_dist_std_delta_end.append(numpy.mean(list_dist_fixed_parameters) )   # average dist between the curves over Niter
               list_pair_dist_std_delta_end.append(numpy.std(list_dist_fixed_parameters) )
               
               list_pair_dist_std_delta_end.append(numpy.mean(list_dist_abs_at_ending_point_fixed_parameters))
               

               value=numpy.mean(list_dist_fixed_parameters) *numpy.mean(list_dist_abs_at_ending_point_fixed_parameters)# if SD=0, it is a problem, because then that is the minimun value, but not the optimum i am looking for!!
    
               dict_filenames_prod_distances[output_file2]=  value

               
               if (numpy.mean(list_dist_abs_at_ending_point_fixed_parameters)) <= delta_end:  # i only consider situations close enough at the ending point   
                  
                  dict_filenames_tot_distance[output_file2]=list_pair_dist_std_delta_end
                                                                                          

                  histogram_filename="../Results/weight_shifts/histogr_raw_distances_ending_infection_memory_training_p"+str(prob_infection)+"_"+"Immune"+str(prob_Immune)+"_threshold_from_distrib_dose"+str(dose)+"_"+str(Niter)+"iter_day"+str(cutting_day)+"_A_F_inferred.dat"
                  histograma_gral_negv_posit.histograma(list_dist_at_ending_point_fixed_parameters,histogram_filename)
                  
                  histogram_filename2="../Results/weight_shifts/histogr_sum_dist_traject_infection_memory_training_p"+str(prob_infection)+"_"+"Immune"+str(prob_Immune)+"_threshold_from_distrib_dose"+str(dose)+"_"+str(Niter)+"iter_day"+str(cutting_day)+"_A_F_inferred.dat"
                  
                  histograma_bines_gral.histograma_bins(list_dist_fixed_parameters,Nbins,histogram_filename2)
                  
                  
                  print  "written histogram file: ",histogram_filename
                  print  "written histogram file: ",histogram_filename2


               dose+= delta_dose          
            prob_infection+= delta_prob
      prob_Immune+= delta_prob_Immune



 

   string_name="infection_memory_training_"+fixed_param+str(Niter)+"iter_day"+str(cutting_day)+"_A_F_inferred.dat"   # for the "Results" file with the sorted list of files
   
   # ranking 1: minimum mean trajectory distance among combinations whose
   # end-point distance passed the delta_end filter
   list_order_dict= compare_real_evol_vs_simus_to_be_called.pick_minimum_same_end(dict_filenames_tot_distance,string_name,Niter,cutting_day)
# it returns a list of tuples like this :  ('../Results/network_final_schedule_withTeam3_local/infection/Average_time_evolution_Infection_training_p0.7_Immune0.0_2iter_2012.dat', [2540.0, 208.0, 1.0])  the best set of parameters  being the fist one of the elements in that list.



   
   
   
   # ranking 2: minimum product (mean trajectory distance) * (mean |end-point distance|)
   list_order_dict2= compare_real_evol_vs_simus_to_be_called.pick_minimum_prod_distances(dict_filenames_prod_distances,string_name,Niter,cutting_day)


   

   # recover the winning parameters by parsing them back out of the filename
   # (they were embedded as ..._pX_ImmuneY_..._doseZ_... above)
   prob_infection=float(list_order_dict[0][0].split("_p")[1].split("_")[0])
   prob_Immune=float(list_order_dict[0][0].split("_Immune")[1].split("_")[0]) 
   dose=float(list_order_dict[0][0].split("_dose")[1].split("_")[0])
 
  
   
  
   print "\nOptimum parameters (old method) at day",cutting_day," are: p=",prob_infection," Pimmune=",prob_Immune," infection threshold from distribution, and dose=",dose
   


  # optimum_filename=list_order_dict2[0][0]

   prob_infection=float(list_order_dict2[0][0].split("_p")[1].split("_")[0])
   prob_Immune=float(list_order_dict2[0][0].split("_Immune")[1].split("_")[0])  
   dose=float(list_order_dict2[0][0].split("_dose")[1].split("_")[0])
 
  
 
   print "Optimum parameters (product of distances and SDs) at day",cutting_day," are: p=",prob_infection," Pimmune=",prob_Immune," infection threshold from distribution, and dose=",dose
Exemplo n.º 2
0
def main():

    database = "CK_users2009_2012_collected_june2013"
    server = "tarraco.chem-eng.northwestern.edu"
    user = "******"
    passwd = "tiyp,julia"
    db = Connection(server, database, user, passwd)

    flag_weighin_history = 0
    flag_blog_comments = 0
    flag_daily_steps = 0
    flag_favorite_blogs = 0
    flag_forum_posts = 0
    flag_forums = 0
    flag_friends = 0
    flag_ignore = 0
    flag_membership_periods = 0
    flag_private_messages = 0
    flag_lesson_comments = 0
    flag_public_diary = 0
    flag_public_group_memberships = 0
    flag_homepage_comments = 0
    flag_activity_combined = 0
    flag_users = 1
    flag_get_users_act_prior2009 = 1

    ################  weigh-in history table

    if flag_weighin_history == 1:

        db.execute("DROP TABLE IF EXISTS weigh_in_history")
        db.execute("""                      
            CREATE TABLE  weigh_in_history  
            (        
             on_day           DATETIME,
             ck_id            CHAR(36),     
             weight           FLOAT,                        
             id               INT(11),
             activity_flag    CHAR(3)                 
            )
          """)

        file_name3 = "data_2009_2012_collected_june2013/weighin_history.txt"
        file3 = open(file_name3, 'r')
        list_lines_file3 = file3.readlines()

        empty_cases = 0
        contador = 1
        for line in list_lines_file3:  # read gender info from file

            list_one_line = line.strip("\n\r").split(
                ","
            )  #remove \n\r together! (this is how the jump is coded in certain op. systems)

            yy = int(list_one_line[0].split("-")[0])
            mm = int(list_one_line[0].split("-")[1])
            dd = int(list_one_line[0].split("-")[2])
            on_day = datetime(yy, mm, dd)

            ck_id = str(list_one_line[1])

            if list_one_line[2]:  # to check that the string is not empty
                weight = float(list_one_line[2].strip("\n\r").strip())

                activity_flag = "WI"

                db.execute(
                    """
                INSERT INTO weigh_in_history (on_day , ck_id, weight, id, activity_flag)
                VALUES (%s, %s, %s, %s, %s)
                """, str(on_day), str(ck_id), str(weight), str(contador),
                    str(activity_flag))

                print contador, on_day, ck_id, weight, type(weight), len(
                    str(weight)), activity_flag

                contador += 1

    ################  blog comments table

    if flag_blog_comments == 1:

        db.execute("DROP TABLE IF EXISTS blog_comments")
        db.execute("""                      
            CREATE TABLE  blog_comments  
            (        
               at_time        DATETIME,
               post_date      DATETIME,           
               poster         CHAR(36),
               owner          CHAR(36),
               activity_flag  CHAR(3),
               id             INT(11) 
            )
          """)

        file_name3 = "data_2009_2012_collected_june2013/blog_comments.txt"
        file3 = open(file_name3, 'r')
        list_lines_file3 = file3.readlines()

        contador = 1
        for line in list_lines_file3:  # read gender info from file

            list_one_line = line.strip("\n\r").split(
                ","
            )  #remove \n\r together! (this is how the jump is coded in certain op. systems)

            yy = int(list_one_line[0].split("T")[0].split("-")[0])
            mm = int(list_one_line[0].split("T")[0].split("-")[1])
            dd = int(list_one_line[0].split("T")[0].split("-")[2])

            hh = int(list_one_line[0].split("T")[1].split(":")[0])
            mts = int(list_one_line[0].split("T")[1].split(":")[1])
            ss = int(list_one_line[0].split("T")[1].split(":")[2])

            at_time = datetime(yy, mm, dd, hh, mts, ss)

            yy = int(list_one_line[1].split("-")[0])
            mm = int(list_one_line[1].split("-")[1])
            dd = int(list_one_line[1].split("-")[2])

            post_date = datetime(yy, mm, dd)

            poster = str(list_one_line[2])
            owner = str(list_one_line[3])

            activity_flag = "BC"

            db.execute(
                """
                INSERT INTO blog_comments (at_time , post_date, poster, owner, activity_flag, id)
                VALUES (%s, %s, %s, %s, %s, %s)
                """, str(at_time), str(post_date), str(poster), str(owner),
                str(activity_flag), str(contador))

            print contador, str(at_time), str(post_date), str(poster), str(
                owner), str(activity_flag), str(contador)
            contador += 1

    ################  daily steps table

    if flag_daily_steps == 1:

        db.execute("DROP TABLE IF EXISTS daily_steps")
        db.execute("""                      
       CREATE TABLE daily_steps
       (
         on_day        DATETIME,
         ck_id         CHAR(36),           
         steps         CHAR(36),        
         id             INT(11)
        
       )
     """)

        file_name3 = "data_2009_2012_collected_june2013/daily_steps.txt"
        file3 = open(file_name3, 'r')
        list_lines_file3 = file3.readlines()

        contador = 1
        for line in list_lines_file3:  # read gender info from file

            list_one_line = line.strip("\n\r").split(
                ","
            )  #remove \n\r together! (this is how the jump is coded in certain op. systems)

            yy = int(list_one_line[0].split("-")[0])
            mm = int(list_one_line[0].split("-")[1])
            dd = int(list_one_line[0].split("-")[2])

            on_day = datetime(yy, mm, dd)

            ck_id = str(list_one_line[1])
            steps = str(list_one_line[2])

            db.execute(
                """
                INSERT INTO daily_steps (on_day , ck_id, steps, id)
                VALUES (%s, %s, %s, %s)
                """, str(on_day), str(ck_id), str(steps), str(contador))

            print contador, str(on_day), str(ck_id), str(steps), str(contador)
            contador += 1

    ################  favorite blogs table

    if flag_favorite_blogs == 1:

        db.execute("DROP TABLE IF EXISTS favorite_blogs")
        db.execute("""                      
            CREATE TABLE favorite_blogs
            (
             ck_id             CHAR(36),     
             num_blogs         INT(11),
             num_favorites     INT(11),      
             id                INT(11)
        
            )
          """)

        file_name3 = "data_2009_2012_collected_june2013/favourite_blogs_threads.txt"
        file3 = open(file_name3, 'r')
        list_lines_file3 = file3.readlines()

        contador = 1
        for line in list_lines_file3:  # read gender info from file

            list_one_line = line.strip("\n\r").split(
                ","
            )  #remove \n\r together! (this is how the jump is coded in certain op. systems)

            ck_id = str(list_one_line[0])
            num_blogs = str(list_one_line[1])
            num_favorites = str(list_one_line[2])

            db.execute(
                """
                INSERT INTO favorite_blogs (ck_id , num_blogs, num_favorites, id)
                VALUES (%s, %s, %s, %s)
                """, str(ck_id), str(num_blogs), str(num_favorites),
                str(contador))

            print contador, str(ck_id), str(num_blogs), str(
                num_favorites), str(contador)
            contador += 1

    ################  forum posts table

    if flag_forum_posts == 1:

        db.execute("DROP TABLE IF EXISTS forum_posts")
        db.execute("""                      
            CREATE TABLE forum_posts
            (
             at_time           DATETIME,
             thread_id         CHAR(36),     
             forum_id          CHAR(36),     
             ck_id             CHAR(36),             
             activity_flag     CHAR(3),        
             id                INT(11)        
            )
          """)

        file_name3 = "data_2009_2012_collected_june2013/forum_posts.txt"
        file3 = open(file_name3, 'r')
        list_lines_file3 = file3.readlines()

        contador = 1
        for line in list_lines_file3:  # read gender info from file

            list_one_line = line.strip("\n\r").split(
                ","
            )  #remove \n\r together! (this is how the jump is coded in certain op. systems)

            yy = int(list_one_line[0].split("T")[0].split("-")[0])
            mm = int(list_one_line[0].split("T")[0].split("-")[1])
            dd = int(list_one_line[0].split("T")[0].split("-")[2])

            hh = int(list_one_line[0].split("T")[1].split(":")[0])
            mts = int(list_one_line[0].split("T")[1].split(":")[1])
            ss = int(list_one_line[0].split("T")[1].split(":")[2])

            at_time = datetime(yy, mm, dd, hh, mts, ss)

            thread_id = str(list_one_line[1])
            forum_id = str(list_one_line[2])
            ck_id = str(list_one_line[3])

            activity_flag = "FP"

            db.execute(
                """
                INSERT INTO forum_posts (at_time , thread_id, forum_id, ck_id, activity_flag, id)
                VALUES (%s, %s, %s, %s, %s, %s)
                """, str(at_time), str(thread_id), str(forum_id), str(ck_id),
                str(activity_flag), str(contador))

            print contador, str(at_time), str(thread_id), str(forum_id), str(
                ck_id), str(activity_flag), str(contador)
            contador += 1

    ################  forums table

    if flag_forums == 1:

        db.execute("DROP TABLE IF EXISTS forums")
        db.execute("""                      
            CREATE TABLE forums
            (        
             forum_id          CHAR(36),     
             user_created      CHAR(36),                        
             id                INT(11)        
            )
          """)

        file_name3 = "data_2009_2012_collected_june2013/forums.txt"
        file3 = open(file_name3, 'r')
        list_lines_file3 = file3.readlines()

        contador = 1
        for line in list_lines_file3:  # read gender info from file

            list_one_line = line.strip("\n\r").split(
                ","
            )  #remove \n\r together! (this is how the jump is coded in certain op. systems)

            forum_id = str(list_one_line[0])
            user_created = str(list_one_line[1])

            db.execute(
                """
                INSERT INTO forums (forum_id, user_created, id)
                VALUES (%s, %s, %s)
                """, str(forum_id), str(user_created), str(contador))

            print contador, str(forum_id), str(user_created)
            contador += 1

    ################  friends table

    if flag_friends == 1:

        db.execute("DROP TABLE IF EXISTS friends")
        db.execute("""                      
            CREATE TABLE friends
            (        
             src          CHAR(36),     
             dest         CHAR(36),                        
             id           INT(11)
        
            )
          """)

        file_name3 = "data_2009_2012_collected_june2013/friends_list.txt"
        file3 = open(file_name3, 'r')
        list_lines_file3 = file3.readlines()

        contador = 1
        for line in list_lines_file3:  # read gender info from file

            list_one_line = line.strip("\n\r").split(
                ","
            )  #remove \n\r together! (this is how the jump is coded in certain op. systems)

            src = str(list_one_line[0])
            dest = str(list_one_line[1])

            db.execute(
                """
                INSERT INTO friends (src, dest, id)
                VALUES (%s, %s, %s)
                """, str(src), str(dest), str(contador))

            print contador, str(src), str(dest)
            contador += 1

    ################  ignore table

    if flag_ignore == 1:

        db.execute("DROP TABLE IF EXISTS ignores")
        db.execute("""                      
            CREATE TABLE ignores
            (        
             src          CHAR(36),     
             dest         CHAR(36),                        
             id           INT(11)        
            )
          """)

        file_name3 = "data_2009_2012_collected_june2013/ignore_list.txt"
        file3 = open(file_name3, 'r')
        list_lines_file3 = file3.readlines()

        contador = 1
        for line in list_lines_file3:  # read gender info from file

            list_one_line = line.strip("\n\r").split(
                ","
            )  #remove \n\r together! (this is how the jump is coded in certain op. systems)

            src = str(list_one_line[0])
            dest = str(list_one_line[1])

            db.execute(
                """
                INSERT INTO ignores (src, dest, id)
                VALUES (%s, %s, %s)
                """, str(src), str(dest), str(contador))

            print contador, str(src), str(dest)
            contador += 1

    ################  membership period table

    if flag_membership_periods == 1:

        db.execute("DROP TABLE IF EXISTS membership_periods")
        db.execute("""                      
            CREATE TABLE membership_periods   
            (
             ck_id         CHAR(36),   
             start_date    DATETIME,          
             end_date      DATETIME, 
             free          CHAR(3),
             payment       CHAR(3),
             voucher       CHAR(3),
             system        CHAR(3),
             plan          CHAR(10),
             free_days     CHAR(10),  
             id            INT(11)

            )
          """)

        file_name3 = "data_2009_2012_collected_june2013/membership_periods.txt"
        file3 = open(file_name3, 'r')
        list_lines_file3 = file3.readlines()

        contador = 1
        for line in list_lines_file3:  # read gender info from file

            list_one_line = line.strip("\n\r").split(
                ","
            )  #remove \n\r together! (this is how the jump is coded in certain op. systems)

            ck_id = str(list_one_line[0])

            yy = int(list_one_line[1].split("T")[0].split("-")[0])
            mm = int(list_one_line[1].split("T")[0].split("-")[1])
            dd = int(list_one_line[1].split("T")[0].split("-")[2])

            hh = int(list_one_line[1].split("T")[1].split(":")[0])
            mts = int(list_one_line[1].split("T")[1].split(":")[1])
            ss = int(list_one_line[1].split("T")[1].split(":")[2])

            start_date = datetime(yy, mm, dd, hh, mts, ss)

            free = str(list_one_line[3])
            payment = str(list_one_line[4])
            voucher = str(list_one_line[5])
            system = str(list_one_line[6])
            plan = str(list_one_line[7])
            free_days = str(list_one_line[8])

            if list_one_line[2]:  # if there is an ending date

                yy = int(list_one_line[2].split("T")[0].split("-")[0])
                mm = int(list_one_line[2].split("T")[0].split("-")[1])
                dd = int(list_one_line[2].split("T")[0].split("-")[2])

                hh = int(list_one_line[2].split("T")[1].split(":")[0])
                mts = int(list_one_line[2].split("T")[1].split(":")[1])
                ss = int(list_one_line[2].split("T")[1].split(":")[2])

                end_date = datetime(yy, mm, dd, hh, mts, ss)

                db.execute(
                    """
                INSERT INTO membership_periods (ck_id, start_date, end_date, free, payment, voucher, system,  plan, free_days, id)
                VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
                """, str(ck_id), str(start_date), str(end_date), str(free),
                    str(payment), str(voucher), str(system), str(plan),
                    str(free_days), str(contador))

                print contador, str(ck_id), str(start_date), str(
                    end_date), str(free), str(payment), str(voucher), str(
                        system), str(plan), str(free_days)

            else:  # if there is no ending date for the period

                db.execute(
                    """
                INSERT INTO membership_periods (ck_id, start_date,  free, payment, voucher, system,  plan, free_days, id)
                VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)
                """, str(ck_id), str(start_date), str(free), str(payment),
                    str(voucher), str(system), str(plan), str(free_days),
                    str(contador))

                print contador, str(ck_id), str(start_date), str(free), str(
                    payment), str(voucher), str(system), str(plan), str(
                        free_days)

            contador += 1

    ################  private messages table

    if flag_private_messages == 1:

        db.execute("DROP TABLE IF EXISTS private_messages")
        db.execute("""                      
            CREATE TABLE private_messages 
            (        
             at_time           DATETIME,
             src_id            CHAR(36),     
             dest_id           CHAR(36),                        
             activity_flag     CHAR(3),
             id                INT(11)        
            )
         """)

        file_name3 = "data_2009_2012_collected_june2013/private_messages.txt"
        file3 = open(file_name3, 'r')
        list_lines_file3 = file3.readlines()

        contador = 1
        for line in list_lines_file3:  # read gender info from file

            list_one_line = line.strip("\n\r").split(
                ","
            )  #remove \n\r together! (this is how the jump is coded in certain op. systems)

            yy = int(list_one_line[0].split("T")[0].split("-")[0])
            mm = int(list_one_line[0].split("T")[0].split("-")[1])
            dd = int(list_one_line[0].split("T")[0].split("-")[2])

            hh = int(list_one_line[0].split("T")[1].split(":")[0])
            mts = int(list_one_line[0].split("T")[1].split(":")[1])
            ss = int(list_one_line[0].split("T")[1].split(":")[2])

            at_time = datetime(yy, mm, dd, hh, mts, ss)

            src_id = str(list_one_line[1])
            dest_id = str(list_one_line[2])

            activity_flag = "PM"

            db.execute(
                """
                INSERT INTO private_messages  (at_time , src_id, dest_id, activity_flag, id)
                VALUES (%s, %s, %s, %s, %s)
                """, str(at_time), str(src_id), str(dest_id),
                str(activity_flag), str(contador))

            print contador, str(at_time), str(src_id), str(dest_id), str(
                activity_flag)
            contador += 1

    ################  lesson_comments table

    if flag_lesson_comments == 1:

        db.execute("DROP TABLE IF EXISTS lesson_comments")
        db.execute("""                      
        CREATE TABLE lesson_comments  
        (
         at_time           DATETIME,
         content_id        CHAR(36),     
         poster_id         CHAR(36),                        
         activity_flag     CHAR(3),
         id                INT(11)

        )
      """)

        file_name3 = "data_2009_2012_collected_june2013/program_lesson_comments.txt"
        file3 = open(file_name3, 'r')
        list_lines_file3 = file3.readlines()

        contador = 1
        for line in list_lines_file3:  # read gender info from file

            list_one_line = line.strip("\n\r").split(
                ","
            )  #remove \n\r together! (this is how the jump is coded in certain op. systems)

            yy = int(list_one_line[0].split("T")[0].split("-")[0])
            mm = int(list_one_line[0].split("T")[0].split("-")[1])
            dd = int(list_one_line[0].split("T")[0].split("-")[2])

            hh = int(list_one_line[0].split("T")[1].split(":")[0])
            mts = int(list_one_line[0].split("T")[1].split(":")[1])
            ss = int(list_one_line[0].split("T")[1].split(":")[2])

            at_time = datetime(yy, mm, dd, hh, mts, ss)

            content_id = str(list_one_line[1])
            poster_id = str(list_one_line[2])

            activity_flag = "LC"

            db.execute(
                """
                INSERT INTO lesson_comments  (at_time , content_id, poster_id, activity_flag, id)
                VALUES (%s, %s, %s, %s, %s)
                """, str(at_time), str(content_id), str(poster_id),
                str(activity_flag), str(contador))

            print contador, str(at_time), str(content_id), str(poster_id), str(
                activity_flag)
            contador += 1

    ################ public_diary table

    if flag_public_diary == 1:

        db.execute("DROP TABLE IF EXISTS public_diary")
        db.execute("""                      
        CREATE TABLE  public_diary  
        (                
         ck_id            CHAR(36),     
         visibility       CHAR(36),                               
         id               INT(11)
        
        )
      """)

        file_name3 = "data_2009_2012_collected_june2013/public_diary.txt"
        file3 = open(file_name3, 'r')
        list_lines_file3 = file3.readlines()

        contador = 1
        for line in list_lines_file3:  # read gender info from file

            list_one_line = line.strip("\n\r").split(
                ","
            )  #remove \n\r together! (this is how the jump is coded in certain op. systems)

            ck_id = str(list_one_line[0])
            visibility = str(list_one_line[1])

            db.execute(
                """
                INSERT INTO public_diary (ck_id, visibility, id)
                VALUES (%s, %s,  %s)
                """, str(ck_id), str(visibility), str(contador))

            print contador, str(ck_id), str(visibility)
            contador += 1

    ################  group_membership table

    if flag_public_group_memberships == 1:

        db.execute("DROP TABLE IF EXISTS public_group_memberships")

        db.execute("""                      
        CREATE TABLE public_group_memberships 
        (                 
         forum_id        CHAR(36),     
         ck_id           CHAR(36),                                
         id              INT(11)
        
        )
      """)

        file_name3 = "data_2009_2012_collected_june2013/public_group_memberships.txt"
        file3 = open(file_name3, 'r')
        list_lines_file3 = file3.readlines()

        contador = 1
        for line in list_lines_file3:  # read gender info from file

            list_one_line = line.strip("\n\r").split(
                ","
            )  #remove \n\r together! (this is how the jump is coded in certain op. systems)

            forum_id = str(list_one_line[0])
            ck_id = str(list_one_line[1])

            db.execute(
                """
                INSERT INTO public_group_memberships (forum_id, ck_id, id)
                VALUES (%s, %s, %s)
                """, str(forum_id), str(ck_id), str(contador))

            print contador, str(forum_id), str(ck_id)
            contador += 1

    ################  homepage_comments table

    if flag_homepage_comments == 1:

        db.execute("DROP TABLE IF EXISTS homepage_comments")
        db.execute("""                      
        CREATE TABLE homepage_comments 
        (
         at_time           DATETIME,
         poster_id         CHAR(36),     
         owner_id          CHAR(36),                 
         id                INT(11),
         activity_flag     CHAR(3)
        
        )
      """)

        file_name3 = "data_2009_2012_collected_june2013/user_homepage_comments.txt"
        file3 = open(file_name3, 'r')
        list_lines_file3 = file3.readlines()

        empty_cases = 0
        contador = 1
        for line in list_lines_file3:  # read gender info from file

            list_one_line = line.strip("\n\r").split(
                ","
            )  #remove \n\r together! (this is how the jump is coded in certain op. systems)

            yy = int(list_one_line[0].split("T")[0].split("-")[0])
            mm = int(list_one_line[0].split("T")[0].split("-")[1])
            dd = int(list_one_line[0].split("T")[0].split("-")[2])

            hh = int(list_one_line[0].split("T")[1].split(":")[0])
            mts = int(list_one_line[0].split("T")[1].split(":")[1])
            ss = int(list_one_line[0].split("T")[1].split(":")[2])

            at_time = datetime(yy, mm, dd, hh, mts, ss)

            poster_id = str(list_one_line[1])
            owner_id = str(list_one_line[2])
            activity_flag = "HC"

            db.execute(
                """
                INSERT INTO homepage_comments (at_time , poster_id, owner_id, id, activity_flag)
                VALUES (%s, %s, %s, %s, %s)
                """, str(at_time), str(poster_id), str(owner_id),
                str(contador), str(activity_flag))

            print contador, str(at_time), str(poster_id), str(owner_id), str(
                activity_flag)

            contador += 1

    ################  activity_combined table

    if flag_activity_combined == 1:

        list_dict_total_act = []

        ### blog comments
        query1 = """select * from blog_comments"""
        result1 = db.query(query1)

        cont_act = 1
        for r1 in result1:  #list of dict.
            dict_user = {}

            dict_user['ck_id'] = r1['poster']
            dict_user['activity_date'] = r1['at_time']
            dict_user['activity_flag'] = "BC"

            list_dict_total_act.append(dict_user)

            print "blog", cont_act
            cont_act += 1

        ### forum posts
        query1 = """select * from forum_posts"""
        result1 = db.query(query1)

        cont_act = 1
        for r1 in result1:  #list of dict.
            dict_user = {}

            dict_user['ck_id'] = r1['ck_id']
            dict_user['activity_date'] = r1['at_time']
            dict_user['activity_flag'] = "FP"

            list_dict_total_act.append(dict_user)

            print "forum", cont_act
            cont_act += 1

        ### hompage comments
        query1 = """select * from homepage_comments"""
        result1 = db.query(query1)

        cont_act = 1
        for r1 in result1:  #list of dict.
            dict_user = {}

            dict_user['ck_id'] = r1['poster_id']
            dict_user['activity_date'] = r1['at_time']
            dict_user['activity_flag'] = "HC"

            list_dict_total_act.append(dict_user)

            print "hompage", cont_act
            cont_act += 1

        ###  lesson  comments
        query1 = """select * from lesson_comments"""
        result1 = db.query(query1)

        cont_act = 1
        for r1 in result1:  #list of dict.
            dict_user = {}

            dict_user['ck_id'] = r1['poster_id']
            dict_user['activity_date'] = r1['at_time']
            dict_user['activity_flag'] = "LC"

            list_dict_total_act.append(dict_user)

            print "lesson", cont_act
            cont_act += 1

        ### private_messages
        query1 = """select * from private_messages"""
        result1 = db.query(query1)

        cont_act = 1
        for r1 in result1:  #list of dict.
            dict_user = {}

            dict_user['ck_id'] = r1['src_id']
            dict_user['activity_date'] = r1['at_time']
            dict_user['activity_flag'] = "PM"

            list_dict_total_act.append(dict_user)

            print "message", cont_act
            cont_act += 1

        ### weigh in
        query1 = """select * from weigh_in_history"""
        result1 = db.query(query1)

        cont_act = 1
        for r1 in result1:  #list of dict.
            dict_user = {}

            dict_user['ck_id'] = r1['ck_id']
            dict_user['activity_date'] = r1['on_day']
            dict_user['activity_flag'] = "WI"

            list_dict_total_act.append(dict_user)

            print "wi", cont_act
            cont_act += 1

        print "tot activity combined:", len(list_dict_total_act)

        db.execute("DROP TABLE IF EXISTS activity_combined")
        db.execute("""                      
        CREATE TABLE activity_combined 
        (
         activity_date     DATETIME,
         ck_id             CHAR(36),                                
         activity_flag     CHAR(3),
         id                INT(11)        
        )
         """)

        contador = 1
        for dicc in list_dict_total_act:  # list of dicts

            activity_date = dicc['activity_date']
            ck_id = dicc['ck_id']
            activity_flag = dicc['activity_flag']

            db.execute(
                """
                INSERT INTO activity_combined (activity_date , ck_id, activity_flag, id)
                VALUES (%s, %s, %s, %s)
                """, str(activity_date), str(ck_id), str(activity_flag),
                str(contador))

            print contador, str(activity_date), str(ck_id), str(activity_flag)

            contador += 1

############# find the list of users with activity prior to 2009
    if flag_get_users_act_prior2009 == 1:

        # Identify users whose first recorded combined activity predates
        # Jan 1 2009, and histogram how many days early that activity was.
        list_day_diff = []              # (date_act - first_day).days, negative values
        list_users_act_prior2009 = []   # ck_ids with any pre-2009 activity
        first_day = datetime(2009, 01, 01)

        print "querying the db..."
        query1 = """select * from activity_combined order by activity_date"""
        result1 = db.query(query1)

        for r1 in result1:  # list of dicts, in chronological order
            ck_id = r1['ck_id']
            date_act = r1['activity_date']

            if date_act < first_day:
                # record each user only once, at their earliest activity
                # (guaranteed earliest because rows arrive ordered by date)
                if ck_id not in list_users_act_prior2009:
                    list_users_act_prior2009.append(ck_id)
                    list_day_diff.append((date_act - first_day).days)
                print date_act, first_day, ck_id

        print "# users with activity prior to 2009:", len(
            list_users_act_prior2009)

        histograma(list_day_diff, "./histogr_diff_days_prior2009.dat")

    ################## users table
    if flag_users == 1:

        # Rebuild the users table: one row per record in users.txt, enriched
        # with (a) whether the user had activity prior to 2009 and (b) whether
        # the account is an internal CK account.
        db.execute("DROP TABLE IF EXISTS users")
        db.execute(
            """                      
           CREATE TABLE  users
           (        
            ck_id                   CHAR(36), 
            join_date               DATETIME,
            initial_weight          FLOAT,         
            most_recent_weight      FLOAT,                        
            height                  INT(11),       
            age                     INT(11),
            state                   CHAR(36), 
            is_staff                CHAR(36), 
            internal_CK_account     CHAR(36), 
            id                      INT(11),
            total_balance           CHAR(10),  
            gender                  CHAR(10),                     
            act_prior2009       CHAR(36)                     
        
           )
         """
        )  # if i use triple quotation marks, i can have jumps of line no problem, but not with single ones

        file_name2 = "data_2009_2012_collected_june2013/users.txt"
        file2 = open(file_name2, 'r')  # NOTE(review): handle is never closed
        list_lines_file2 = file2.readlines()

        contador = 0                      # doubles as the numeric user id
        dict_of_dicts = {}                # ck_id -> full per-user record dict
        dict_id_ckid = {}                 # numeric id -> ck_id (for ordered insertion below)
        dict_user_internal_account = {}   # ck_id -> "YES"/"NO" (internal CK account)
        for line in list_lines_file2:  # one user record per line
            contador += 1
            dict_one_user = {}

            list_one_line = line.strip("\n\r").split(
                ","
            )  #remove \n\r together! (this is how the jump is coded in certain op. systems)

            ck_id = str(list_one_line[0])

            # field 1 is the join date, formatted "YYYY-MM-DDTHH:MM:SS"
            yy = int(list_one_line[1].split("T")[0].split("-")[0])
            mm = int(list_one_line[1].split("T")[0].split("-")[1])
            dd = int(list_one_line[1].split("T")[0].split("-")[2])

            hh = int(list_one_line[1].split("T")[1].split(":")[0])
            mts = int(list_one_line[1].split("T")[1].split(":")[1])
            ss = int(list_one_line[1].split("T")[1].split(":")[2])

            join_date = datetime(yy, mm, dd, hh, mts, ss)

            initial_weight = float(list_one_line[2])
            most_recent_weight = float(list_one_line[3])
            height = float(list_one_line[4])
            try:
                age = int(list_one_line[5])
            except ValueError:
                # non-numeric or empty age fields default to 0
                age = 0

            state = str(list_one_line[6])
            is_staff = str(list_one_line[7])
            gender = str(list_one_line[8]
                         )  # this is how some op. sys. code the jump of line!

            total_balance = str(
                list_one_line[9]
            )  # either the user paid something or was totally free...

            dict_one_user['ck_id'] = ck_id
            dict_one_user['join_date'] = join_date
            dict_one_user['initial_weight'] = initial_weight
            dict_one_user['most_recent_weight'] = most_recent_weight
            dict_one_user['height'] = height
            dict_one_user['age'] = age
            dict_one_user['state'] = state
            dict_one_user['is_staff'] = is_staff
            dict_one_user['gender'] = gender
            dict_one_user['total_balance'] = total_balance
            dict_one_user['id'] = contador

            # NOTE(review): list_users_act_prior2009 is built by the
            # flag_get_users_act_prior2009 block above -- this branch assumes
            # that flag was also enabled in the same run; confirm.
            dict_one_user['act_prior2009'] = "NO"
            if ck_id in list_users_act_prior2009:
                dict_one_user['act_prior2009'] = "YES"

            dict_user_internal_account[ck_id] = "NO"

            dict_id_ckid[contador] = ck_id

            dict_of_dicts[ck_id] = dict_one_user

        print "number of users in the user.txt file:", len(dict_of_dicts), "\n"
        print "  populating the users table..."

        #### i get the info about users that actually correspond to an internal CK account

        # NOTE(review): 'file' shadows the builtin and is never closed
        file = open(
            "./data_2009_2012_collected_june2013/CK_internal_accounts_list.dat",
            "r")
        list_data = file.readlines()

        for data in list_data:
            ck_id = data.strip("\r\n")
            dict_user_internal_account[
                ck_id] = "YES"  # the by-default value isbeen set to NO

        #### i populate the users table
        # iterate ids 1..N so rows are inserted in the original file order

        for user_id in range(len(dict_of_dicts)):
            user_id += 1

            ck_id = dict_id_ckid[user_id]

            join_date = dict_of_dicts[ck_id]['join_date']
            initial_weight = dict_of_dicts[ck_id]['initial_weight']
            most_recent_weight = dict_of_dicts[ck_id]['most_recent_weight']
            height = dict_of_dicts[ck_id]['height']
            age = dict_of_dicts[ck_id]['age']
            state = dict_of_dicts[ck_id]['state']
            is_staff = dict_of_dicts[ck_id]['is_staff']
            gender = dict_of_dicts[ck_id]['gender']
            total_balance = dict_of_dicts[ck_id]['total_balance']
            act_prior2009 = dict_of_dicts[ck_id]['act_prior2009']
            internal_account = dict_user_internal_account[ck_id]

            db.execute(
                """
                INSERT INTO users (ck_id,  join_date, initial_weight, most_recent_weight, height, age, state, is_staff, internal_CK_account, id, gender, total_balance, act_prior2009)
                VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
                """, str(ck_id), str(join_date), str(initial_weight),
                str(most_recent_weight), str(height), str(age), str(state),
                str(is_staff), str(internal_account), str(user_id),
                str(gender), str(total_balance), str(act_prior2009))

            print user_id, ck_id, join_date, initial_weight, most_recent_weight, height, age, state, is_staff, internal_account, gender, total_balance
def main(graph_name):

    G = nx.read_gml(graph_name)

    cutting_day = 125  # to separate   training-testing

    Niter_training = 100
    Niter_testing = 100

    delta_end = 3  # >= than + or -  dr difference at the end of the evolution

    dir_real_data = '../Results/'

    all_team = "NO"  # as adopters or not

    # output_file3=dir_real_data+"Landscape_parameters_persuasion_"+str(Niter)+"iter.dat"
    #file3 = open(output_file3,'wt')

    ######################################################################################
    #  I read the file of the actual evolution of the idea spreading in the hospital:   ##
    ######################################################################################

    if all_team == "YES":
        filename_actual_evol = dir_real_data + "HospitalModel_august1_adoption_counts_all_team_as_adopters_SIMPLER.csv"

    else:
        filename_actual_evol = dir_real_data + "HospitalModel_august1_adoption_counts_SIMPLER.csv"
    #ya no necesito CAMBIAR TB EL NOMBRE DEL ARCHIVO EN EL CODIGO PARA COMPARAR CURVAs

    list_actual_evol = []
    result_actual_file = csv.reader(open(filename_actual_evol, 'rb'),
                                    delimiter=',')
    cont = 0
    for row in result_actual_file:
        if cont > 0:  # i ignore the first line with the headers

            num_adopters = row[3]

            list_actual_evol.append(float(num_adopters))

        cont += 1

    list_actual_evol_training = list_actual_evol[:cutting_day]
    list_actual_evol_testing = list_actual_evol[(cutting_day - 1):]

    ##################################################################

    #../Results/network_final_schedule_withTeam3/Time_evolutions_Persuasion_alpha0.2_damping0.0_mutual_encourg0.7_threshold0.4_unif_distr_50iter_2012_seed31Oct_finalnetwork.dat

    alpha_F_min = 0.0  # alpha=0: nobody changes their mind
    alpha_F_max = 1.001
    delta_alpha_F = 0.1

    min_damping = 0.0  #its harder to go back from YES to NO again. =1 means no effect, =0.5 half the movement from Y->N than the other way around, =0 never go back from Y to N
    max_damping = 1.01
    delta_damping = 0.1

    min_mutual_encouragement = 0.0  # when two Adopters meet, they convince each other even more
    max_mutual_encouragement = 1.01
    delta_mutual_encouragement = 0.1

    print "\n\nPersuasion process on network, with Niter:", Niter_training

    dict_filenames_tot_distance = {
    }  # i will save the filename as key and the tot distance from that curve to the original one

    dict_filenames_list_dict_network_states = {}

    alpha_F = alpha_F_min
    while alpha_F <= alpha_F_max:  # i explore all the parameter space, and create a file per each set of values
        alpha_A = 0.5 * alpha_F
        print "  alpha_F:", alpha_F

        mutual_encouragement = min_mutual_encouragement
        while mutual_encouragement <= max_mutual_encouragement:
            print "    mutual_encouragement:", mutual_encouragement

            damping = min_damping
            while damping <= max_damping:
                print "      damping:", damping

                dir = "../Results/network_final_schedule_withTeam3_local/"
                output_file = dir + "Time_evolutions_Persuasion_training_alpha" + str(
                    alpha_F
                ) + "_damping" + str(damping) + "_mutual_encourg" + str(
                    mutual_encouragement) + "_" + str(
                        Niter_training) + "iter_distributed_thresholds.dat"
                file = open(output_file, 'wt')
                file.close()

                time_evol_number_adopters_ITER = [
                ]  # list of complete single realizations of the dynamics
                list_dist_fixed_parameters = []
                list_dist_abs_at_ending_point_fixed_parameters = []

                list_dict_network_states = []
                list_networks_at_cutting_day = []

                for iter in range(Niter_training):

                    print "         ", iter
                    list_t = []

                    time_evol_number_adopters = [
                    ]  # for a single realization of the dynamics

                    dict_network_states = {}

                    num_adopters, seed_shift, max_shift = set_ic(
                        G
                    )  # i establish who is Adopter and NonAdopter initially, and count how many shifts i have total

                    time_evol_number_adopters.append(float(num_adopters))
                    list_t.append(0)

                    ########### the dynamics starts:
                    t = int(
                        seed_shift) + 1  # the first time step is just IC.???

                    while t < cutting_day:  # loop over shifts, in chronological order  (the order is the day index since seeding_day)

                        list_t.append(t)
                        for n in G.nodes():
                            if G.node[n]['type'] == "shift" and G.node[n][
                                    'order'] == t:  # i look for the shift corresponding to that time step
                                flag_possible_persuasion = 0
                                for doctor in G.neighbors(n):
                                    if G.node[doctor][
                                            "status"] == "Adopter":  #first i check if any doctor is an adopter in this shift
                                        flag_possible_persuasion = 1
                                        break

                                if flag_possible_persuasion == 1:
                                    list_doctors = []
                                    for doctor in G.neighbors(
                                            n):  # for all drs in that shift
                                        list_doctors.append(doctor)

                                    pairs = itertools.combinations(
                                        list_doctors, 2
                                    )  # cos the shift can be 2 but also 3 doctors
                                    for pair in pairs:
                                        doctor1 = pair[0]
                                        doctor2 = pair[1]

                                        if G.node[doctor1]['status'] != G.node[
                                                doctor2][
                                                    'status']:  # if they think differently,
                                            # there will be persuasion
                                            persuasion(
                                                G, damping, doctor1, doctor2,
                                                alpha_A, alpha_F
                                            )  # i move their values of opinion
                                            update_opinions(
                                                G, doctor1, doctor2
                                            )  #  i update status and make sure the values of the vectors stay between [0,1]

                                        else:  # if two Adopters meet, they encourage each other (if two NonAdopters, nothing happens)

                                            mutual_reinforcement(
                                                G, mutual_encouragement,
                                                doctor1, doctor2)

                        list_Adopters = []  #count how many i have at this time
                        for n in G.nodes():
                            try:
                                if G.node[n]["status"] == "Adopter":
                                    if G.node[n]["label"] not in list_Adopters:
                                        list_Adopters.append(
                                            G.node[n]["label"])
                            except:
                                pass  # if the node is a shift, it doesnt have a 'status' attribute

                        time_evol_number_adopters.append(
                            float(len(list_Adopters)))

                        t += 1

                    ############## end while loop over t

                    for n in G.nodes():
                        if G.node[n]['type'] != "shift":
                            dict_network_states[
                                G.node[n]["label"]] = G.node[n]["status"]

                    list_dict_network_states.append(dict_network_states)

                    time_evol_number_adopters_ITER.append(
                        time_evol_number_adopters)

                    list_dist_fixed_parameters.append(
                        compare_real_evol_vs_simus_to_be_called.
                        compare_two_curves(list_actual_evol_training,
                                           time_evol_number_adopters))

                    list_dist_abs_at_ending_point_fixed_parameters.append(
                        abs(time_evol_number_adopters[-1] -
                            list_actual_evol_training[-1]))

                #######################   end loop Niter for the training fase

                list_pair_dist_std_delta_end = []

                list_pair_dist_std_delta_end.append(
                    numpy.mean(list_dist_fixed_parameters)
                )  # average dist between the curves over Niter
                list_pair_dist_std_delta_end.append(
                    numpy.std(list_dist_fixed_parameters))

                list_pair_dist_std_delta_end.append(
                    numpy.mean(list_dist_abs_at_ending_point_fixed_parameters))

                if (
                        numpy.mean(
                            list_dist_abs_at_ending_point_fixed_parameters)
                ) <= delta_end:  # i only consider situations close enough at the ending point

                    dict_filenames_tot_distance[
                        output_file] = list_pair_dist_std_delta_end

                    #print >> file3, alpha_F,damping,mutual_encouragement,threshold,dict_filenames_tot_distance[output_file][0],dict_filenames_tot_distance[output_file][1]

                    dict_filenames_list_dict_network_states[
                        output_file] = list_dict_network_states

                file = open(output_file, 'wt')
                for i in range(len(
                        time_evol_number_adopters)):  #time step by time step
                    list_fixed_t = []
                    for iteracion in range(
                            Niter_training
                    ):  #loop over all independent iter of the process
                        list_fixed_t.append(
                            time_evol_number_adopters_ITER[iteracion][i]
                        )  # i collect all values for the same t, different iter

                    print >> file, list_t[i], numpy.mean(
                        list_fixed_t), numpy.std(
                            list_fixed_t
                        ), alpha_F, damping, mutual_encouragement
                file.close()

                damping += delta_damping
            mutual_encouragement += delta_mutual_encouragement
        alpha_F += delta_alpha_F

    list_order_dict = compare_real_evol_vs_simus_to_be_called.pick_minimum_same_end(
        dict_filenames_tot_distance,
        "Persuasion_training_distributed_thresholds", all_team, Niter_training)

    #./Results/network_final_schedule_withTeam3_local/Time_evolutions_Persuasion_alpha0.4_damping0.4_mutual_encourg0.6_threshold0.5_unif_distr_2iter_2012_seed31Oct_finalnetwork.dat

    optimum_filename = list_order_dict[0][0]

    alpha_F = float(list_order_dict[0][0].split("_alpha")[1][0:3])
    alpha_A = 0.5 * alpha_F
    damping = float(list_order_dict[0][0].split("_damping")[1][0:3])
    mutual_encouragement = float(
        list_order_dict[0][0].split("_mutual_encourg")[1][0:3])

    #   raw_input()
    print "starting testing fase with:"
    print "alpha=", alpha_F, " damping=", damping, " mutual encourag=", mutual_encouragement, " distributed threshold"

    #  i already know the optimum, now i run the dynamics with those values, starting from the average state on the cutting point, and test:

    time_evol_number_adopters_ITER = [
    ]  # list of complete single realizations of the dynamics

    list_dict_network_states = []

    list_dist_fixed_parameters = []
    list_dist_at_ending_point_fixed_parameters = []
    list_dist_abs_at_ending_point_fixed_parameters = []

    list_lists_t_evolutions = []

    lista_num_adopters = []
    lista_Adopters = []
    dict_tot_Adopters = {}

    for dictionary in dict_filenames_list_dict_network_states[
            optimum_filename]:
        # dictionary={Dr1:status, Dr2:status,}  # one dict per iteration
        num_Adopters = 0.

        for key in dictionary:
            if dictionary[key] == "Adopter":
                num_Adopters += 1.
                if key not in lista_Adopters:
                    lista_Adopters.append(key)
                    dict_tot_Adopters[key] = 1.
                else:
                    dict_tot_Adopters[key] += 1.

        lista_num_adopters.append(num_Adopters)

    avg_adopters = int(
        numpy.mean(lista_num_adopters))  # i find out the average num Adopters
    print numpy.mean(lista_num_adopters), avg_adopters, numpy.std(
        lista_num_adopters)

    if numpy.mean(lista_num_adopters) - avg_adopters >= 0.5:
        avg_adopters += 1.0
        print avg_adopters

# i sort the list from more frequently infected to less
    list_sorted_dict = sorted(dict_tot_Adopters.iteritems(),
                              key=operator.itemgetter(1))

    new_list_sorted_dict = list_sorted_dict
    new_list_sorted_dict.reverse()

    print "Adopters:", new_list_sorted_dict

    #list_sorted_dict=[(u'Weiss', 5.0), (u'Wunderink', 5.0), (u'Keller', 4.0), (u'Go', 3.0), (u'Cuttica', 3.0), (u'Rosario', 2.0), (u'Radigan', 2.0), (u'Smith', 2.0), (u'RosenbergN', 2.0), (u'Gillespie', 1.0), (u'Osher', 1.0), (u'Mutlu', 1.0), (u'Dematte', 1.0), (u'Hawkins', 1.0), (u'Gates', 1.0)]

    lista_avg_Adopters = [
    ]  # i create the list of Drs that on average are most likely infected by the cutting day

    i = 1
    for item in new_list_sorted_dict:
        if (item[0] not in lista_avg_Adopters) and (i <= avg_adopters):

            lista_avg_Adopters.append(item[0])
            i += 1

    print lista_avg_Adopters

    for iter in range(Niter_testing):

        print "         ", iter

        dict_dr_status_current_iter = dict_filenames_list_dict_network_states[
            optimum_filename][iter]

        time_evol_number_adopters = [
        ]  # for a single realization of the dynamics

        dict_network_states = {}

        list_t = []

        ###############
        # NECESITO GUARDAR RECORD DE LOS THERSHOLDS PERSONALES PARA USARLOS LUEGO aki???
        ########

        list_Adopters = []  #set initial conditions
        for node in G.nodes():
            if G.node[node]['type'] != "shift":

                # personal threshold have been established for each dr at the beginning of the simu: set_ic()

                G.node[node][
                    "status"] = "NonAdopter"  # by default,  non-Adopters

                label = G.node[node]['label']

                if label in lista_avg_Adopters:

                    G.node[node]["status"] = "Adopter"
                    G.node[node]["adoption_vector"] = random.random() * (
                        1.0 -
                        G.node[node]["personal_threshold"]) + G.node[node][
                            "personal_threshold"]  #values from (threshold,1]
                    if G.node[node]["adoption_vector"] > 1.0:
                        G.node[node]["adoption_vector"] = 1.0

                    list_Adopters.append(G.node[node]["label"])

        time_evol_number_adopters.append(float(len(list_Adopters)))
        list_t.append(cutting_day)

        ################# the dynamics starts for the testing fase:

        t = cutting_day

        while t <= max_shift:  # loop over shifts, in chronological order  (the order is the day index since seeding_day)

            list_t.append(t)
            for n in G.nodes():
                if G.node[n]['type'] == "shift" and G.node[n][
                        'order'] == t:  # i look for the shift corresponding to that time step
                    flag_possible_persuasion = 0
                    for doctor in G.neighbors(n):
                        if G.node[doctor][
                                "status"] == "Adopter":  #first i check if any doctor is an adopter in this shift
                            flag_possible_persuasion = 1
                            break

                    if flag_possible_persuasion == 1:
                        list_doctors = []
                        for doctor in G.neighbors(
                                n):  # for all drs in that shift
                            list_doctors.append(doctor)

                        pairs = itertools.combinations(
                            list_doctors,
                            2)  # cos the shift can be 2 but also 3 doctors
                        for pair in pairs:
                            doctor1 = pair[0]
                            doctor2 = pair[1]

                            if G.node[doctor1]['status'] != G.node[doctor2][
                                    'status']:  # if they think differently,
                                # there will be persuasion
                                persuasion(
                                    G, damping, doctor1, doctor2, alpha_A,
                                    alpha_F)  # i move their values of opinion
                                update_opinions(
                                    G, doctor1, doctor2
                                )  #  i update status and make sure the values of the vectors stay between [0,1]

                            else:  # if two Adopters meet, they encourage each other (if two NonAdopters, nothing happens)

                                mutual_reinforcement(G, mutual_encouragement,
                                                     doctor1, doctor2)

            list_Adopters = []  #count how many i have at this time
            for n in G.nodes():
                try:
                    if G.node[n]["status"] == "Adopter":
                        if G.node[n]["label"] not in list_Adopters:
                            list_Adopters.append(G.node[n]["label"])
                except:
                    pass  # if the node is a shift, it doesnt have a 'status' attribute

            time_evol_number_adopters.append(float(len(list_Adopters)))
            print t, len(list_Adopters)

            t += 1

        ############## end while loop over t

        #raw_input()

        for n in G.nodes():
            if G.node[n]['type'] != "shift":
                dict_network_states[G.node[n]["label"]] = G.node[n]["status"]

        list_dict_network_states.append(dict_network_states)

        time_evol_number_adopters_ITER.append(time_evol_number_adopters)

        list_dist_fixed_parameters.append(
            compare_real_evol_vs_simus_to_be_called.compare_two_curves(
                list_actual_evol_testing, time_evol_number_adopters))

        list_dist_abs_at_ending_point_fixed_parameters.append(
            abs(time_evol_number_adopters[-1] - list_actual_evol_testing[-1]))

        list_dist_at_ending_point_fixed_parameters.append(
            time_evol_number_adopters[-1] - list_actual_evol_testing[-1])

        #######################end loop over Niter for the testing fase

    list_pair_dist_std_delta_end = []

    list_pair_dist_std_delta_end.append(
        numpy.mean(list_dist_fixed_parameters
                   ))  # average dist between the curves over Niter
    list_pair_dist_std_delta_end.append(numpy.std(list_dist_fixed_parameters))

    list_pair_dist_std_delta_end.append(
        numpy.mean(list_dist_abs_at_ending_point_fixed_parameters))

    if (
            numpy.mean(list_dist_abs_at_ending_point_fixed_parameters)
    ) <= delta_end:  # i only consider situations close enough at the ending point

        dict_filenames_tot_distance[output_file] = list_pair_dist_std_delta_end

        dict_filenames_list_dict_network_states[
            output_file] = list_dict_network_states

    num_valid_endings = 0.
    for item in list_dist_abs_at_ending_point_fixed_parameters:
        if item <= delta_end:  # i count how many realizations i get close enough at the ending point
            num_valid_endings += 1.

    print "average distance of the optimum in the testing segment:", numpy.mean(
        list_dist_fixed_parameters), numpy.std(
            list_dist_fixed_parameters), list_dist_fixed_parameters
    print "fraction of realizations that end within delta_doctor:", num_valid_endings / Niter_testing, list_dist_abs_at_ending_point_fixed_parameters

    histograma_gral_negv_posit.histograma(
        list_dist_at_ending_point_fixed_parameters,
        "../Results/histogr_raw_distances_ending_test_train_persuasion_avg_ic_"
        + str(Niter_testing) + "iter_distributed_thresholds.dat")

    output_file8 = "../Results/List_tot_distances_training_segment_persuasion_alpha" + str(
        alpha_F) + "_damping" + str(damping) + "_mutual_encourg" + str(
            mutual_encouragement) + "_" + str(
                Niter_training) + "iter_avg_ic_distributed_thresholds.dat"
    file8 = open(output_file8, 'wt')

    for item in list_dist_fixed_parameters:
        print >> file8, item
    file8.close()

    output_file9 = "../Results/List_distances_ending_training_segment_persuasion_alpha" + str(
        alpha_F) + "_damping" + str(damping) + "_mutual_encourg" + str(
            mutual_encouragement) + "_" + str(
                Niter_training) + "iter_avg_ic_distributed_thresholds.dat"
    file9 = open(output_file9, 'wt')

    for item in list_dist_abs_at_ending_point_fixed_parameters:
        print >> file9, item
    file9.close()

    output_file = dir + "Time_evolutions_Persuasion_testing_avg_ic_alpha" + str(
        alpha_F
    ) + "_damping" + str(damping) + "_mutual_encourg" + str(
        mutual_encouragement
    ) + "_unif_distr_" + str(
        Niter_training
    ) + "iter_2012_seed31Oct_finalnetwork_avg_ic_distributed_thresholds.dat"
    file = open(output_file, 'wt')

    for i in range(len(time_evol_number_adopters)):  #time step by time step
        list_fixed_t = []
        for iteracion in range(
                Niter_training
        ):  #loop over all independent iter of the process
            list_fixed_t.append(
                time_evol_number_adopters_ITER[iteracion]
                [i])  # i collect all values for the same t, different iter

        print >> file, list_t[i], numpy.mean(list_fixed_t), numpy.std(
            list_fixed_t), alpha_F, damping, mutual_encouragement
    file.close()

    print "written training segment file:", optimum_filename
    print "written testing segment file:", output_file

    output_file10 = "../Results/Summary_results_train_test_persuasion_alpha" + str(
        alpha_F) + "_damping" + str(damping) + "_mutual_encourg" + str(
            mutual_encouragement) + "_" + str(
                Niter_training) + "iter_avg_ic_distributed_thresholds.dat"
    file10 = open(output_file10, 'wt')

    print >> file10, "Summary results from train-testing persuasion with", Niter_training, Niter_testing, "iter (respectively), using the avg of the cutting points as IC, and with values for the parameters:  alpha ", alpha_F, " damping: ", damping, " mutual_encourg: ", mutual_encouragement, " distributed threshold"

    print >> file10, "average distance of the optimum in the testing segment:", numpy.mean(
        list_dist_fixed_parameters), numpy.std(
            list_dist_fixed_parameters), list_dist_fixed_parameters
    print >> file10, "fraction of realizations that end within delta_doctor:", num_valid_endings / Niter_testing, list_dist_at_ending_point_fixed_parameters

    print >> file10, "written training segment file:", optimum_filename
    print >> file10, "written testing segment file:", output_file

    file10.close()
Exemplo n.º 4
0
def main(graph_name):
    """Train/test an SIR-like infection model of idea adoption on a hospital shift graph.

    Workflow (Python 2):
      1. Read the shift-schedule graph (GML) and the empirical adoption curve (CSV).
      2. Grid-search (prob_infection x prob_Immune) on the training segment
         [day 0, cutting_day), running Niter_training stochastic realizations per
         parameter pair and scoring each against the empirical training curve.
      3. Pick the optimum parameter pair (via
         compare_real_evol_vs_simus_to_be_called.pick_minimum_same_end, keyed by
         output filename), rebuild an "average" initial condition at cutting_day
         from the saved per-realization network states, and run Niter_testing
         realizations over the testing segment [cutting_day, max_order].
      4. Write landscape, distance, histogram and summary files under ../Results/.

    graph_name -- path to a GML file readable by nx.read_gml.  Doctor nodes are
    expected to carry 'type' ("A"/"F", or "shift"), 'label' and, for shifts,
    'order' attributes (uses the networkx 1.x G.node API).

    Relies on module-level imports: nx, csv, numpy, random, operator, and the
    project modules compare_real_evol_vs_simus_to_be_called and
    histograma_gral_negv_posit.
    """

    cutting_day = 125  # to separate   training-testing

    G = nx.read_gml(graph_name)

    all_team = "NO"  # as adopters or not

    dir_real_data = '../Results/'

    delta_end = 3  # >= than + or -  dr difference at the end of the evolution (NO realization ends up closer than this!!!! if 2, i get and empty list!!!)

    Niter_training = 100
    Niter_testing = 100

    # Truncate (create-empty) the landscape file now; it is re-opened in
    # append mode ('at') once per parameter pair inside the grid search.
    output_file3 = dir_real_data + "Landscape_parameters_infection_train_test_" + str(
        Niter_training) + "iter.dat"
    file3 = open(output_file3, 'wt')

    file3.close()

    ######################################################################################
    #  I read the file of the actual evolution of the idea spreading in the hospital:   ##
    ######################################################################################

    if all_team == "YES":
        filename_actual_evol = dir_real_data + "HospitalModel_august1_adoption_counts_all_team_as_adopters_SIMPLER.csv"

    else:
        filename_actual_evol = dir_real_data + "HospitalModel_august1_adoption_counts_SIMPLER.csv"
    # (translated from Spanish) no longer necessary to also change the file
    # name inside the curve-comparison code when switching input files

    list_actual_evol = []
    result_actual_file = csv.reader(open(filename_actual_evol, 'rb'),
                                    delimiter=',')
    cont = 0
    for row in result_actual_file:
        if cont > 0:  # i ignore the first line with the headers

            num_adopters = row[3]  # 4th CSV column holds the adopter count

            list_actual_evol.append(float(num_adopters))

        cont += 1

    # Training and testing segments overlap by one day (index cutting_day-1)
    # so the testing curve starts from the training segment's last point.
    list_actual_evol_training = list_actual_evol[:cutting_day]
    list_actual_evol_testing = list_actual_evol[(cutting_day - 1):]

    ##################################################################

    #../Results/network_final_schedule_withTeam3/infection/Average_time_evolution_Infection_p0.9_Immune0.5_1000iter_2012.dat

    # Grid-search ranges; upper bounds slightly above 1.0 so that 1.0 itself
    # is included despite float accumulation of the deltas.
    prob_min = 0.0
    prob_max = 1.001
    delta_prob = 0.01

    prob_Immune_min = 0.00
    prob_Immune_max = 1.001
    delta_prob_Immune = 0.01

    # NOTE(review): 'dir' shadows the builtin for the rest of this function.
    dir = "../Results/network_final_schedule_withTeam3_local/infection/"

    dict_filenames_tot_distance = {
    }  # i will save the filename as key and the tot distance from that curve to the original one

    dict_filenames_list_dict_network_states = {
    }  # i will save the filename as key and the list of networks at cutting day as value

    ############ grid search over (prob_Immune, prob_infection) -- training ###########

    prob_Immune = prob_Immune_min
    while prob_Immune <= prob_Immune_max:

        print "prom Immune:", prob_Immune

        prob_infection = prob_min
        while prob_infection <= prob_max:

            print "  p:", prob_infection

            # Parameter values are baked into the filename; they are parsed
            # back out of the winning filename after the grid search.
            output_file2 = dir + "Average_time_evolution_Infection_training_p" + str(
                prob_infection) + "_" + "Immune" + str(
                    prob_Immune) + "_" + str(
                        Niter_training) + "iter_2012_avg_ic_day" + str(
                            cutting_day) + ".dat"
            file2 = open(output_file2, 'wt')
            file2.close()

            # i create the empty list of list for the Niter temporal evolutions
            num_shifts = 0
            num_Drs = 0.  # float so later fractions (len/num_Drs) divide correctly in Py2
            for n in G.nodes():
                G.node[n]["status"] = "S"
                if G.node[n]['type'] == "shift":
                    num_shifts += 1
                else:
                    num_Drs += 1.

        #  list_final_I_values_fixed_p=[]  # i dont care about the final values right now, but about the whole time evol
            list_lists_t_evolutions = []

            # Per-parameter-pair accumulators, reset before each Niter batch.
            list_dist_fixed_parameters = []
            list_dist_abs_at_ending_point_fixed_parameters = []
            list_final_num_infected = []
            list_dict_network_states = []

            # NOTE(review): 'iter' shadows the builtin inside this loop.
            for iter in range(Niter_training):

                print "     iter:", iter

                dict_network_states = {}

                list_I = []  #list infected doctors
                list_ordering = []
                list_s = []
                list_A = []
                list_F = []

                ########### set I.C.

                max_order = 0
                for n in G.nodes():
                    G.node[n]["status"] = "S"  # all nodes are Susceptible
                    if G.node[n]['type'] == "shift":
                        list_s.append(n)
                        if G.node[n]['order'] > max_order:
                            max_order = G.node[n]['order']
                    else:
                        # Seed nodes: Weiss and Wunderink start infected.
                        if G.node[n]['label'] == "Wunderink" or G.node[n][
                                "label"] == "Weiss":
                            G.node[n]["status"] = "I"
                            list_I.append(G.node[n]['label'])

                        if G.node[n]['type'] == "A":
                            list_A.append(n)

                        if G.node[n]['type'] == "F":
                            list_F.append(n)

                list_single_t_evolution = []
                list_single_t_evolution.append(
                    2.0)  # I always start with TWO infected doctors!!

                for n in G.nodes(
                ):  # i make some DOCTORs INMUNE  (anyone except Weiss and Wunderink)
                    if (G.node[n]['type'] == "A") or (G.node[n]['type']
                                                      == "F"):
                        if G.node[n]['label'] != "Wunderink" and G.node[n][
                                "label"] != "Weiss":
                            rand = random.random()
                            if rand < prob_Immune:
                                G.node[n]["status"] = "Immune"

            #   print max_order

            ################# the dynamics starts:

                t = 1
                while t < cutting_day:  # loop over shifts, in order   just until cutting day (training segment)
                    for n in G.nodes():
                        if G.node[n]['type'] == "shift" and G.node[n][
                                'order'] == t:
                            flag_possible_infection = 0
                            for doctor in G.neighbors(
                                    n
                            ):  #first i check if any doctor is infected in this shift
                                if G.node[doctor]["status"] == "I":
                                    flag_possible_infection = 1

                            if flag_possible_infection:
                                for doctor in G.neighbors(
                                        n
                                ):  # then the doctors in that shift, gets infected with prob_infection
                                    if G.node[doctor]["status"] == "S":
                                        rand = random.random()
                                        if rand < prob_infection:
                                            G.node[doctor]["status"] = "I"
                                            list_I.append(
                                                G.node[doctor]["label"])

                    list_single_t_evolution.append(float(
                        len(list_I)))  #/(len(list_A)+len(list_F)))

                    t += 1
                ######## end t loop

                # Snapshot every doctor's status at cutting_day; these states
                # feed the "average initial condition" of the testing phase.
                for n in G.nodes():
                    if G.node[n]['type'] != "shift":
                        dict_network_states[G.node[n]
                                            ["label"]] = G.node[n]["status"]

                list_dict_network_states.append(dict_network_states)

                list_lists_t_evolutions.append(list_single_t_evolution)

                list_dist_fixed_parameters.append(
                    compare_real_evol_vs_simus_to_be_called.compare_two_curves(
                        list_actual_evol_training, list_single_t_evolution))

                list_dist_abs_at_ending_point_fixed_parameters.append(
                    abs(list_single_t_evolution[-1] -
                        list_actual_evol_training[-1])
                )  # i save the distance at the ending point between the current simu and actual evol

                #  print "actual:",len(list_actual_evol_training),"  simu:",len(list_single_t_evolution)   # 125, 125

                list_final_num_infected.append(list_single_t_evolution[-1])

            ######## end loop Niter for the training fase

            list_pair_dist_std_delta_end = []

            list_pair_dist_std_delta_end.append(
                numpy.mean(list_dist_fixed_parameters)
            )  # average dist between the curves over Niter
            list_pair_dist_std_delta_end.append(
                numpy.std(list_dist_fixed_parameters))

            list_pair_dist_std_delta_end.append(
                numpy.mean(list_dist_abs_at_ending_point_fixed_parameters))

            file3 = open(output_file3, 'at')  # i print out the landscape
            print >> file3, prob_infection, prob_Immune, numpy.mean(
                list_dist_abs_at_ending_point_fixed_parameters
            ), numpy.mean(list_dist_fixed_parameters), numpy.mean(
                list_final_num_infected
            ), numpy.std(list_final_num_infected), numpy.std(
                list_final_num_infected) / numpy.mean(list_final_num_infected)
            file3.close()

            if (
                    numpy.mean(list_dist_abs_at_ending_point_fixed_parameters)
            ) <= delta_end:  # i only consider situations close enough at the ending point

                dict_filenames_tot_distance[
                    output_file2] = list_pair_dist_std_delta_end

                dict_filenames_list_dict_network_states[
                    output_file2] = list_dict_network_states

            # Append the Niter-averaged training curve, one mean per time step.
            file2 = open(output_file2, 'at')
            for s in range(len(list_single_t_evolution)):
                list_fixed_t = []
                for iter in range(Niter_training):
                    list_fixed_t.append(list_lists_t_evolutions[iter][s])
                print >> file2, s, numpy.mean(list_fixed_t)
            file2.close()

            prob_infection += delta_prob
        prob_Immune += delta_prob_Immune

    list_order_dict = compare_real_evol_vs_simus_to_be_called.pick_minimum_same_end(
        dict_filenames_tot_distance, "Infection_training", all_team,
        Niter_training, cutting_day)

    # it returns a list of tuples like this :  ('../Results/network_final_schedule_withTeam3_local/infection/Average_time_evolution_Infection_training_p0.7_Immune0.0_2iter_2012.dat', [2540.0, 208.0, 1.0])  the best set of parameters  being the fist one of the elements in the list.

    # NOTE(review): optimum parameters are recovered by parsing the winning
    # filename; this is fragile if the filename template above ever changes.
    optimum_filename = list_order_dict[0][0]
    prob_infection = float(list_order_dict[0][0].split("_p")[1].split("_")[0])
    prob_Immune = float(
        list_order_dict[0][0].split("_Immune")[1].split("_")[0])

    print "starting testing fase with:"
    print "p=", prob_infection, " and Pimmune=", prob_Immune

    #  i already know the optimum, now i run the dynamics with those values, starting from the average state on the cutting point, and test:

    list_dist_fixed_parameters = []
    list_dist_abs_at_ending_point_fixed_parameters = []
    list_dist_at_ending_point_fixed_parameters = []

    list_lists_t_evolutions = []

    # Tally, across the optimum's saved realizations, how often each doctor
    # ended the training segment Infected / Immune.
    lista_num_infect = []
    lista_I_drs = []
    dict_tot_I_doctors = {}

    lista_num_imm = []
    lista_Imm_drs = []
    dict_tot_Imm_doctors = {}
    for dictionary in dict_filenames_list_dict_network_states[
            optimum_filename]:

        # dictionary={Dr1:status, Dr2:status,}  # one dict per iteration
        num_I = 0.
        num_Imm = 0.

        for key in dictionary:
            if dictionary[key] == "I":
                num_I += 1.
                if key not in lista_I_drs:
                    lista_I_drs.append(key)
                    dict_tot_I_doctors[key] = 1.
                else:
                    dict_tot_I_doctors[key] += 1.

            elif dictionary[key] == "Immune":
                num_Imm += 1.
                if key not in lista_Imm_drs:
                    lista_Imm_drs.append(key)
                    dict_tot_Imm_doctors[key] = 1.
                else:
                    dict_tot_Imm_doctors[key] += 1.

        lista_num_infect.append(num_I)
        lista_num_imm.append(num_Imm)

    avg_inf_drs = int(
        numpy.mean(lista_num_infect))  # i find out the average num I
    print "I", numpy.mean(lista_num_infect), numpy.mean(
        lista_num_infect) / num_Drs, avg_inf_drs, numpy.std(lista_num_infect)

    if numpy.mean(lista_num_infect
                  ) - avg_inf_drs >= 0.5:  # truncation (round-half-up) correction
        avg_inf_drs += 1.0
    #  print avg_inf_drs

    avg_imm_drs = int(
        numpy.mean(lista_num_imm))  # i find out the average num Immune
    print "Imm", numpy.mean(lista_num_imm), numpy.mean(
        lista_num_imm) / num_Drs, avg_imm_drs, numpy.std(lista_num_imm)

    if numpy.mean(
            lista_num_imm) - avg_imm_drs >= 0.5:  # truncation (round-half-up) correction
        avg_imm_drs += 1.0
    # print avg_imm_drs

# i sort the list from more frequently infected to less
    list_sorted_dict = sorted(dict_tot_I_doctors.iteritems(),
                              key=operator.itemgetter(1))

    new_list_sorted_dict = list_sorted_dict
    new_list_sorted_dict.reverse()

    print "I:", new_list_sorted_dict

    # i sort the list from more frequently imm to less
    list_sorted_dict_imm = sorted(dict_tot_Imm_doctors.iteritems(),
                                  key=operator.itemgetter(1))

    new_list_sorted_dict_imm = list_sorted_dict_imm
    new_list_sorted_dict_imm.reverse()

    print "Immunes:", new_list_sorted_dict_imm

    #   raw_input()

    #list_sorted_dict=[(u'Weiss', 5.0), (u'Wunderink', 5.0), (u'Keller', 4.0), (u'Go', 3.0), (u'Cuttica', 3.0), (u'Rosario', 2.0), (u'Radigan', 2.0), (u'Smith', 2.0), (u'RosenbergN', 2.0), (u'Gillespie', 1.0), (u'Osher', 1.0), (u'Mutlu', 1.0), (u'Dematte', 1.0), (u'Hawkins', 1.0), (u'Gates', 1.0)]

    lista_avg_I_drs = [
    ]  # i create the list of Drs that on average are most likely infected by the cutting day

    # Take the top avg_inf_drs most-frequently-infected doctors.
    i = 1
    for item in new_list_sorted_dict:
        if (item[0] not in lista_avg_I_drs) and (i <= avg_inf_drs):

            lista_avg_I_drs.append(item[0])
            i += 1

    print lista_avg_I_drs, len(lista_avg_I_drs), float(
        len(lista_avg_I_drs)) / num_Drs

    lista_avg_Imm_drs = [
    ]  # i create the list of Drs that on average are most likely immune by the cutting day

    # Top avg_imm_drs most-frequently-immune doctors, excluding any already
    # chosen as infected (a doctor cannot be both).
    i = 1
    for item in new_list_sorted_dict_imm:
        if (item[0] not in lista_avg_Imm_drs) and (i <= avg_imm_drs) and (
                item[0] not in lista_avg_I_drs):

            lista_avg_Imm_drs.append(item[0])
            i += 1

    print lista_avg_Imm_drs, len(lista_avg_Imm_drs), float(
        len(lista_avg_Imm_drs)) / num_Drs

    ############ testing phase: optimum parameters, averaged initial condition ###########

    # raw_input()
    for iter in range(Niter_testing):

        # i establish the initial conditions (as the average of the cutting point)

        list_I = []  #list infected doctors
        list_Immune = []
        for node in G.nodes():
            if G.node[node]['type'] != "shift":
                label = G.node[node]['label']
                G.node[node]["status"] = "S"  #by default, all are susceptible

                if label in lista_avg_I_drs:
                    G.node[node]["status"] = "I"
                    list_I.append(label)
                elif label in lista_avg_Imm_drs:
                    G.node[node]["status"] = "Immune"
                    list_Immune.append(label)
                # Sanity check: the Immune list was built to exclude infected
                # doctors, so this should never trigger.
                if label in lista_avg_I_drs and label in lista_avg_Imm_drs:

                    print label, "is in the top most infected AND immune!"
                    raw_input()

        print "# I at the beginning of the testing fase:", len(list_I), float(
            len(list_I)) / num_Drs, " and # Immune:", len(list_Immune), float(
                len(list_Immune)) / num_Drs

        # print "     iter:",iter, len(list_I)

        list_single_t_evolution = []
        list_single_t_evolution.append(len(list_I))

        # Same infection dynamics as training, now over [cutting_day, max_order].
        t = cutting_day
        while t <= max_order:  # loop over shifts, in order   just until cutting day (training segment)
            for n in G.nodes():
                if G.node[n]['type'] == "shift" and G.node[n]['order'] == t:
                    flag_possible_infection = 0
                    for doctor in G.neighbors(
                            n
                    ):  #first i check if any doctor is infected in this shift
                        if G.node[doctor]["status"] == "I":
                            flag_possible_infection = 1

                    if flag_possible_infection:
                        for doctor in G.neighbors(
                                n
                        ):  # then the doctors in that shift, gets infected with prob_infection
                            if G.node[doctor]["status"] == "S":
                                rand = random.random()
                                if rand < prob_infection:
                                    G.node[doctor]["status"] = "I"
                                    list_I.append(G.node[doctor]["label"])

            list_single_t_evolution.append(float(len(list_I)))
            #         print t, len(list_I)
            t += 1

        list_lists_t_evolutions.append(list_single_t_evolution)

        # Recount final I/Immune doctors for the end-of-run report.
        list_I = []
        list_Immune = []
        for node in G.nodes():
            if G.node[node]['type'] != "shift":
                label = G.node[node]['label']

                if G.node[node]["status"] == "I":
                    list_I.append(label)
                elif G.node[node]["status"] == "Immune":
                    list_Immune.append(label)

        print "  # I at the END of the testing fase:", len(list_I), float(
            len(list_I)) / num_Drs, " and # Immune:", len(list_Immune), float(
                len(list_Immune)) / num_Drs, "\n"

        list_dist_fixed_parameters.append(
            compare_real_evol_vs_simus_to_be_called.compare_two_curves(
                list_actual_evol_testing, list_single_t_evolution))

        # print  " dist:",list_dist_fixed_parameters[-1]

        list_dist_abs_at_ending_point_fixed_parameters.append(
            abs(list_single_t_evolution[-1] - list_actual_evol_testing[-1])
        )  # i save the distance at the ending point between the current simu and actual evol

        list_dist_at_ending_point_fixed_parameters.append(
            list_single_t_evolution[-1] - list_actual_evol_testing[-1]
        )  # i save the distance at the ending point between the current simu and actual evol

    ############### end loop Niter  for the testing

    num_valid_endings = 0.
    for item in list_dist_abs_at_ending_point_fixed_parameters:
        if item <= delta_end:  # i count how many realizations i get close enough at the ending point
            num_valid_endings += 1.

    print "average distance of the optimum in the testing segment:", numpy.mean(
        list_dist_fixed_parameters), numpy.std(
            list_dist_fixed_parameters), list_dist_fixed_parameters
    print "fraction of realizations that end within delta_doctor:", num_valid_endings / Niter_testing, list_dist_abs_at_ending_point_fixed_parameters

    # Histogram of signed end-point distances (negative = simulation undershoots).
    histograma_gral_negv_posit.histograma(
        list_dist_at_ending_point_fixed_parameters,
        "../Results/histogr_raw_distances_ending_test_train_infection_p" +
        str(prob_infection) + "_" + "Immune" + str(prob_Immune) + "_" +
        str(Niter_training) + "iter_avg_ic_day" + str(cutting_day) + ".dat")

    output_file8 = "../Results/List_tot_distances_training_segment_infection_p" + str(
        prob_infection) + "_" + "Immune" + str(prob_Immune) + "_" + str(
            Niter_training) + "iter_avg_ic_day" + str(cutting_day) + ".dat"
    file8 = open(output_file8, 'wt')

    for item in list_dist_fixed_parameters:
        print >> file8, item
    file8.close()

    output_file9 = "../Results/List_distances_ending_training_segment_infection_p" + str(
        prob_infection) + "_" + "Immune" + str(prob_Immune) + "_" + str(
            Niter_training) + "iter_avg_ic_day" + str(cutting_day) + ".dat"
    file9 = open(output_file9, 'wt')

    for item in list_dist_abs_at_ending_point_fixed_parameters:
        print >> file9, item
    file9.close()

    # Average testing-segment curve over the Niter_testing realizations,
    # indexed by absolute day (s + cutting_day).
    output_file5 = dir + "Average_time_evolution_Infection_testing_p" + str(
        prob_infection) + "_" + "Immune" + str(prob_Immune) + "_" + str(
            Niter_testing) + "iter_2012_avg_ic_day" + str(cutting_day) + ".dat"

    file5 = open(output_file5, 'wt')
    for s in range(len(list_single_t_evolution)):
        list_fixed_t = []
        for iter in range(Niter_testing):
            list_fixed_t.append(list_lists_t_evolutions[iter][s])
        print >> file5, s + cutting_day, numpy.mean(list_fixed_t)
    #  print  s+cutting_day,numpy.mean(list_fixed_t)
    file5.close()

    print "written training segment file:", optimum_filename
    print "written testing segment file:", output_file5

    print "printed out landscape file:", output_file3

    output_file10 = "../Results/Summary_results_training_segment_infection_p" + str(
        prob_infection) + "_" + "Immune" + str(prob_Immune) + "_" + str(
            Niter_training) + "iter_avg_ic_day" + str(cutting_day) + ".dat"
    file10 = open(output_file10, 'wt')

    print >> file10, "Summary results from train-testing persuasion with", Niter_training, Niter_testing, "iter (respectively), using all the individual cutting points as IC, and with values for the parameters:  prob_inf ", prob_infection, " prob immune: ", prob_Immune, "\n"

    print >> file10, "average distance of the optimum in the testing segment:", numpy.mean(
        list_dist_fixed_parameters), numpy.std(
            list_dist_fixed_parameters), list_dist_fixed_parameters, "\n"
    print >> file10, "fraction of realizations that end within delta_doctor:", num_valid_endings / Niter_testing, list_dist_abs_at_ending_point_fixed_parameters, "\n"

    print >> file10, "written training segment file:", optimum_filename
    print >> file10, "written testing segment file:", output_file5

    file10.close()
Exemplo n.º 5
0
def main(graph_name):
    """Simulate the infection-with-memory dynamics on the testing segment of a shift network.

    Reads the hospital shift graph from *graph_name* (GML), seeds the initial
    condition from the empirically observed adopters at ``cutting_day``, then
    runs the dose/personal-threshold infection process ``Niter`` times over the
    remaining shifts (one sweep of the p / prob_Immune / dose parameter grid,
    here pinned to single values).  Writes to disk: the iteration-averaged
    adoption trajectory, envelope curves (optional), several distance
    histograms, and a summary file.

    NOTE(review): Python 2 code (print statements, NetworkX 1.x ``G.node``
    API).  Depends on module-level helpers defined elsewhere in this file
    (``look_for_T3_weekends``, ``compare_real_evol_vs_simus_to_be_called``,
    ``calculate_envelope_set_curves``, ``histograma_gral_negv_posit``,
    ``histograma_bines_gral``) and on ``nx``, ``numpy``, ``random`` imports.
    """

    G = nx.read_gml(graph_name)

    cutting_day = 243  # i use this only for the filenames

    for_testing_fixed_set = "YES"  # when YES, fixed values param, to get all statistics on final distances etc
    # change the range for the parameters accordingly

    envelopes = "YES"

    Niter = 1000  # 100 iter seems to be enough (no big diff. with respect to 1000it)

    percent_envelope = 95.

    list_id_weekends_T3 = look_for_T3_weekends(
        G
    )  # T3 doesnt share fellows in the weekend  (but they are the exception)
    Nbins = 1000  # for the histogram of sum of distances

    all_team = "NO"  # as adopters or not

    dir_real_data = '../Results/'
    # NOTE: shadows the builtin `dir`; kept unchanged to preserve behavior.
    dir = "../Results/weight_shifts/infection/"

    delta_end = 3.  # >= than + or -  dr difference at the end of the evolution (NO realization ends up closer than this!!!! if 2, i get and empty list!!!)

    ######################################################################################
    #  I read the file of the actual evolution of the idea spreading in the hospital:   ##
    ######################################################################################

    filename_actual_evol = "../Data/Attendings_Orders_from_inference_list_adopters_day.dat"

    file1 = open(
        filename_actual_evol, 'r'
    )  ## i read the file:  list_dates_and_names_current_adopters.txt  (created with: extract_real_evolution_number_adopters.py)
    list_lines_file = file1.readlines()

    # Parse one line per day: "<day> <num_adopters> <adopter labels...>".
    # dict_days_list_empirical_adopters maps day -> list of adopter labels,
    # used later to seed the initial condition at cutting_day.
    dict_days_list_empirical_adopters = {}
    list_actual_evol = []
    for line in list_lines_file:  # [1:]:   # i exclude the first row
        day = int(line.split(" ")[0])
        num_adopters = float(line.split(" ")[1])
        list_actual_evol.append(num_adopters)
        list_current_adopters = []
        for element in line.split(
                " "
        )[2:]:  # i need to ignore the empty columns from the original datafile
            if element:
                if element != '\n':
                    list_current_adopters.append(element.strip('\n'))

        dict_days_list_empirical_adopters[day] = list_current_adopters

    # Empirical curve restricted to the testing segment (from cutting_day on).
    list_actual_evol_testing = list_actual_evol[cutting_day:]

    ##################################################################

    # Parameter grid: each (min, max, delta) triple is collapsed to a single
    # value here (max just above min), consistent with for_testing_fixed_set.
    prob_min = 0.8
    prob_max = 0.801
    delta_prob = 0.1

    prob_Immune_min = 0.10
    prob_Immune_max = 0.101
    delta_prob_Immune = 0.1

    # threshold is not personal, and set randomly to a value (0,1)

    dose_min = 0.2  # of a single encounter with an infected  (starting from zero doesnt make sense)
    dose_max = 0.201
    delta_dose = 0.101

    prob_Immune = prob_Immune_min
    while prob_Immune <= prob_Immune_max:

        print "prom Immune:", prob_Immune

        prob_infection = prob_min
        while prob_infection <= prob_max:

            print "  p:", prob_infection

            dose = dose_min
            while dose <= dose_max:

                print "  dose:", dose

                output_file2 = dir + "Average_time_evolution_Infection_memory_p" + str(
                    prob_infection) + "_Immune" + str(
                        prob_Immune
                    ) + "_FIXED_threshold_from_distrib_dose" + str(
                        dose) + "_" + str(Niter) + "iter_day" + str(
                            cutting_day) + "_A_F_inferred_middle_real_ic.dat"

                # Truncate the output file now; it is re-opened in append mode
                # after the Niter loop to write the averaged trajectory.
                file2 = open(output_file2, 'wt')
                file2.close()

                #  list_final_I_values_fixed_p=[]  # i dont care about the final values right now, but about the whole time evol
                list_lists_t_evolutions = []

                # Per-realization statistics accumulated over the Niter runs.
                list_dist_fixed_parameters_testing_segment = []
                list_abs_dist_at_ending_point_fixed_parameters = []
                list_dist_at_ending_point_fixed_parameters = []
                list_final_num_infected = []
                list_abs_dist_point_by_point_indiv_simus_to_actual = []
                list_dist_point_by_point_indiv_simus_to_actual = []

                #   list_abs_dist_at_cutting_day=[]

                for iter in range(Niter):

                    #   print "     iter:",iter

                    ########### set I.C.
                    # Reset every node, then mark as infected ("I") exactly the
                    # doctors who are empirical adopters at cutting_day.

                    list_I = []  #list infected doctors
                    max_order = 0
                    for n in G.nodes():
                        G.node[n]["status"] = "S"  # all nodes are Susceptible
                        G.node[n][
                            "infec_value"] = 0.  # when this value goes over the infect_threshold, the dr is infected
                        G.node[n]["personal_threshold"] = random.random(
                        )  # for a dr to become infected

                        if G.node[n]['type'] == "shift":
                            if G.node[n]['order'] > max_order:
                                max_order = G.node[n][
                                    'order']  # to get the last shift-order for the time loop
                        else:
                            if G.node[n][
                                    'label'] in dict_days_list_empirical_adopters[
                                        cutting_day]:
                                # Force infection: push infec_value over the threshold.
                                G.node[n]["infec_value"] = G.node[n][
                                    "personal_threshold"] + 1.
                                G.node[n]["status"] = "I"
                                list_I.append(G.node[n]['label'])

                    list_single_t_evolution = []
                    old_num_adopters = len(
                        dict_days_list_empirical_adopters[cutting_day])
                    list_single_t_evolution.append(
                        old_num_adopters
                    )  # I always start with TWO infected doctors!!

                    for n in G.nodes(
                    ):  # i make some DOCTORs INMUNE  (anyone except Weiss and Wunderink)
                        if (G.node[n]['type'] == "A") or (G.node[n]['type']
                                                          == "F"):
                            if G.node[n][
                                    'label'] not in dict_days_list_empirical_adopters[
                                        cutting_day]:
                                rand = random.random()
                                if rand < prob_Immune:
                                    G.node[n]["status"] = "Immune"

                    ################# the dynamics starts:

                    shift_length = 5  #i know the first shift (order 0) is of length 5

                    t = cutting_day
                    while t <= max_order:  # loop over shifts, in order
                        for n in G.nodes():
                            if G.node[n]['type'] == "shift" and G.node[n][
                                    'order'] == t:
                                shift_length = int(G.node[n]['shift_length'])

                                if shift_length == 2 and n not in list_id_weekends_T3:
                                    shift_length = 1  # because during weekends, the fellow does rounds one day with Att1 and the other day with Att2.  (weekend shifts for T3 are two day long, with no sharing fellows)

                                flag_possible_infection = 0
                                for doctor in G.neighbors(
                                        n
                                ):  #first i check if any doctor is infected in this shift
                                    if G.node[doctor]["status"] == "I":
                                        flag_possible_infection = 1

                                if flag_possible_infection:
                                    for doctor in G.neighbors(
                                            n
                                    ):  # then the doctors in that shift, gets infected with prob_infection

                                        # One exposure chance per day of the shift:
                                        # each success adds `dose` to the doctor's
                                        # accumulated infec_value (the "memory").
                                        for i in range(shift_length):
                                            if G.node[doctor]["status"] == "S":
                                                rand = random.random()
                                                if rand < prob_infection:  # with prob p the infection occurres

                                                    G.node[doctor][
                                                        "infec_value"] += dose  # and bumps the infection_value of that susceptible dr

                                                    if G.node[doctor][
                                                            "infec_value"] >= G.node[
                                                                doctor][
                                                                    "personal_threshold"]:  # the threshold for infection is personal

                                                        G.node[doctor][
                                                            "status"] = "I"
                                                        # if G.node[doctor]["type"]=="A":   # fellows participate in the dynamics, but i only consider the attendings as real adopters
                                                        list_I.append(
                                                            G.node[doctor]
                                                            ["label"])

                        new_num_adopters = len(list_I)

                        # Expand the per-shift result into one entry per day:
                        # the first half of the shift keeps the old count, the
                        # second half takes the new one (adoption assumed to
                        # happen mid-shift).  Guards keep the list aligned with
                        # max_order.
                        if shift_length == 5:  # i estimate that adoption happens in the middle of the shift
                            if t + 5 < max_order:
                                list_single_t_evolution.append(
                                    old_num_adopters)
                            if t + 4 < max_order:
                                list_single_t_evolution.append(
                                    old_num_adopters)
                            if t + 3 < max_order:
                                list_single_t_evolution.append(
                                    new_num_adopters)
                            if t + 2 < max_order:
                                list_single_t_evolution.append(
                                    new_num_adopters)
                            if t + 1 < max_order:
                                list_single_t_evolution.append(
                                    new_num_adopters)
                            t += 5

                        elif shift_length == 4:
                            if t + 4 < max_order:
                                list_single_t_evolution.append(
                                    old_num_adopters)
                            if t + 3 < max_order:
                                list_single_t_evolution.append(
                                    old_num_adopters)

                            if t + 2 < max_order:
                                list_single_t_evolution.append(
                                    new_num_adopters)

                            if t + 1 < max_order:
                                list_single_t_evolution.append(
                                    new_num_adopters)
                            t += 4

                        elif shift_length == 3:
                            if t + 3 < max_order:
                                list_single_t_evolution.append(
                                    old_num_adopters)

                            if t + 2 < max_order:
                                list_single_t_evolution.append(
                                    new_num_adopters)

                            if t + 1 < max_order:
                                list_single_t_evolution.append(
                                    new_num_adopters)

                            t += 3

                        elif shift_length == 2:
                            if t + 2 < max_order:
                                list_single_t_evolution.append(
                                    old_num_adopters)

                            if t + 1 < max_order:
                                list_single_t_evolution.append(
                                    new_num_adopters)

                            t += 2

                        elif shift_length == 1:
                            if t + 1 < max_order:
                                list_single_t_evolution.append(
                                    new_num_adopters)

                            t += 1

                        old_num_adopters = new_num_adopters

                        ######## end t loop

                    list_lists_t_evolutions.append(list_single_t_evolution)

                    # i only run the testing segment
                    list_dist_fixed_parameters_testing_segment.append(
                        compare_real_evol_vs_simus_to_be_called.
                        compare_two_curves(list_actual_evol_testing,
                                           list_single_t_evolution))

                    list_abs_dist_at_ending_point_fixed_parameters.append(
                        abs(list_single_t_evolution[-1] -
                            list_actual_evol_testing[-1])
                    )  # i save the distance at the ending point between the current simu and actual evol
                    list_dist_at_ending_point_fixed_parameters.append(
                        list_single_t_evolution[-1] -
                        list_actual_evol_testing[-1]
                    )  # i save the distance at the ending point between the current simu and actual evol
                    list_final_num_infected.append(list_single_t_evolution[-1])

                    # Point-by-point (signed and absolute) deviations of this
                    # realization from the empirical testing curve.
                    for index in range(len(list_single_t_evolution)):

                        list_abs_dist_point_by_point_indiv_simus_to_actual.append(
                            abs(list_single_t_evolution[index] -
                                list_actual_evol_testing[index]))
                        list_dist_point_by_point_indiv_simus_to_actual.append(
                            list_single_t_evolution[index] -
                            list_actual_evol_testing[index])

                ######## end loop Niter

                # Average the Niter trajectories time-step by time-step and
                # write them out (day index offset by cutting_day).
                file2 = open(output_file2, 'at')
                for s in range(len(list_single_t_evolution)):
                    list_fixed_t = []
                    for iter in range(Niter):
                        list_fixed_t.append(list_lists_t_evolutions[iter][s])
                    print >> file2, s + cutting_day, numpy.mean(list_fixed_t)
                file2.close()

                print "printed out: ", output_file2
                # raw_input()

                if envelopes == "YES":
                    calculate_envelope_set_curves.calculate_envelope(
                        list_lists_t_evolutions, percent_envelope, "Infection",
                        [prob_infection, prob_Immune])

                # Fraction of realizations whose final distance to the
                # empirical curve is within delta_end.
                num_valid_endings = 0.
                for item in list_abs_dist_at_ending_point_fixed_parameters:
                    if item <= delta_end:  # i count how many realizations i get close enough at the ending point
                        num_valid_endings += 1.

                print "average distance of the optimum in the testing segment:", numpy.mean(
                    list_dist_fixed_parameters_testing_segment), numpy.std(
                        list_dist_fixed_parameters_testing_segment
                    ), list_dist_fixed_parameters_testing_segment, "\n"
                print "fraction of realizations that end within delta_doctor:", num_valid_endings / Niter, "mean ending dist:", numpy.mean(
                    list_dist_at_ending_point_fixed_parameters
                ), "SD final dist", numpy.std(
                    list_dist_at_ending_point_fixed_parameters
                ), list_dist_at_ending_point_fixed_parameters, "\n"

                histogram_filename = "../Results/weight_shifts/histogr_raw_distances_ending_infection_memory_p" + str(
                    prob_infection) + "_Immune" + str(
                        prob_Immune) + "_threshold_from_distrib_dose" + str(
                            dose) + "_" + str(Niter) + "iter_day" + str(
                                cutting_day
                            ) + "_A_F_inferred_middle_real_ic.dat"

                histograma_gral_negv_posit.histograma(
                    list_dist_at_ending_point_fixed_parameters,
                    histogram_filename)

                #  histogram_filename2="../Results/weight_shifts/histogr_sum_dist_traject_infection_memory_p"+str(prob_infection)+"_Immune"+str(prob_Immune)+"_threshold_from_distrib_dose"+str(dose)+"_"+str(Niter)+"iter_day"+str(cutting_day)+"_A_F_inferred_middle.dat"

                # histograma_bines_gral.histograma_bins(list_dist_fixed_parameters,Nbins,histogram_filename2)

                histogram_filename3 = "../Results/weight_shifts/histogr_sum_dist_testing_segment_infection_memory_p" + str(
                    prob_infection) + "_Immune" + str(
                        prob_Immune) + "_threshold_from_distrib_dose" + str(
                            dose) + "_" + str(Niter) + "iter_day" + str(
                                cutting_day
                            ) + "_A_F_inferred_middle_real_ic.dat"

                #print list_dist_fixed_parameters_testing_segment
                histograma_bines_gral.histograma_bins_zero(
                    list_dist_fixed_parameters_testing_segment, Nbins,
                    histogram_filename3)

                print min(list_dist_fixed_parameters_testing_segment), max(
                    list_dist_fixed_parameters_testing_segment)

                histogram_filename4 = "../Results/weight_shifts/histogr_abs_dist_point_by_point_infection_memory_p" + str(
                    prob_infection) + "_Immune" + str(
                        prob_Immune) + "_threshold_from_distrib_dose" + str(
                            dose) + "_" + str(Niter) + "iter_day" + str(
                                cutting_day
                            ) + "_A_F_inferred_middle_real_ic.dat"

                histograma_gral_negv_posit.histograma(
                    list_abs_dist_point_by_point_indiv_simus_to_actual,
                    histogram_filename4)

                histogram_filename5 = "../Results/weight_shifts/histogr_dist_point_by_point_infection_memory_p" + str(
                    prob_infection) + "_Immune" + str(
                        prob_Immune) + "_threshold_from_distrib_dose" + str(
                            dose) + "_" + str(Niter) + "iter_day" + str(
                                cutting_day
                            ) + "_A_F_inferred_middle_real_ic.dat"

                histograma_gral_negv_posit.histograma(
                    list_dist_point_by_point_indiv_simus_to_actual,
                    histogram_filename5)

                # Human-readable summary of this parameter combination.
                output_file10 = "../Results/weight_shifts/Summary_results_infection_memory_p" + str(
                    prob_infection) + "_Immune" + str(
                        prob_Immune) + "_threshold_from_distrib_dose" + str(
                            dose) + "_" + str(Niter) + "iter_day" + str(
                                cutting_day
                            ) + "_A_F_inferred_middle_real_ic.dat"
                file10 = open(output_file10, 'wt')

                print >> file10, "Summary results from best fit infection _memory with", Niter, "iter, and with values for the parameters:  prob_inf ", prob_infection, " prob immune: ", prob_Immune, "\n"

                print >> file10, "average distance of the optimum in the testing segment:", numpy.mean(
                    list_dist_fixed_parameters_testing_segment), numpy.std(
                        list_dist_fixed_parameters_testing_segment
                    ), list_dist_fixed_parameters_testing_segment, "\n"
                print >> file10, "fraction of realizations that end within delta_doctor:", num_valid_endings / Niter, "mean ending dist:", numpy.mean(
                    list_dist_at_ending_point_fixed_parameters
                ), "SD final dist", numpy.std(
                    list_dist_at_ending_point_fixed_parameters
                ), list_dist_at_ending_point_fixed_parameters, "\n"

                print >> file10, "written optimum best fit evolution file:", output_file2
                print >> file10, "written histogram file: ", histogram_filename

                file10.close()

                print "written Summary file: ", output_file10

                dose += delta_dose
            prob_infection += delta_prob
        prob_Immune += delta_prob_Immune
def main(graph_name):
 

   G = nx.read_gml(graph_name)


   list_id_weekends_T3=look_for_T3_weekends(G)  # T3 doesnt share fellows in the weekend  (but they are the exception)

   percent_envelope=95.

   Niter=1000



   cutting_day=125

   min_sum_dist=20   # to compute number of realizations that have a sum of distances smaller than this

   Nbins=200   # for the histogram of sum of distances



   envelopes="NO"


   delta_end=3.  # >= than + or -  dr difference at the end of the evolution

   dir_real_data='../Results/'



######################################################################################
#  I read the file of the actual evolution of the idea spreading in the hospital:   ##
######################################################################################




   filename_actual_evol="../Data/Attendings_Orders_from_inference_list_adopters_day.dat"    #   "../Results/Actual_evolution_adopters_from_inference.dat"
  


   file1=open(filename_actual_evol,'r')         ## i read the file:  list_dates_and_names_current_adopters.txt  (created with: extract_real_evolution_number_adopters.py)
   list_lines_file=file1.readlines()
            


   dict_days_list_empirical_adopters={}
   list_actual_evol=[]  
   for line in list_lines_file:      # [1:]:   # i exclude the first row            
      day=int(line.split(" ")[0])       
      num_adopters= float(line.split(" ")[1])          
      list_actual_evol.append(num_adopters)
      list_current_adopters=[]
      for element in line.split(" ")[2:]:   # i need to ignore the empty columns from the original datafile
         if element:
            if element != '\n':
               list_current_adopters.append(element.strip('\n'))
     

      dict_days_list_empirical_adopters[day]=list_current_adopters
  
   


   list_actual_evol_testing=list_actual_evol[cutting_day:]

##################################################################




#../Results/weight_shifts/persuasion/Time_evolutions_Persuasion_training_alpha0.5_damping0.4_mutual_encourg0.5_threshold0.5_unif_distr_1000iter_2012_seed31Oct_finalnetwork_day125.dat
#OJO!!! NECESITO DOS DECIMALES SIEMPRE, PARA QUE CUADRE CON EL NOMBRE DE LOS SUB-DIRECTORIOS DONDE LO GUARDO

 
   alpha_F_min=0.50   #0.15   # alpha=0: nobody changes their mind
   alpha_F_max=0.501  #0.351
   delta_alpha_F=0.10    #AVOID 1.0 OR THE DYNAMICS GETS TOTALLY STUCK AND IT IS NOT ABLE TO PREDICT SHIT!
   

   min_damping=0.600   #0.0     #its harder to go back from YES to NO again. =1 means no effect, =0.5 half the movement from Y->N than the other way around, =0 never go back from Y to N
   max_damping=0.601    #0.451
   delta_damping=0.10  
   
   


   min_mutual_encouragement=0.40   #0.50  # when two Adopters meet, they convince each other even more
   max_mutual_encouragement=0.401   # 0.51   # KEEP THIS FIXED VALUES FOR NOW
   delta_mutual_encouragement=0.10
   
   
   threshold_min=0.50  #0.50  # larger than, to be an Adopte
   threshold_max=0.501  # 0.51    # KEEP THIS FIXED VALUES FOR NOW
   delta_threshold=0.10   # AVOID 1.0 OR THE DYNAMICS GETS TOTALLY STUCK AND IT IS NOT ABLE TO PREDICT SHIT
    

   
   
   print "\n\nPersuasion process on network, with Niter:",Niter
   
   
   

   threshold=threshold_min
   while   threshold<= threshold_max:
      print   "thershold:",threshold

      alpha_F=alpha_F_min
      while alpha_F<= alpha_F_max:            # i explore all the parameter space, and create a file per each set of valuesllkl
        alpha_A=1.0*alpha_F
        print "  alpha_F:",alpha_F

        mutual_encouragement=min_mutual_encouragement  
        while  mutual_encouragement <= max_mutual_encouragement:
          print "    mutual_encouragement:",mutual_encouragement

          damping=min_damping
          while   damping <= max_damping:
            print "      damping:",damping

                             
            dir="../Results/weight_shifts/persuasion/alpha%.2f_damping%.2f/"  % (alpha_F, damping )

           
            output_file=dir+"Time_evol_Persuasion_alpha"+str(alpha_F)+"_damping"+str(damping)+"_mutual"+str(mutual_encouragement)+"_threshold"+str(threshold)+"_"+str(Niter)+"iter_"+str(cutting_day)+"_A_F_inferred_middle_real_ic.dat"     


            file = open(output_file,'wt')    
            file.close()
            


            time_evol_number_adopters_ITER=[]  # list of complete single realizations of the dynamics
           
            list_dist_fixed_parameters_testing_segment=[]
            list_dist_abs_at_ending_point_fixed_parameters=[]
            list_dist_at_ending_point_fixed_parameters=[]
            list_final_num_adopt=[]
            list_abs_dist_point_by_point_indiv_simus_to_actual=[]
            list_dist_point_by_point_indiv_simus_to_actual=[]

            #list_abs_dist_at_cutting_day=[]
            for iter in range(Niter):

               # print "         ",iter
               
           

                num_realizations_sum_dist_small=0.
                time_evol_number_adopters=[]   # for a single realization of the dynamics


                num_adopters , max_shift= set_ic(G,threshold,cutting_day,dict_days_list_empirical_adopters)   

                time_evol_number_adopters.append(float(num_adopters))
                old_num_adopters=num_adopters
                
          

                
               # the dynamics starts:                 
                shift_length=5    #i know the first shift (order 0) is of length 5


                t=cutting_day
                while t<= max_shift:  # loop over shifts, in chronological order  (the order is the day index since seeding_day) 
                   # print 't:',t
                  
                    for n in G.nodes():
                       if G.node[n]['type']=="shift" and G.node[n]['order']==t:  # i look for the shift corresponding to that time step       
                            
                            shift_length=int(G.node[n]['shift_length'])
                          
                            if shift_length==2 and n not in list_id_weekends_T3:
                               shift_length=1   # because during weekends, the fellow does rounds one day with Att1 and the other day with Att2.  (weekend shifts for T3 are two day long, with no sharing fellows)

#    print "one-day weekend", G.node[n]['label'],G.node[n]['shift_length']




                            flag_possible_persuasion=0
                            for doctor in G.neighbors(n):                               
                                if G.node[doctor]["status"]=="Adopter":   #first i check if any doctor is an adopter in this shift         
                                    flag_possible_persuasion=1                               
                                    break

                            if flag_possible_persuasion==1:
                                list_doctors=[]
                                for doctor in G.neighbors(n):   # for all drs in that shift
                                    list_doctors.append(doctor)
                                
                                
                                pairs=itertools.combinations(list_doctors,2)    # cos the shift can be 2 but also 3 doctors 
                                for pair in pairs:
                                    doctor1=pair[0]
                                    doctor2=pair[1]
                                                                                        
                                    if G.node[doctor1]['status'] != G.node[doctor2]['status']:  # if they think differently, 
                                                                                              # there will be persuasion
                                        persuasion(G,damping,doctor1,doctor2,alpha_A,alpha_F,threshold,shift_length)   # i move their values of opinion                  
                                        update_opinions(G,threshold,doctor1,doctor2) #  i update status and make sure the values of the vectors stay between [0,1] 
                                  
                                    else:  # if two Adopters meet, they encourage each other (if two NonAdopters, nothing happens)
                                   
                                       mutual_reinforcement(G,mutual_encouragement,doctor1,doctor2,shift_length)
                           # else:
                            #   print "  no persuasion possible during shift (no adopters present)!"   
                               
                    list_Adopters=[]        
                    for n in G.nodes():              
                       try:
                          if  G.node[n]["status"]=="Adopter":                                                    
                             if G.node[n]["label"] not in list_Adopters :#and G.node[n]["type"]=="A":
                                list_Adopters.append(G.node[n]["label"])
                       except: pass  # if the node is a shift, it doesnt have a 'status' attribute                   
                    new_num_adopters=len(list_Adopters)


                    if  shift_length==5: # i estimate that adoption happens in the middle of the shift
                       if t+5 < max_shift:
                          time_evol_number_adopters.append(old_num_adopters) 
                       if t+4 < max_shift:
                          time_evol_number_adopters.append(old_num_adopters) 
                       if t+3 < max_shift:
                          time_evol_number_adopters.append(new_num_adopters)
                       if t+2 < max_shift:
                          time_evol_number_adopters.append(new_num_adopters) 
                       if t+1 < max_shift:
                              time_evol_number_adopters.append(new_num_adopters) 
                       t+=5
                      
        
                    elif  shift_length==4:
                        if t+4 < max_shift:
                           time_evol_number_adopters.append(old_num_adopters)                     
                        if t+3 < max_shift:
                           time_evol_number_adopters.append(old_num_adopters) 

                        if t+2 < max_shift:
                           time_evol_number_adopters.append(new_num_adopters)                       
                       
                        if t+1 < max_shift:
                           time_evol_number_adopters.append(new_num_adopters) 
                        t+=4
                      
                    elif  shift_length==3:
                        if t+3 < max_shift:
                           time_evol_number_adopters.append(old_num_adopters)                     
                       
                        if t+2 < max_shift:
                           time_evol_number_adopters.append(new_num_adopters)
                       
                        if t+1 < max_shift:
                           time_evol_number_adopters.append(new_num_adopters)
                       
                        t+=3
                      


                    elif  shift_length==2:
                        if t+2 < max_shift:
                           time_evol_number_adopters.append(old_num_adopters)                     
                       
                        if t+1 < max_shift:
                           time_evol_number_adopters.append(new_num_adopters)
                       
                      
                        t+=2
                      
                    elif  shift_length==1:                      
                        if t+1 < max_shift:
                           time_evol_number_adopters.append(new_num_adopters)                       
                       
                        t+=1
                      

                    old_num_adopters=new_num_adopters             
                   
   

                ############## end while loop over t
               

                time_evol_number_adopters_ITER.append(time_evol_number_adopters)
               
                
                # now i only run the testing segment!
                dist=compare_real_evol_vs_simus_to_be_called.compare_two_curves( list_actual_evol_testing,time_evol_number_adopters)
                list_dist_fixed_parameters_testing_segment.append(dist)
                if dist < min_sum_dist:
                   num_realizations_sum_dist_small+=1
               
                list_dist_abs_at_ending_point_fixed_parameters.append( abs(time_evol_number_adopters[-1]-list_actual_evol_testing[-1]) )
                list_dist_at_ending_point_fixed_parameters.append(time_evol_number_adopters[-1]-list_actual_evol_testing[-1]) 


                list_final_num_adopt.append(time_evol_number_adopters[-1])

                


                for  index in range(len(time_evol_number_adopters)):
                   
                   list_abs_dist_point_by_point_indiv_simus_to_actual.append(abs(time_evol_number_adopters[index]-list_actual_evol_testing[index]))
                   list_dist_point_by_point_indiv_simus_to_actual.append(time_evol_number_adopters[index]-list_actual_evol_testing[index])
               
              
             

            #######################end loop over Niter
          
         


           
            file = open(output_file,'wt')        
            for i in range(len(time_evol_number_adopters)):  #time step by time step
                list_fixed_t=[]
                for iteracion in range (Niter): #loop over all independent iter of the process
                    list_fixed_t.append(time_evol_number_adopters_ITER[iteracion][i])  # i collect all values for the same t, different iter  

                print >> file,i+cutting_day,numpy.mean(list_fixed_t),numpy.std(list_fixed_t), alpha_F,damping,mutual_encouragement       
            file.close()


            print "printed out:  ",output_file

            if envelopes=="YES":
               calculate_envelope_set_curves.calculate_envelope(time_evol_number_adopters_ITER,percent_envelope,"Persuasion",[alpha_F,damping,mutual_encouragement,threshold])
          


           
   
            num_valid_endings=0.
            for item in list_dist_abs_at_ending_point_fixed_parameters:
                  if item <= delta_end:  # i count how many realizations i get close enough at the ending point         
                     num_valid_endings+=1.
     

            print "average distance of the optimum in the testing segment:",numpy.mean(list_dist_fixed_parameters_testing_segment),numpy.std(list_dist_fixed_parameters_testing_segment),list_dist_fixed_parameters_testing_segment,"\n"
            print "fraction of realizations that end within delta_doctor:",num_valid_endings/Niter,"mean ending dist:",numpy.mean(list_dist_at_ending_point_fixed_parameters), "SD final dist",numpy.std(list_dist_at_ending_point_fixed_parameters),list_dist_at_ending_point_fixed_parameters
    


            histogram_filename="../Results/weight_shifts/histogr_raw_distances_ending_persuasion_alpha"+str(alpha_F)+"_damping"+str(damping)+"_mutual_encourg"+str(mutual_encouragement)+"_threshold"+str(threshold)+"_"+str(Niter)+"iter_day"+str(cutting_day)+"_A_F_inferred_middle_real_ic.dat"     
            histograma_gral_negv_posit.histograma(list_dist_at_ending_point_fixed_parameters, histogram_filename) 




            #   histogram_filename2="../Results/weight_shifts/histogr_sum_dist_traject_persuasion_alpha"+str(alpha_F)+"_damping"+str(damping)+"_mutual_encourg"+str(mutual_encouragement)+"_threshold"+str(threshold)+"_"+str(Niter)+"iter_alphaA_eq_alphaF_day"+str(cutting_day)+"_A_F_inferred_middle.dat"
          
             #  histograma_bines_gral.histograma_bins(list_dist_fixed_parameters,Nbins,histogram_filename2)




            histogram_filename3="../Results/weight_shifts/histogr_sum_dist_testing_segment_persuasion_alpha"+str(alpha_F)+"_damping"+str(damping)+"_mutual_encourg"+str(mutual_encouragement)+"_threshold"+str(threshold)+"_"+str(Niter)+"iter_day"+str(cutting_day)+"_A_F_inferred_middle_real_ic.dat"     
               
              
                           
            histograma_bines_gral.histograma_bins_zero(list_dist_fixed_parameters_testing_segment,Nbins,histogram_filename3)
            print min(list_dist_fixed_parameters_testing_segment),max(list_dist_fixed_parameters_testing_segment)

              



            histogram_filename4="../Results/weight_shifts/histogr_abs_dist_point_by_point_persuasion_alpha"+str(alpha_F)+"_damping"+str(damping)+"_mutual_encourg"+str(mutual_encouragement)+"_threshold"+str(threshold)+"_"+str(Niter)+"iter_day"+str(cutting_day)+"_A_F_inferred_middle_real_ic.dat"     
            
            histograma_gral_negv_posit.histograma(list_abs_dist_point_by_point_indiv_simus_to_actual, histogram_filename4)
            
            
            
            
            
            histogram_filename5="../Results/weight_shifts/histogr_dist_point_by_point_persuasion_alpha"+str(alpha_F)+"_damping"+str(damping)+"_mutual_encourg"+str(mutual_encouragement)+"_threshold"+str(threshold)+"_"+str(Niter)+"iter_day"+str(cutting_day)+"_A_F_inferred_middle_real_ic.dat"     
            
            histograma_gral_negv_posit.histograma(list_dist_point_by_point_indiv_simus_to_actual, histogram_filename5)








            output_file10="../Results/weight_shifts/Summary_results_persuasion_alpha"+str(alpha_F)+"_damping"+str(damping)+"_mutual_encourg"+str(mutual_encouragement)+"_threshold"+str(threshold)+"_"+str(Niter)+"iter_day"+str(cutting_day)+"_A_F_inferred_middle_real_ic.dat"              
            file10 = open(output_file10,'wt')    
            
            print >> file10, "Summary results from best fit persuasion with",Niter, "iter, and with values for the parameters:  alpha ",alpha_F," damping: ",damping," mutual_encourg: ",mutual_encouragement," threshold:",threshold
            
            print >> file10, "average distance of the optimum in the testing segment:",numpy.mean(list_dist_fixed_parameters_testing_segment),numpy.std(list_dist_fixed_parameters_testing_segment),list_dist_fixed_parameters_testing_segment
            print >> file10,   "fraction of realizations that end within delta_doctor:",num_valid_endings/Niter,"mean ending dist:",numpy.mean(list_dist_at_ending_point_fixed_parameters), "SD final dist",numpy.std(list_dist_at_ending_point_fixed_parameters),list_dist_at_ending_point_fixed_parameters
            
            
            print >> file10,  "written optimum train_test evolution file:",output_file
            print  >> file10,"written histogram file: ",histogram_filename           
           # print  >> file10,"written histogram file: ",histogram_filename2
            
            file10.close()
            
            
            
            
            print  "written optimum train_test evolution file:",output_file
            
            print "written summary file: ",output_file10
            
  

          
            damping += delta_damping
          mutual_encouragement += delta_mutual_encouragement
        alpha_F += delta_alpha_F
      threshold  += delta_threshold
# Exemplo n.º 7 (0)
def main(graph_name):
    """Sweep an infection-style adoption model on the hospital shift network.

    For each (prob_infection, prob_Immune) pair in the configured ranges,
    runs Niter independent stochastic realizations of the spreading dynamics
    over the time-ordered "shift" nodes of the GML graph, records the number
    of infected (adopter) attendings after every shift, and compares each
    simulated curve against the actual adopter evolution read from file.
    Writes averaged trajectories, envelope curves, histograms of distances,
    and summary files under ../Results/weight_shifts/.

    graph_name -- path to the GML file holding the bipartite shift/doctor
                  network (shift nodes carry 'order' and 'shift_lenght';
                  doctor nodes carry 'type' "A"/"F" and 'label').

    Side effects: reads the actual-evolution data file, writes several result
    files, prints progress to stdout.  (Python 2 syntax throughout.)
    """

    G = nx.read_gml(graph_name)

    for_testing_fixed_set = "YES"  # when YES, fixed values param, to get all statistics on final distances etc
    # change the range for the parameters accordingly

    envelopes = "YES"

    Niter = 1000  # 100 iter seems to be enough (no big diff. with respect to 1000it)

    percent_envelope = 95.

    list_id_weekends_T3 = look_for_T3_weekends(
        G
    )  # T3 doesnt share fellows in the weekend  (but they are the exception)
    Nbins = 20  # for the histogram of sum of distances

    cutting_day = 175  # i use this only for the filenames

    all_team = "NO"  # as adopters or not

    dir_real_data = '../Results/'
    dir = "../Results/weight_shifts/infection/"  # NOTE(review): shadows the builtin dir()

    delta_end = 3.  # >= than + or -  dr difference at the end of the evolution (NO realization ends up closer than this!!!! if 2, i get and empty list!!!)

    # Landscape file is only produced when exploring the whole parameter space.
    if for_testing_fixed_set == "NO":
        output_file3 = "../Results/weight_shifts/Landscape_parameters_infection_" + str(
            Niter) + "iter.dat"
        file3 = open(output_file3, 'wt')

        file3.close()

######################################################################################
#  I read the file of the actual evolution of the idea spreading in the hospital:   ##
######################################################################################

    if all_team == "YES":
        print "remember that now i use the file of adopters without fellows\n../Results/Actual_evolution_adopters_NO_fellows_only_attendings.dat"
        exit()

    else:
        filename_actual_evol = "../Results/Actual_evolution_adopters_NO_fellows_only_attendings.dat"

    file1 = open(
        filename_actual_evol, 'r'
    )  ## i read the file:  list_dates_and_names_current_adopters.txt  (created with: extract_real_evolution_number_adopters.py)
    list_lines_file = file1.readlines()

    # Second space-separated column of each line is the adopter count.
    list_actual_evol = []
    for line in list_lines_file:  # [1:]:   # i exclude the first row

        num_adopters = float(line.split(" ")[1])
        list_actual_evol.append(num_adopters)

##################################################################

#../Results/weight_shifts/infection/Average_time_evolution_Infection_training_p0.8_Immune0.3_1000iter_2012_avg_ic_day125.dat  THESE VALUES ARE THE OPTIMUM FIT FOR THE 152 DAYS
    # Ranges below span a single (p, Immune) point: the previously-found optimum.
    prob_min = 0.1
    prob_max = 0.101
    delta_prob = 0.1

    prob_Immune_min = 0.00
    prob_Immune_max = 0.001
    delta_prob_Immune = 0.1

    dict_filenames_tot_distance = {
    }  # i will save the filename as key and the tot distance from that curve to the original one

    prob_Immune = prob_Immune_min
    while prob_Immune <= prob_Immune_max:

        print "prom Immune:", prob_Immune

        prob_infection = prob_min
        while prob_infection <= prob_max:

            print "  p:", prob_infection

            # Output filename depends on whether we run the fixed-set statistics
            # or the full landscape exploration.
            if for_testing_fixed_set == "YES":
                output_file2 = dir + "Average_time_evolution_Infection_train_test_p" + str(
                    prob_infection) + "_" + "Immune" + str(
                        prob_Immune) + "_" + str(Niter) + "iter_2012.dat"

            else:
                output_file2 = dir + "Average_time_evolution_Infection_p" + str(
                    prob_infection) + "_" + "Immune" + str(
                        prob_Immune) + "_" + str(Niter) + "iter_2012.dat"

            file2 = open(output_file2, 'wt')
            file2.close()

            #  list_final_I_values_fixed_p=[]  # i dont care about the final values right now, but about the whole time evol
            list_lists_t_evolutions = []  # one time-series per realization

            list_dist_fixed_parameters = []
            list_abs_dist_at_ending_point_fixed_parameters = []
            list_dist_at_ending_point_fixed_parameters = []
            list_final_num_infected = []

            #   list_abs_dist_at_cutting_day=[]

            for iter in range(Niter):  # NB: loop variable shadows builtin iter()

                #print "     iter:",iter

                ####### NOTE!!! comment this out when sweeping the WHOLE parameter space
                #    file_name_indiv_evol=output_file2.strip("Average_").split('.dat')[0]+"_indiv_iter"+str(iter)+".dat"

                #   file4 = open(file_name_indiv_evol,'wt')
                #  file4.close()
                ##########################################

                ########### set I.C.  (all susceptible; Weiss & Wunderink seeded as infected)

                list_I = []  #list infected doctors
                max_order = 0
                for n in G.nodes():
                    G.node[n]["status"] = "S"  # all nodes are Susceptible
                    if G.node[n]['type'] == "shift":
                        if G.node[n]['order'] > max_order:
                            max_order = G.node[n][
                                'order']  # to get the last shift-order for the time loop
                    else:
                        if G.node[n]['label'] == "Wunderink" or G.node[n][
                                "label"] == "Weiss":
                            G.node[n]["status"] = "I"
                            list_I.append(G.node[n]['label'])

                list_single_t_evolution = []
                list_single_t_evolution.append(
                    2.0)  # I always start with TWO infected doctors!!

                for n in G.nodes(
                ):  # i make some DOCTORs INMUNE  (anyone except Weiss and Wunderink)
                    if (G.node[n]['type'] == "A") or (G.node[n]['type']
                                                      == "F"):
                        if G.node[n]['label'] != "Wunderink" and G.node[n][
                                "label"] != "Weiss":
                            rand = random.random()
                            if rand < prob_Immune:
                                G.node[n]["status"] = "Immune"

                ################# the dynamics starts:

                t = 1
                while t <= max_order:  # loop over shifts, in order
                    for n in G.nodes():
                        if G.node[n]['type'] == "shift" and G.node[n][
                                'order'] == t:

                            shift_lenght = int(G.node[n]['shift_lenght'])

                            if shift_lenght == 2 and n not in list_id_weekends_T3:
                                shift_lenght = 1  # because during weekends, the fellow does rounds one day with Att1 and the other day with Att2.  (weekend shifts for T3 are two day long, with no sharing fellows)
                            #  print "one-day weekend", G.node[n]['label'],G.node[n]['shift_lenght']

                            flag_possible_infection = 0
                            for doctor in G.neighbors(
                                    n
                            ):  #first i check if any doctor is infected in this shift
                                if G.node[doctor]["status"] == "I":
                                    flag_possible_infection = 1

                            if flag_possible_infection:
                                for doctor in G.neighbors(
                                        n
                                ):  # then the doctors in that shift, gets infected with prob_infection

                                    for i in range(
                                            shift_lenght
                                    ):  # i repeat the infection process several times, to acount for shift lenght
                                        if G.node[doctor]["status"] == "S":
                                            rand = random.random()
                                            if rand < prob_infection:
                                                G.node[doctor]["status"] = "I"

                                                if G.node[doctor][
                                                        "type"] == "A":  # fellows participate in the dynamics, but i only consider the attendings as real adopters
                                                    list_I.append(
                                                        G.node[doctor]
                                                        ["label"])

                #  if for_testing_fixed_set=="YES":
                #    if t==cutting_day:
                #      list_abs_dist_at_cutting_day.append(abs(float(list_actual_evol[-1])-float(len(list_I))))
                #     print abs(float(list_actual_evol[-1])-float(len(list_I))), float(list_actual_evol[t]),float(len(list_I))

                    # one sample of the adopter count per shift-time t
                    list_single_t_evolution.append(float(len(list_I)))

                    t += 1

                    ######## end t loop

                ######## NOTE!!! comment this out when sweeping the WHOLE parameter space
            # file4 = open(file_name_indiv_evol,'at')
            #for i in range(len(list_single_t_evolution)):  #time step by time step
            #  print >> file4, i,list_single_t_evolution[i], prob_infection, prob_Immune
            #file4.close()
            ########################################################

                list_lists_t_evolutions.append(list_single_t_evolution)

                list_dist_fixed_parameters.append(
                    compare_real_evol_vs_simus_to_be_called.compare_two_curves(
                        list_actual_evol, list_single_t_evolution))

                list_abs_dist_at_ending_point_fixed_parameters.append(
                    abs(list_single_t_evolution[-1] - list_actual_evol[-1])
                )  # i save the distance at the ending point between the current simu and actual evol
                list_dist_at_ending_point_fixed_parameters.append(
                    list_single_t_evolution[-1] - list_actual_evol[-1]
                )  # i save the distance at the ending point between the current simu and actual evol
                list_final_num_infected.append(list_single_t_evolution[-1])

            ######## end loop Niter

            # Aggregate statistics for this (p, Immune) pair:
            # [mean curve distance, std curve distance, mean abs ending-point distance]
            list_pair_dist_std_delta_end = []

            list_pair_dist_std_delta_end.append(
                numpy.mean(list_dist_fixed_parameters)
            )  # average dist between the curves over Niter
            list_pair_dist_std_delta_end.append(
                numpy.std(list_dist_fixed_parameters))

            list_pair_dist_std_delta_end.append(
                numpy.mean(list_abs_dist_at_ending_point_fixed_parameters))

            if for_testing_fixed_set == "NO":
                file3 = open(output_file3, 'at')  # i print out the landscape
                print >> file3, prob_infection, prob_Immune, numpy.mean(
                    list_abs_dist_at_ending_point_fixed_parameters
                ), numpy.mean(list_dist_fixed_parameters), numpy.mean(
                    list_final_num_infected), numpy.std(
                        list_final_num_infected)
                file3.close()

            if (
                    numpy.mean(list_abs_dist_at_ending_point_fixed_parameters)
            ) <= delta_end:  # i only consider situations close enough at the ending point

                dict_filenames_tot_distance[
                    output_file2] = list_pair_dist_std_delta_end

            # Average the Niter realizations time step by time step and write
            # the mean trajectory out.
            file2 = open(output_file2, 'at')
            for s in range(len(list_single_t_evolution)):
                list_fixed_t = []
                for iter in range(Niter):
                    list_fixed_t.append(list_lists_t_evolutions[iter][s])
                print >> file2, s, numpy.mean(list_fixed_t)
            file2.close()

            print "printed out: ", output_file2
            # raw_input()

            if envelopes == "YES":
                calculate_envelope_set_curves.calculate_envelope(
                    list_lists_t_evolutions, percent_envelope, "Infection",
                    [prob_infection, prob_Immune])

            if for_testing_fixed_set == "YES":

                num_valid_endings = 0.
                for item in list_abs_dist_at_ending_point_fixed_parameters:
                    if item <= delta_end:  # i count how many realizations i get close enough at the ending point
                        num_valid_endings += 1.

                print "average distance of the optimum in the testing segment:", numpy.mean(
                    list_dist_fixed_parameters), numpy.std(
                        list_dist_fixed_parameters
                    ), list_dist_fixed_parameters, "\n"
                print "fraction of realizations that end within delta_doctor:", num_valid_endings / Niter, "mean ending dist:", numpy.mean(
                    list_dist_at_ending_point_fixed_parameters
                ), "SD final dist", numpy.std(
                    list_dist_at_ending_point_fixed_parameters
                ), list_dist_at_ending_point_fixed_parameters, "\n"

                # Histogram of signed ending-point distances.
                histogram_filename = "../Results/weight_shifts/histogr_raw_distances_ending_infection_p" + str(
                    prob_infection) + "_" + "Immune" + str(
                        prob_Immune) + "_" + str(Niter) + "iter_day" + str(
                            cutting_day) + ".dat"
                histograma_gral_negv_posit.histograma(
                    list_dist_at_ending_point_fixed_parameters,
                    histogram_filename)

                # Histogram of summed trajectory distances.
                histogram_filename2 = "../Results/weight_shifts/histogr_sum_dist_traject_infection_p" + str(
                    prob_infection) + "_" + "Immune" + str(
                        prob_Immune) + "_" + str(Niter) + "iter_day" + str(
                            cutting_day) + ".dat"

                histograma_bines_gral.histograma_bins(
                    list_dist_fixed_parameters, Nbins, histogram_filename2)

                output_file10 = "../Results/weight_shifts/Summary_results_training_segment_infection_p" + str(
                    prob_infection) + "_" + "Immune" + str(
                        prob_Immune) + "_" + str(Niter) + "iter_day" + str(
                            cutting_day) + ".dat"
                file10 = open(output_file10, 'wt')

                print >> file10, "Summary results from train-testing infection with", Niter, "iter, and with values for the parameters:  prob_inf ", prob_infection, " prob immune: ", prob_Immune, "\n"

                print >> file10, "average distance of the optimum in the testing segment:", numpy.mean(
                    list_dist_fixed_parameters), numpy.std(
                        list_dist_fixed_parameters
                    ), list_dist_fixed_parameters, "\n"
                print >> file10, "fraction of realizations that end within delta_doctor:", num_valid_endings / Niter, "mean ending dist:", numpy.mean(
                    list_dist_at_ending_point_fixed_parameters
                ), "SD final dist", numpy.std(
                    list_dist_at_ending_point_fixed_parameters
                ), list_dist_at_ending_point_fixed_parameters, "\n"

                print >> file10, "written optimum train_test evolution file:", output_file2
                print >> file10, "written histogram file: ", histogram_filename

                file10.close()

                print "written Summary file: ", output_file10
                print "written histogram file: ", histogram_filename
                print "written histogram file: ", histogram_filename2

            prob_infection += delta_prob
        prob_Immune += delta_prob_Immune

    if for_testing_fixed_set == "NO":  # only if i am exploring the whole landscape, i need to call this function, otherwise, i already know the optimum
        compare_real_evol_vs_simus_to_be_called.pick_minimum_same_end(
            dict_filenames_tot_distance, "Infection_weight", all_team, Niter,
            None)  # last argument doesnt apply (cutting day)

    if for_testing_fixed_set == "NO":
        print "written landscape file:", output_file3
def main(graph_name):

    cutting_day = 175  # to separate   training-testing

    G = nx.read_gml(graph_name)

    list_id_weekends_T3 = look_for_T3_weekends(
        G
    )  # T3 doesnt share fellows in the weekend  (but they are the exception)

    all_team = "NO"  # as adopters or not
    Nbins = 20  # for the histogram of sum of distances

    dir_real_data = '../Results/'

    dir = "../Results/weight_shifts/infection/"

    delta_end = 3.  # >= than + or -  dr difference at the end of the evolution (NO realization ends up closer than this!!!! if 2, i get and empty list!!!)

    Niter_training = 1000

    fixed_param = ""  #"FIXED_Pimm0_"    # or ""  # for the Results file that contains the sorted list of best parameters

    output_file3 = "../Results/weight_shifts/Landscape_parameters_infection_train_test_" + str(
        Niter_training) + "iter.dat"
    file3 = open(output_file3, 'wt')

    file3.close()

    ######################################################################################
    #  I read the file of the actual evolution of the idea spreading in the hospital:   ##
    ######################################################################################

    if all_team == "YES":
        print "remember that now i use the file of adopters without fellows\n../Results/Actual_evolution_adopters_NO_fellows_only_attendings.dat"
        exit()

    else:
        filename_actual_evol = "../Results/Actual_evolution_adopters_NO_fellows_only_attendings.dat"

    file1 = open(
        filename_actual_evol, 'r'
    )  ## i read the file:  list_dates_and_names_current_adopters.txt  (created with: extract_real_evolution_number_adopters.py)
    list_lines_file = file1.readlines()

    list_actual_evol = []
    for line in list_lines_file:  # [1:]:   # i exclude the first row

        num_adopters = float(line.split(" ")[1])
        list_actual_evol.append(num_adopters)

    list_actual_evol_training = list_actual_evol[:cutting_day]
    #   list_actual_evol_testing=list_actual_evol[(cutting_day-1):]   #i dont use this

    ##################################################################

    #../Results/network_final_schedule_withTeam3/infection/Average_time_evolution_Infection_p0.9_Immune0.5_1000iter_2012.dat

    prob_min = 0.0
    prob_max = 1.01
    delta_prob = 0.1

    prob_Immune_min = 0.00
    prob_Immune_max = 1.01
    delta_prob_Immune = 0.1

    list_dist_at_ending_point_fixed_parameters = []
    dict_filenames_tot_distance = {
    }  # i will save the filename as key and the tot distance from that curve to the original one
    dict_filenames_prod_distances = {}

    prob_Immune = prob_Immune_min
    while prob_Immune <= prob_Immune_max:

        print "prom Immune:", prob_Immune

        prob_infection = prob_min
        while prob_infection <= prob_max:

            print "  p:", prob_infection

            output_file2 = dir + "Average_time_evolution_Infection_training_p" + str(
                prob_infection) + "_" + "Immune" + str(
                    prob_Immune) + "_" + str(
                        Niter_training) + "iter_2012_avg_ic_day" + str(
                            cutting_day) + ".dat"
            #   file2 = open(output_file2,'wt')                                          I DONT NEED TO WRITE IT, COS I WILL USE THE WHOLE FILE FROM THE WHOLE FIT, WITH THE PARAMETER VALUES THAT THE TESTING-UP-TODAY-125 TELLS ME
            #  file2.close()

            # i create the empty list of list for the Niter temporal evolutions
            num_shifts = 0
            num_Drs = 0.
            for n in G.nodes():
                G.node[n]["status"] = "S"
                if G.node[n]['type'] == "shift":
                    num_shifts += 1
                else:
                    num_Drs += 1.

        #  list_final_I_values_fixed_p=[]  # i dont care about the final values right now, but about the whole time evol
            list_lists_t_evolutions = []

            list_dist_fixed_parameters = []
            list_dist_abs_at_ending_point_fixed_parameters = []
            list_final_num_infected = []

            for iter in range(Niter_training):

                #   print "     iter:",iter

                list_I = []  #list infected doctors
                list_ordering = []
                list_s = []
                list_A = []
                list_F = []

                ########### set I.C.

                max_order = 0
                for n in G.nodes():
                    G.node[n]["status"] = "S"  # all nodes are Susceptible
                    if G.node[n]['type'] == "shift":
                        list_s.append(n)
                        if G.node[n]['order'] > max_order:
                            max_order = G.node[n]['order']
                    else:
                        if G.node[n]['label'] == "Wunderink" or G.node[n][
                                "label"] == "Weiss":
                            G.node[n]["status"] = "I"
                            list_I.append(G.node[n]['label'])

                        if G.node[n]['type'] == "A":
                            list_A.append(n)

                        if G.node[n]['type'] == "F":
                            list_F.append(n)

                list_single_t_evolution = []
                list_single_t_evolution.append(
                    2.0)  # I always start with TWO infected doctors!!

                for n in G.nodes(
                ):  # i make some DOCTORs INMUNE  (anyone except Weiss and Wunderink)
                    if (G.node[n]['type'] == "A") or (G.node[n]['type']
                                                      == "F"):
                        if G.node[n]['label'] != "Wunderink" and G.node[n][
                                "label"] != "Weiss":
                            rand = random.random()
                            if rand < prob_Immune:
                                G.node[n]["status"] = "Immune"

            #   print max_order

            ################# the dynamics starts:

                t = 1
                while t < cutting_day:  # loop over shifts, in order   just until cutting day (training segment)
                    for n in G.nodes():
                        if G.node[n]['type'] == "shift" and G.node[n][
                                'order'] == t:

                            shift_lenght = int(G.node[n]['shift_lenght'])

                            if shift_lenght == 2 and n not in list_id_weekends_T3:
                                shift_lenght = 1  # because during weekends, the fellow does rounds one day with Att1 and the other day with Att2.  (weekend shifts for T3 are two day long, with no sharing fellows)
                            #  print "one-day weekend", G.node[n]['label'],G.node[n]['shift_lenght']

                            flag_possible_infection = 0
                            for doctor in G.neighbors(
                                    n
                            ):  #first i check if any doctor is infected in this shift
                                if G.node[doctor]["status"] == "I":
                                    flag_possible_infection = 1

                            if flag_possible_infection:
                                for doctor in G.neighbors(
                                        n
                                ):  # then the doctors in that shift, gets infected with prob_infection
                                    for i in range(shift_lenght):
                                        if G.node[doctor]["status"] == "S":
                                            rand = random.random()
                                            if rand < prob_infection:
                                                G.node[doctor]["status"] = "I"
                                                if G.node[doctor][
                                                        "type"] == "A":
                                                    list_I.append(
                                                        G.node[doctor]
                                                        ["label"])

                    list_single_t_evolution.append(float(
                        len(list_I)))  #/(len(list_A)+len(list_F)))

                    t += 1

                ######## end t loop

                list_lists_t_evolutions.append(list_single_t_evolution)

                list_dist_fixed_parameters.append(
                    compare_real_evol_vs_simus_to_be_called.compare_two_curves(
                        list_actual_evol_training, list_single_t_evolution))

                list_dist_abs_at_ending_point_fixed_parameters.append(
                    abs(list_single_t_evolution[-1] -
                        list_actual_evol_training[-1])
                )  # i save the distance at the ending point between the current simu and actual evol

                #  print "actual:",len(list_actual_evol_training),"  simu:",len(list_single_t_evolution)   # 125, 125

                list_final_num_infected.append(list_single_t_evolution[-1])

                list_dist_at_ending_point_fixed_parameters.append(
                    list_single_t_evolution[-1] - list_actual_evol_training[-1]
                )  # i save the distance at the ending point between the current simu and actual evol

            ######## end loop Niter for the training fase

            list_pair_dist_std_delta_end = []

            list_pair_dist_std_delta_end.append(
                numpy.mean(list_dist_fixed_parameters)
            )  # average dist between the curves over Niter
            list_pair_dist_std_delta_end.append(
                numpy.std(list_dist_fixed_parameters))

            list_pair_dist_std_delta_end.append(
                numpy.mean(list_dist_abs_at_ending_point_fixed_parameters))

            file3 = open(output_file3, 'at')  # i print out the landscape
            print >> file3, prob_infection, prob_Immune, numpy.mean(
                list_dist_abs_at_ending_point_fixed_parameters
            ), numpy.mean(list_dist_fixed_parameters), numpy.mean(
                list_final_num_infected
            ), numpy.std(list_final_num_infected), numpy.std(
                list_final_num_infected) / numpy.mean(list_final_num_infected)
            file3.close()

            histogram_filename = "../Results/weight_shifts/histogr_raw_distances_ending_test_train_infection_p" + str(
                prob_infection) + "_Immune" + str(prob_Immune) + "_" + str(
                    Niter_training) + "iter_day" + str(cutting_day) + ".dat"
            histograma_gral_negv_posit.histograma(
                list_dist_at_ending_point_fixed_parameters, histogram_filename)

            histogram_filename2 = "../Results/weight_shifts/histogr_sum_dist_traject_infection_training_p" + str(
                prob_infection
            ) + "_" + "Immune" + str(prob_Immune) + "_" + str(
                Niter_training) + "iter_day" + str(cutting_day) + ".dat"

            histograma_bines_gral.histograma_bins(list_dist_fixed_parameters,
                                                  Nbins, histogram_filename2)

            print "written histogram file: ", histogram_filename
            print "written histogram file: ", histogram_filename2

            value = numpy.mean(list_dist_fixed_parameters) * numpy.mean(
                list_dist_abs_at_ending_point_fixed_parameters
            )  # if SD=0, it is a problem, because then that is the minimun value, but not the optimum i am looking for!!

            dict_filenames_prod_distances[output_file2] = value

            if (
                    numpy.mean(list_dist_abs_at_ending_point_fixed_parameters)
            ) <= delta_end:  # i only consider situations close enough at the ending point

                dict_filenames_tot_distance[
                    output_file2] = list_pair_dist_std_delta_end

                print numpy.mean(list_dist_abs_at_ending_point_fixed_parameters
                                 ), "added scenario:", output_file2

        # file2 = open(output_file2,'at')
        #for s in range(len(list_single_t_evolution)):
        #   list_fixed_t=[]
        #  for iter in range (Niter_training):
        #     list_fixed_t.append(list_lists_t_evolutions[iter][s])
        #print >> file2, s,numpy.mean(list_fixed_t)
        #file2.close()

            prob_infection += delta_prob
        prob_Immune += delta_prob_Immune

    list_order_dict = compare_real_evol_vs_simus_to_be_called.pick_minimum_same_end(
        dict_filenames_tot_distance, "Infection_training_weight", all_team,
        Niter_training, cutting_day)

    # it returns a list of tuples like this :  ('../Results/network_final_schedule_withTeam3_local/infection/Average_time_evolution_Infection_training_p0.7_Immune0.0_2iter_2012.dat', [2540.0, 208.0, 1.0])  the best set of parameters  being the fist one of the elements in the list.

    string_name = "infection_training_" + fixed_param + str(
        Niter_training) + "iter_day" + str(
            cutting_day
        ) + ".dat"  # for the "Results" file with the sorted list of files

    list_order_dict2 = compare_real_evol_vs_simus_to_be_called.pick_minimum_prod_distances(
        dict_filenames_prod_distances, string_name, all_team, Niter_training,
        cutting_day)

    optimum_filename = list_order_dict[0][0]
    prob_infection = float(list_order_dict[0][0].split("_p")[1].split("_")[0])
    prob_Immune = float(
        list_order_dict[0][0].split("_Immune")[1].split("_")[0])

    print "Optimum parameters (old method) at day", cutting_day, " are: p=", prob_infection, " and Pimmune=", prob_Immune

    #  i already know the optimum, now i run the dynamics with those values, starting from the average state on the cutting point, and test:

    optimum_filename = list_order_dict2[0][0]
    prob_infection = float(list_order_dict2[0][0].split("_p")[1].split("_")[0])
    prob_Immune = float(
        list_order_dict2[0][0].split("_Immune")[1].split("_")[0])

    print "Optimum parameters (product of distances along_traject and at the end) at day", cutting_day, " are: p=", prob_infection, " and Pimmune=", prob_Immune

    print "Run that simulation with the optimum parameter set:", optimum_filename

    print "printed out landscape file:", output_file3

    output_file10 = "../Results/weight_shifts/Summary_results_training_segment_infection_p" + str(
        prob_infection) + "_" + "Immune" + str(prob_Immune) + "_" + str(
            Niter_training) + "iter_avg_ic_day" + str(cutting_day) + ".dat"
    file10 = open(output_file10, 'wt')

    print >> file10, "Summary results from train-testing persuasion with", Niter_training, "iter , using all the individual cutting points as IC, and with values for the parameters:  prob_inf ", prob_infection, " prob immune: ", prob_Immune, "\n"

    print >> file10, "Look for the file (or run that simulation) with the optimum parameter set:", optimum_filename
    file10.close()
def main(graph_name):
    """Run the persuasion (opinion-dynamics) model on the doctor-shift network
    read from *graph_name* (GML file), sweeping the model parameters
    (alpha, damping, mutual encouragement, adoption threshold), averaging the
    adoption curve over Niter stochastic realizations per parameter set, and
    comparing each averaged curve to the actual adoption curve read from disk.

    Depending on ``for_testing_fixed_set``:
      * "NO"  -- sweep the parameter landscape, record distances per parameter
                 set and pick the minimum via the compare module;
      * "YES" -- run a single fixed parameter set and print/save detailed
                 distance statistics and histograms (test mode).

    NOTE(review): relies on module-level helpers defined elsewhere in this
    file/package (look_for_T3_weekends, set_ic, persuasion, update_opinions,
    mutual_reinforcement, compare_real_evol_vs_simus_to_be_called,
    histograma_* modules) and on Python 2 print syntax.
    """

    G = nx.read_gml(graph_name)

    list_id_weekends_T3 = look_for_T3_weekends(
        G
    )  # T3 doesnt share fellows in the weekend  (but they are the exception)

    # --- run configuration ----------------------------------------------
    percent_envelope = 95.
    Niter = 1000  # number of independent stochastic realizations per parameter set

    cutting_day = 125  # day separating training and testing segments

    Nbins = 200  # for the histogram of sum of distances

    for_testing_fixed_set = "YES"  # when YES, fixed values param and get all statistics on final distances etc

    envelopes = "NO"

    delta_end = 3.  # >= than + or -  dr difference at the end of the evolution

    dir_real_data = '../Results/'

    all_team = "NO"  # as adopters or not  NO now means i use the file without fellows, only attendings

    # landscape output file only needed when sweeping the parameter space
    if for_testing_fixed_set == "NO":
        output_file3 = "../Results/weight_shifts/Landscape_parameters_persuasion_" + str(
            Niter) + "iter_A_F_inferred.dat"
        file3 = open(output_file3, 'wt')
        file3.close()

######################################################################################
#  I read the file of the actual evolution of the idea spreading in the hospital:   ##
######################################################################################

    if all_team == "YES":
        print "remember that now i use the file of adopters without fellows\n../Results/Actual_evolution_adopters_NO_fellows_only_attendings.dat"
        exit()

    else:
        filename_actual_evol = "../Results/Actual_evolution_adopters_from_inference.dat"

    file1 = open(
        filename_actual_evol, 'r'
    )  ## i read the file:  list_dates_and_names_current_adopters.txt  (created with: extract_real_evolution_number_adopters.py)
    list_lines_file = file1.readlines()

    # one adopter count per line; the file is tab-separated with the count in column 2
    list_actual_evol = []
    for line in list_lines_file:  # [1:]:   # i exclude the first row

        num_adopters = float(line.split("\t")[1])
        list_actual_evol.append(num_adopters)

##################################################################

#../Results/weight_shifts/persuasion/Time_evolutions_Persuasion_training_alpha0.2_damping0.0_mutual_encourg0.5_threshold0.7_unif_distr_1000iter_2012_seed31Oct_finalnetwork_day125.dat

#../Results/weight_shifts/persuasion/Time_evolutions_Persuasion_training_alpha0.5_damping0.4_mutual_encourg0.5_threshold0.5_unif_distr_1000iter_2012_seed31Oct_finalnetwork_day125.dat
# NOTE: the parameter values ALWAYS need two decimals, so that they match
# the names of the sub-directories where the results are saved

    # --- parameter ranges (min/max/step per swept parameter) ------------
    alpha_F_min = 0.10  #0.15   # alpha=0: nobody changes their mind
    alpha_F_max = 0.101  #0.351
    delta_alpha_F = 0.10  #AVOID 1.0 OR THE DYNAMICS GETS TOTALLY STUCK AND IT IS NOT ABLE TO PREDICT SHIT!

    min_damping = 0.00  #0.0     #its harder to go back from YES to NO again. =1 means no effect, =0.5 half the movement from Y->N than the other way around, =0 never go back from Y to N
    max_damping = 0.001  #0.451
    delta_damping = 0.10

    min_mutual_encouragement = 0.000  #0.50  # when two Adopters meet, they convince each other even more
    max_mutual_encouragement = 0.001  # 0.51   # KEEP THIS FIXED VALUES FOR NOW
    delta_mutual_encouragement = 0.10

    threshold_min = 0.50  #0.50  # larger than, to be an Adopte
    threshold_max = 0.501  # 0.51    # KEEP THIS FIXED VALUES FOR NOW
    delta_threshold = 0.10  # AVOID 1.0 OR THE DYNAMICS GETS TOTALLY STUCK AND IT IS NOT ABLE TO PREDICT SHIT

    print "\n\nPersuasion process on network, with Niter:", Niter

    dict_filenames_tot_distance = {
    }  # i will save the filename as key and the tot distance from that curve to the original one

    # --- 4-level nested sweep: threshold > alpha_F > mutual_encouragement > damping
    threshold = threshold_min
    while threshold <= threshold_max:
        print "thershold:", threshold

        alpha_F = alpha_F_min
        while alpha_F <= alpha_F_max:  # i explore all the parameter space, and create a file per each set of values
            alpha_A = 1.0 * alpha_F  # attendings and fellows share the same alpha here
            print "  alpha_F:", alpha_F

            mutual_encouragement = min_mutual_encouragement
            while mutual_encouragement <= max_mutual_encouragement:
                print "    mutual_encouragement:", mutual_encouragement

                damping = min_damping
                while damping <= max_damping:
                    print "      damping:", damping

                    dir = "../Results/weight_shifts/persuasion/alpha%.2f_damping%.2f/" % (
                        alpha_F, damping)

                    # NOTE(review): both branches of this if/else build the
                    # exact same filename -- the branch is currently redundant.
                    if for_testing_fixed_set == "YES":
                        output_file = dir + "Time_evol_Persuasion_alpha" + str(
                            alpha_F
                        ) + "_damping" + str(damping) + "_mutual" + str(
                            mutual_encouragement
                        ) + "_threshold" + str(threshold) + "_" + str(
                            Niter) + "iter_alphaA_eq_alphaF_A_F_inferred.dat"

                    else:
                        output_file = dir + "Time_evol_Persuasion_alpha" + str(
                            alpha_F
                        ) + "_damping" + str(damping) + "_mutual" + str(
                            mutual_encouragement
                        ) + "_threshold" + str(threshold) + "_" + str(
                            Niter) + "iter_alphaA_eq_alphaF_A_F_inferred.dat"

                    # truncate/create the per-parameter-set output file
                    file = open(output_file, 'wt')
                    file.close()

                    # accumulators over the Niter realizations for this parameter set
                    time_evol_number_adopters_ITER = [
                    ]  # list of complete single realizations of the dynamics
                    list_dist_fixed_parameters = []
                    list_dist_fixed_parameters_testing_segment = []
                    list_dist_abs_at_ending_point_fixed_parameters = []
                    list_dist_at_ending_point_fixed_parameters = []
                    list_final_num_adopt = []
                    list_abs_dist_point_by_point_indiv_simus_to_actual = []
                    list_dist_point_by_point_indiv_simus_to_actual = []

                    #list_abs_dist_at_cutting_day=[]
                    for iter in range(Niter):  # Monte Carlo repetitions

                        print "         ", iter
                        list_t = []

                        time_evol_number_adopters = [
                        ]  # for a single realization of the dynamics

                        num_adopters, seed_shift, max_shift = set_ic(
                            G, threshold
                        )  # i establish who is Adopter and NonAdopter initially, and count how many shifts i have total

                        time_evol_number_adopters.append(float(num_adopters))
                        # print "initial number of adopters:", num_adopters
                        list_t.append(0)

                        ######## NOTE: comment this out when sweeping the WHOLE parameter space
                        #                file4 = open(output_file.split('.dat')[0]+"_indiv_iter"+str(iter)+".dat",'wt')
                        #               file4.close()
                        ##########################################

                        # the dynamics starts:
                        t = int(seed_shift
                                ) + 1  # the first time step is just IC.???

                        while t <= max_shift:  # loop over shifts, in chronological order  (the order is the day index since seeding_day)
                            # print 't:',t
                            list_t.append(t)
                            for n in G.nodes():
                                if G.node[n]['type'] == "shift" and G.node[n][
                                        'order'] == t:  # i look for the shift corresponding to that time step

                                    shift_length = int(
                                        G.node[n]['shift_length'])

                                    if shift_length == 2 and n not in list_id_weekends_T3:
                                        shift_length = 1  # because during weekends, the fellow does rounds one day with Att1 and the other day with Att2.  (weekend shifts for T3 are two day long, with no sharing fellows)

#    print "one-day weekend", G.node[n]['label'],G.node[n]['shift_length']

                                    flag_possible_persuasion = 0
                                    for doctor in G.neighbors(n):
                                        if G.node[doctor][
                                                "status"] == "Adopter":  #first i check if any doctor is an adopter in this shift
                                            flag_possible_persuasion = 1
                                            break

                                    if flag_possible_persuasion == 1:
                                        list_doctors = []
                                        for doctor in G.neighbors(
                                                n
                                        ):  # for all drs in that shift
                                            list_doctors.append(doctor)

                                        pairs = itertools.combinations(
                                            list_doctors, 2
                                        )  # cos the shift can be 2 but also 3 doctors
                                        for pair in pairs:
                                            doctor1 = pair[0]
                                            doctor2 = pair[1]

                                            if G.node[doctor1][
                                                    'status'] != G.node[doctor2][
                                                        'status']:  # if they think differently,
                                                # there will be persuasion
                                                persuasion(
                                                    G, damping, doctor1,
                                                    doctor2, alpha_A, alpha_F,
                                                    threshold, shift_length
                                                )  # i move their values of opinion
                                                update_opinions(
                                                    G, threshold, doctor1,
                                                    doctor2
                                                )  #  i update status and make sure the values of the vectors stay between [0,1]

                                            else:  # if two Adopters meet, they encourage each other (if two NonAdopters, nothing happens)

                                                mutual_reinforcement(
                                                    G, mutual_encouragement,
                                                    doctor1, doctor2,
                                                    shift_length)
                                # else:
                                #   print "  no persuasion possible during shift (no adopters present)!"

                            # count distinct adopters (by label) at this time step
                            list_Adopters = [
                            ]  #count how many i have at this time
                            for n in G.nodes():
                                try:
                                    if G.node[n]["status"] == "Adopter":
                                        if G.node[n][
                                                "label"] not in list_Adopters:  # and G.node[n]["type"]=="A":
                                            list_Adopters.append(
                                                G.node[n]["label"])
                                except:
                                    pass  # if the node is a shift, it doesnt have a 'status' attribute

                        #  if for_testing_fixed_set=="YES":
                        #    if t==cutting_day:
                        #      list_abs_dist_at_cutting_day.append(abs(float(list_actual_evol[-1])-float(len(list_Adopters))))
                        #     print abs(float(list_actual_evol[-1])-float(len(list_Adopters))), float(list_actual_evol[t]),float(len(list_Adopters))

                            time_evol_number_adopters.append(
                                float(len(list_Adopters)))

                            t += 1

                        ############## end while loop over t

                        time_evol_number_adopters_ITER.append(
                            time_evol_number_adopters)

                        # distances between this realization and the actual curve
                        list_dist_fixed_parameters.append(
                            compare_real_evol_vs_simus_to_be_called.
                            compare_two_curves(list_actual_evol,
                                               time_evol_number_adopters))
                        list_dist_fixed_parameters_testing_segment.append(
                            compare_real_evol_vs_simus_to_be_called.
                            compare_two_curves_testing_segment(
                                list_actual_evol, time_evol_number_adopters,
                                cutting_day))

                        list_dist_abs_at_ending_point_fixed_parameters.append(
                            abs(time_evol_number_adopters[-1] -
                                list_actual_evol[-1]))
                        list_dist_at_ending_point_fixed_parameters.append(
                            time_evol_number_adopters[-1] -
                            list_actual_evol[-1])

                        list_final_num_adopt.append(
                            time_evol_number_adopters[-1])

                        ######## NOTE: comment this out when sweeping the WHOLE parameter space
                        #  file4 = open(output_file.split('.dat')[0]+"_indiv_iter"+str(iter)+".dat",'at')
                        # for i in range(len(time_evol_number_adopters)):  #ime step by time step
                        #   print >> file4, i,time_evol_number_adopters[i], alpha_F,damping,mutual_encouragement
                        #file4.close()
                        ########################################################

                        # point-by-point (per time step) distances, pooled over all iterations
                        for index in range(len(time_evol_number_adopters)):

                            list_abs_dist_point_by_point_indiv_simus_to_actual.append(
                                abs(time_evol_number_adopters[index] -
                                    list_actual_evol[index]))
                            list_dist_point_by_point_indiv_simus_to_actual.append(
                                time_evol_number_adopters[index] -
                                list_actual_evol[index])

                    #######################end loop over Niter

                    # [mean trajectory dist, its SD, mean abs ending-point dist]
                    list_pair_dist_std_delta_end = []

                    list_pair_dist_std_delta_end.append(
                        numpy.mean(list_dist_fixed_parameters)
                    )  # average dist between the curves over Niter
                    list_pair_dist_std_delta_end.append(
                        numpy.std(list_dist_fixed_parameters))

                    list_pair_dist_std_delta_end.append(
                        numpy.mean(
                            list_dist_abs_at_ending_point_fixed_parameters))

                    if for_testing_fixed_set == "NO":
                        file3 = open(output_file3,
                                     'at')  # i print out the landscape
                        print >> file3, alpha_F, damping, mutual_encouragement, threshold, numpy.mean(
                            list_dist_abs_at_ending_point_fixed_parameters
                        ), numpy.mean(list_dist_fixed_parameters), numpy.mean(
                            list_final_num_adopt), numpy.std(
                                list_final_num_adopt
                            ), numpy.std(list_final_num_adopt) / numpy.mean(
                                list_final_num_adopt)
                        file3.close()

                    if (
                            numpy.mean(
                                list_dist_abs_at_ending_point_fixed_parameters)
                    ) <= delta_end:  # i only consider situations close enough at the ending point

                        dict_filenames_tot_distance[
                            output_file] = list_pair_dist_std_delta_end

                    # average over the Niter realizations, time step by time step,
                    # and write the mean/SD trajectory for this parameter set
                    file = open(output_file, 'wt')
                    for i in range(len(time_evol_number_adopters)
                                   ):  #time step by time step
                        list_fixed_t = []
                        for iteracion in range(
                                Niter
                        ):  #loop over all independent iter of the process
                            list_fixed_t.append(
                                time_evol_number_adopters_ITER[iteracion][i]
                            )  # i collect all values for the same t, different iter

                        print >> file, list_t[i], numpy.mean(
                            list_fixed_t), numpy.std(
                                list_fixed_t
                            ), alpha_F, damping, mutual_encouragement
                    file.close()

                    print "printed out:  ", output_file

                    if envelopes == "YES":
                        calculate_envelope_set_curves.calculate_envelope(
                            time_evol_number_adopters_ITER, percent_envelope,
                            "Persuasion", [
                                alpha_F, damping, mutual_encouragement,
                                threshold
                            ])

                    # test mode: detailed statistics and histograms for this fixed set
                    if for_testing_fixed_set == "YES":

                        num_valid_endings = 0.
                        for item in list_dist_abs_at_ending_point_fixed_parameters:
                            if item <= delta_end:  # i count how many realizations i get close enough at the ending point
                                num_valid_endings += 1.

                        print "average distance of the optimum in the testing segment:", numpy.mean(
                            list_dist_fixed_parameters), numpy.std(
                                list_dist_fixed_parameters
                            ), list_dist_fixed_parameters, "\n"
                        print "fraction of realizations that end within delta_doctor:", num_valid_endings / Niter, "mean ending dist:", numpy.mean(
                            list_dist_at_ending_point_fixed_parameters
                        ), "SD final dist", numpy.std(
                            list_dist_at_ending_point_fixed_parameters
                        ), list_dist_at_ending_point_fixed_parameters

                        histogram_filename = "../Results/weight_shifts/histogr_raw_distances_ending_persuasion_alpha" + str(
                            alpha_F) + "_damping" + str(
                                damping) + "_mutual_encourg" + str(
                                    mutual_encouragement
                                ) + "_threshold" + str(threshold) + "_" + str(
                                    Niter) + "iter_alphaA_eq_alphaF_day" + str(
                                        cutting_day) + "_A_F_inferred.dat"
                        histograma_gral_negv_posit.histograma(
                            list_dist_at_ending_point_fixed_parameters,
                            histogram_filename)

                        histogram_filename2 = "../Results/weight_shifts/histogr_sum_dist_traject_persuasion_alpha" + str(
                            alpha_F) + "_damping" + str(
                                damping) + "_mutual_encourg" + str(
                                    mutual_encouragement
                                ) + "_threshold" + str(threshold) + "_" + str(
                                    Niter) + "iter_alphaA_eq_alphaF_day" + str(
                                        cutting_day) + "_A_F_inferred.dat"

                        histograma_bines_gral.histograma_bins(
                            list_dist_fixed_parameters, Nbins,
                            histogram_filename2)

                        histogram_filename3 = "../Results/weight_shifts/histogr_sum_dist_testing_segment_persuasion_alpha" + str(
                            alpha_F) + "_damping" + str(
                                damping) + "_mutual_encourg" + str(
                                    mutual_encouragement
                                ) + "_threshold" + str(threshold) + "_" + str(
                                    Niter) + "iter_alphaA_eq_alphaF_day" + str(
                                        cutting_day) + "_A_F_inferred.dat"

                        histograma_bines_gral.histograma_bins_zero(
                            list_dist_fixed_parameters_testing_segment, Nbins,
                            histogram_filename3)

                        histogram_filename4 = "../Results/weight_shifts/histogr_abs_dist_point_by_point_persuasion_alpha" + str(
                            alpha_F) + "_damping" + str(
                                damping) + "_mutual_encourg" + str(
                                    mutual_encouragement
                                ) + "_threshold" + str(threshold) + "_" + str(
                                    Niter) + "iter_alphaA_eq_alphaF_day" + str(
                                        cutting_day) + "_A_F_inferred.dat"

                        histograma_gral_negv_posit.histograma(
                            list_abs_dist_point_by_point_indiv_simus_to_actual,
                            histogram_filename4)

                        histogram_filename5 = "../Results/weight_shifts/histogr_dist_point_by_point_persuasion_alpha" + str(
                            alpha_F) + "_damping" + str(
                                damping) + "_mutual_encourg" + str(
                                    mutual_encouragement
                                ) + "_threshold" + str(threshold) + "_" + str(
                                    Niter) + "iter_alphaA_eq_alphaF_day" + str(
                                        cutting_day) + "_A_F_inferred.dat"

                        histograma_gral_negv_posit.histograma(
                            list_dist_point_by_point_indiv_simus_to_actual,
                            histogram_filename5)

                        output_file10 = "../Results/weight_shifts/Summary_results_persuasion_alpha" + str(
                            alpha_F) + "_damping" + str(
                                damping) + "_mutual_encourg" + str(
                                    mutual_encouragement
                                ) + "_threshold" + str(threshold) + "_" + str(
                                    Niter) + "iter_alphaA_eq_alphaF_day" + str(
                                        cutting_day) + "_A_F_inferred.dat"
                        file10 = open(output_file10, 'wt')

                        print >> file10, "Summary results from best fit persuasion with", Niter, "iter, and with values for the parameters:  alpha ", alpha_F, " damping: ", damping, " mutual_encourg: ", mutual_encouragement, " threshold:", threshold

                        print >> file10, "average distance of the optimum in the testing segment:", numpy.mean(
                            list_dist_fixed_parameters), numpy.std(
                                list_dist_fixed_parameters
                            ), list_dist_fixed_parameters
                        print >> file10, "fraction of realizations that end within delta_doctor:", num_valid_endings / Niter, "mean ending dist:", numpy.mean(
                            list_dist_at_ending_point_fixed_parameters
                        ), "SD final dist", numpy.std(
                            list_dist_at_ending_point_fixed_parameters
                        ), list_dist_at_ending_point_fixed_parameters

                        print >> file10, "written optimum train_test evolution file:", output_file
                        print >> file10, "written histogram file: ", histogram_filename
                        print >> file10, "written histogram file: ", histogram_filename2

                        file10.close()

                        print "written optimum train_test evolution file:", output_file

                        print "written summary file: ", output_file10

                    damping += delta_damping
                mutual_encouragement += delta_mutual_encouragement
            alpha_F += delta_alpha_F
        threshold += delta_threshold

    if for_testing_fixed_set == "NO":  # only if i am exploring the whole landscape, i need to call this function, otherwise, i already know the optimum
        compare_real_evol_vs_simus_to_be_called.pick_minimum_same_end(
            dict_filenames_tot_distance, "Persuasion_weight", all_team, Niter,
            None)  #last argument, cutting day (it doesnt apply)

    if for_testing_fixed_set == "NO":
        print "written landscape file:", output_file3
def main(graph_name):
   """Sweep the persuasion-model parameter space on the training segment.

   Reads the empirical adopter-evolution file (attendings only), then
   sweeps threshold, alpha_F, mutual_encouragement and damping over the
   (mostly pinned) ranges hard-coded below, running Niter_training
   stochastic realizations of the persuasion dynamics on the shift
   network G up to cutting_day.  For each parameter set it records
   distance metrics between simulated and actual adoption curves,
   appends a row to the landscape file, writes two histograms, and
   finally reports the optimum parameter set under two rankings
   (smallest average distance with close ending, and smallest product
   of distance means).

   graph_name: path to a .gml file (read with networkx) whose nodes are
   shifts and doctors; doctor nodes carry 'status'/'label'/'type',
   shift nodes carry 'order' and 'shift_lenght' (sic) attributes.
   """
 

   G = nx.read_gml(graph_name)
 
   list_id_weekends_T3=look_for_T3_weekends(G)  # T3 doesnt share fellows in the weekend  (but they are the exception)



   cutting_day=175  # to separate   training-testing

   Niter_training=1000
  

   delta_end=3  # >= than + or -  dr difference at the end of the evolution

   dir_real_data='../Results/'
   dir="../Results/weight_shifts/persuasion/"  


   all_team="NO"   # as adopters or not
   Nbins=20   # for the histogram of sum of distances


   fixed_param="FIXED_mutual0.5_damping.5_"    # or ""  # for the Results file that contains the sorted list of best parameters




  # fixed_parameters="mutual_encoug0.5_threshold0.5"   # for the Landscape text file CHANGE PARAMETERS ACCORDINGLY!!!

#output_file3="../Results/weight_shifts/Landscape_parameters_persuasion_train_test_"+str(fixed_parameters)+"_"+str(Niter_training)+"iter.dat"
   # NOTE(review): landscape filename says "FIXED_damping0.1_threshold0.7" but the
   # sweep below actually fixes damping=0.5 and mutual_encouragement=0.5 — confirm.
   output_file3="../Results/weight_shifts/Landscape_parameters_persuasion_train_FIXED_damping0.1_threshold0.7_"+str(Niter_training)+"iter_alphaA_eq_alphaF.dat"  
   file3 = open(output_file3,'wt')        # truncate/create the landscape file; rows are appended per parameter set below
   file3.close()

 


######################################################################################
#  I read the file of the actual evolution of the idea spreading in the hospital:   ##
######################################################################################



   if all_team=="YES":    
      print "remember that now i use the file of adopters without fellows\n../Results/Actual_evolution_adopters_NO_fellows_only_attendings.dat"
      exit()

   else:
      filename_actual_evol="../Results/Actual_evolution_adopters_NO_fellows_only_attendings.dat"
  


   file1=open(filename_actual_evol,'r')         ## i read the file:  list_dates_and_names_current_adopters.txt  (created with: extract_real_evolution_number_adopters.py)
   list_lines_file=file1.readlines()
            

   list_actual_evol=[]  
   for line in list_lines_file:      # [1:]:   # i exclude the first row   
     
      num_adopters= float(line.split(" ")[1])          # second space-separated column = number of adopters that day
      list_actual_evol.append(num_adopters)

   list_actual_evol_training=list_actual_evol[:cutting_day]   # only the first cutting_day days are used for training

##################################################################


#../Results/network_final_schedule_withTeam3/Time_evolutions_Persuasion_alpha0.2_damping0.0_mutual_encourg0.7_threshold0.4_unif_distr_50iter_2012_seed31Oct_finalnetwork.dat

 
   alpha_F_min=0.10   #   # alpha=0: nobody changes their mind
   alpha_F_max=0.9    
   delta_alpha_F=0.10    #AVOID 1.0 OR THE DYNAMICS GETS TOTALLY STUCK AND IT IS NOT ABLE TO PREDICT SHIT!
   

   min_damping=0.500   #0.0     #its harder to go back from YES to NO again. =1 means no effect, =0.5 half the movement from Y->N than the other way around, =0 never go back from Y to N
   max_damping=0.501    #0.451
   delta_damping=0.10  
   
   


   min_mutual_encouragement=0.50   #  # when two Adopters meet, they convince each other even more
   max_mutual_encouragement=0.501   
   delta_mutual_encouragement=0.10
   
   
   threshold_min=0.10   #  # larger than, to be an Adopter
   threshold_max=0.901 
   delta_threshold=0.10   # AVOID 1.0 OR THE DYNAMICS GETS TOTALLY STUCK AND IT IS NOT ABLE TO PREDICT SHIT
 


   
   
   print "\n\nPersuasion process on network, with Niter:",Niter_training
   
   
   dict_filenames_tot_distance={}   # i will save the filename as key and the tot distance from that curve to the original one
   dict_filenames_prod_distances={}   


  

   # 4-level nested sweep: threshold -> alpha_F -> mutual_encouragement -> damping
   # (damping and mutual_encouragement ranges are pinned to a single value above)
   threshold=threshold_min
   while   threshold<= threshold_max:
      print   "thershold:",threshold

      alpha_F=alpha_F_min
      while alpha_F<= alpha_F_max:            # i explore all the parameter space, and create a file per each set of values
        alpha_A=1.0*alpha_F   # attendings and fellows share the same alpha ("alphaA_eq_alphaF" in the filenames)
        print "  alpha_F:",alpha_F

        mutual_encouragement=min_mutual_encouragement  
        while  mutual_encouragement <= max_mutual_encouragement:
          print "    mutual_encouragement:",mutual_encouragement

          damping=min_damping
          while   damping <= max_damping:
            print "      damping:",damping


         
#            dir="../Results/weight_shifts/persuasion/alpha%.2f_damping%.2f/"  % (alpha_F, damping )
           
            output_file=dir+"Time_evolutions_Persuasion_training_alpha"+str(alpha_F)+"_damping"+str(damping)+"_mutual_encourg"+str(mutual_encouragement)+"_threshold"+str(threshold)+"_unif_distr_"+str(Niter_training)+"iter_alphaA_eq_alphaF"+"_"+str(cutting_day)+".dat"         


           # file = open(output_file,'wt')     # i am not saving the train file, because i will just want to know 
            #file.close()          # the optimum parameter set and go look for the whole-250-day file
            


            time_evol_number_adopters_ITER=[]  # list of complete single realizations of the dynamics
            list_dist_fixed_parameters=[]                     # per-iter sum of distances between curves
            list_dist_at_ending_point_fixed_parameters=[]     # per-iter signed distance at cutting_day
            list_dist_abs_at_ending_point_fixed_parameters=[] # per-iter absolute distance at cutting_day

           
            list_networks_at_cutting_day=[]

            list_final_num_adopt=[]


            for iter in range(Niter_training):

               # print "         ",iter
                list_t=[]
           
                time_evol_number_adopters=[]   # for a single realization of the dynamics

               


                num_adopters , seed_shift ,max_shift= set_ic(G,threshold)   # i establish who is Adopter and NonAdopter initially, and count how many shifts i have total

                time_evol_number_adopters.append(float(num_adopters))               
                list_t.append(0)



                
               ########### the dynamics starts:                 
                t=int(seed_shift)+1   # the first time step is just IC.???


                while t< cutting_day:  # loop over shifts, in chronological order  (the order is the day index since seeding_day) 
                         
                    list_t.append(t)
                    for n in G.nodes():
                        if G.node[n]['type']=="shift" and G.node[n]['order']==t:  # i look for the shift corresponding to that time step                    

                            shift_lenght=int(G.node[n]['shift_lenght'])
                           
                            if shift_lenght==2 and n not in list_id_weekends_T3:
                               shift_lenght=1   # because during weekends, the fellow does rounds one day with Att1 and the other day with Att2.  (weekend shifts for T3 are two day long, with no sharing fellows)



                            flag_possible_persuasion=0
                            for doctor in G.neighbors(n):                               
                                if G.node[doctor]["status"]=="Adopter":   #first i check if any doctor is an adopter in this shift         
                                    flag_possible_persuasion=1                               
                                    break

                            if flag_possible_persuasion==1:
                                list_doctors=[]
                                for doctor in G.neighbors(n):   # for all drs in that shift
                                    list_doctors.append(doctor)
                                
                                
                                pairs=itertools.combinations(list_doctors,2)    # cos the shift can be 2 but also 3 doctors 
                                for pair in pairs:
                                    doctor1=pair[0]
                                    doctor2=pair[1]
                                                                                        
                                    if G.node[doctor1]['status'] != G.node[doctor2]['status']:  # if they think differently, 
                                                                                              # there will be persuasion
                                        persuasion(G,damping,doctor1,doctor2,alpha_A,alpha_F,threshold,shift_lenght)   # i move their values of opinion                  
                                        update_opinions(G,threshold,doctor1,doctor2) #  i update status and make sure the values of the vectors stay between [0,1] 
                                  
                                    else:  # if two Adopters meet, they encourage each other (if two NonAdopters, nothing happens)
                                   
                                       mutual_reinforcement(G,mutual_encouragement,doctor1,doctor2,shift_lenght)
                                  
                               
                    list_all_Adopters=[]  #including fellows        
                    list_Adopters=[]        #NOT including fellows 
                    for n in G.nodes():              
                        try:
                            if  G.node[n]["status"]=="Adopter":                                                    
                                if G.node[n]["label"] not in list_Adopters and G.node[n]["type"]=="A":
                                    list_Adopters.append(G.node[n]["label"])
                        except: pass  # if the node is a shift, it doesnt have a 'status' attribute


        
                   


                    time_evol_number_adopters.append(float(len(list_Adopters)))   # only attendings ("A") count as adopters

                    t+=1
   

                ############## end while loop over t
               


               
                time_evol_number_adopters_ITER.append(time_evol_number_adopters)


                list_final_num_adopt.append(time_evol_number_adopters[-1])

               
                list_dist_fixed_parameters.append(compare_real_evol_vs_simus_to_be_called.compare_two_curves( list_actual_evol_training,time_evol_number_adopters))
               
                list_dist_abs_at_ending_point_fixed_parameters.append( abs(time_evol_number_adopters[-1]-list_actual_evol_training[-1]) )

                list_dist_at_ending_point_fixed_parameters.append( time_evol_number_adopters[-1]-list_actual_evol_training[-1]) 



               
              
             

            #######################   end loop Niter for the training fase


            # summary statistics over the Niter realizations for this parameter set
            list_pair_dist_std_delta_end=[]
        
            list_pair_dist_std_delta_end.append(numpy.mean(list_dist_fixed_parameters) )   # average dist between the curves over Niter
            list_pair_dist_std_delta_end.append(numpy.std(list_dist_fixed_parameters) )

            list_pair_dist_std_delta_end.append(numpy.mean(list_dist_abs_at_ending_point_fixed_parameters))

         

                     
            value=numpy.mean(list_dist_fixed_parameters) *numpy.mean(list_dist_abs_at_ending_point_fixed_parameters) # if SD=0, it is a problem, because then that is the minimun value, but not the optimum i am looking for!!
        
            dict_filenames_prod_distances[output_file]=  value                  



            file3 = open(output_file3,'at')          # i print out the landscape           
            print >> file3, alpha_F, damping, mutual_encouragement, threshold,numpy.mean(list_dist_abs_at_ending_point_fixed_parameters), numpy.mean(list_dist_fixed_parameters),  numpy.mean(list_final_num_adopt),numpy.std(list_final_num_adopt),  numpy.std(list_final_num_adopt)/numpy.mean(list_final_num_adopt)
            file3.close()




            histogram_filename="../Results/weight_shifts/histogr_raw_distances_ending_test_train_alpha"+str(alpha_F)+"_damping"+str(damping)+"_mutual_encourg"+str(mutual_encouragement)+"_threshold"+str(threshold)+"_unif_distr_"+str(Niter_training)+"iter_alphaA_eq_alphaF"+"_"+str(cutting_day)+".dat"     
            histograma_gral_negv_posit.histograma(list_dist_at_ending_point_fixed_parameters,histogram_filename)
            
            histogram_filename2="../Results/weight_shifts/histogr_sum_dist_traject_infection_training_alpha"+str(alpha_F)+"_damping"+str(damping)+"_mutual_encourg"+str(mutual_encouragement)+"_threshold"+str(threshold)+"_unif_distr_"+str(Niter_training)+"iter_alphaA_eq_alphaF"+"_"+str(cutting_day)+".dat"     
            
            histograma_bines_gral.histograma_bins(list_dist_fixed_parameters,Nbins,histogram_filename2)


            print  "written histogram file: ",histogram_filename
            print  "written histogram file: ",histogram_filename2


            if (numpy.mean(list_dist_abs_at_ending_point_fixed_parameters)) <= delta_end:  # i only consider situations close enough at the ending point   

               dict_filenames_tot_distance[output_file]=list_pair_dist_std_delta_end 


             



   
          #  file = open(output_file,'wt')        
           # for i in range(len(time_evol_number_adopters)):  #time step by time step
            #    list_fixed_t=[]
             #   for iteracion in range (Niter_training): #loop over all independent iter of the process
              #      list_fixed_t.append(time_evol_number_adopters_ITER[iteracion][i])  # i collect all values for the same t, different iter  

               # print >> file, list_t[i],numpy.mean(list_fixed_t),numpy.std(list_fixed_t), alpha_F,damping,mutual_encouragement       
            #file.close()

           

          
            damping += delta_damping
          mutual_encouragement += delta_mutual_encouragement
        alpha_F += delta_alpha_F
      threshold  += delta_threshold
    


   # rank parameter sets two ways and report the best of each
   list_order_dict=  compare_real_evol_vs_simus_to_be_called.pick_minimum_same_end(dict_filenames_tot_distance,"Persuasion_training_land_weight",all_team,Niter_training,cutting_day)


  
   string_name="_persuasion_training_"+fixed_param+str(Niter_training)+"iter_"+str(cutting_day)+".dat"            # for the "Results" file with the sorted list of files
   
   list_order_dict2= compare_real_evol_vs_simus_to_be_called.pick_minimum_prod_distances(dict_filenames_prod_distances,string_name,all_team,Niter_training,cutting_day)

  


#./Results/network_final_schedule_withTeam3_local/Time_evolutions_Persuasion_alpha0.4_damping0.4_mutual_encourg0.6_threshold0.5_unif_distr_2iter_2012_seed31Oct_finalnetwork.dat


   optimum_filename=list_order_dict[0][0]


   # parameter values are parsed back out of the winning filename
   print optimum_filename   
   alpha_F=float(list_order_dict[0][0].split("_alpha")[1].split("_")[0])
   alpha_A=0.5*alpha_F   # NOTE(review): training above used alpha_A=1.0*alpha_F ("alphaA_eq_alphaF"); 0.5 here looks inconsistent — confirm (alpha_A is not used below)
   damping=float(list_order_dict[0][0].split("_damping")[1].split("_")[0])
   mutual_encouragement=float(list_order_dict[0][0].split("_mutual_encourg")[1].split("_")[0])
   threshold=float(list_order_dict[0][0].split("_threshold")[1].split("_")[0])
  
  
            
                

  
   print "Optimum (old method) alpha=", alpha_F, " damping=",damping," mutual encourag=",mutual_encouragement," threshold",threshold
   
  
  
   optimum_filename=list_order_dict2[0][0]

   print optimum_filename   
   alpha_F=float(list_order_dict2[0][0].split("_alpha")[1].split("_")[0])
   alpha_A=0.5*alpha_F   # NOTE(review): same 0.5-vs-1.0 inconsistency as above — confirm
   damping=float(list_order_dict2[0][0].split("_damping")[1].split("_")[0])
   mutual_encouragement=float(list_order_dict2[0][0].split("_mutual_encourg")[1].split("_")[0])
   threshold=float(list_order_dict2[0][0].split("_threshold")[1].split("_")[0])
  
  
            
                

  
   print "Optimum (product distances and SDs) alpha=", alpha_F, " damping=",damping," mutual encourag=",mutual_encouragement," threshold",threshold
   
  
  




   # summary file for the product-distances optimum (the last parsed parameter set)
   output_file10="../Results/weight_shifts/Summary_results_train_test_persuasion_alpha"+str(alpha_F)+"_FIXED_damping"+str(damping)+"_mutual_encourg"+str(mutual_encouragement)+"_FIXED_threshold"+str(threshold)+"_"+str(Niter_training)+"iter_alphaA_eq_alphaF_day"+str(cutting_day)+".dat"         
   file10 = open(output_file10,'wt')    

   print >> file10, "Summary results from train-testing persuasion with",Niter_training, "iter, using the avg of the cutting points as IC, and with values for the parameters:  alpha ",alpha_F," damping: ",damping," mutual_encourg: ",mutual_encouragement," threshold:",threshold


   print >> file10,  "Look for optimum the file set of parameters (or run those simulations):",optimum_filename
  

   file10.close()




   print "Look for optimum the file set of parameters (or run those simulations):",optimum_filename
  

   print "printed out landscape file:",output_file3
Exemplo n.º 11
0
def main(graph_name):
    """Run the infection model over the testing segment only, with fixed parameters.

    Initial conditions at cutting_day are empirical: attendings listed as
    adopters in the data file are set Infected, plus the most frequently
    adopting fellows from a previous training run (read from a results
    file).  Runs Niter stochastic realizations from cutting_day to the
    last shift, averages the evolutions, optionally computes envelopes,
    and writes histograms plus a summary file of distances between the
    simulations and the actual testing-segment evolution.

    graph_name: path to a .gml shift/doctor network (read via networkx).
    Note: shift length is read from attribute 'shift_length' here —
    other versions of this script use 'shift_lenght'; must match the
    gml file actually supplied.
    """

    G = nx.read_gml(graph_name)

    cutting_day = 175  # i use this only for the filenames

    for_testing_fixed_set = "YES"  # when YES, fixed values param, to get all statistics on final distances etc
    # change the range for the parameters accordingly

    envelopes = "YES"  # whether to compute percentile envelopes over the Niter curves

    Niter = 1000  # 100 iter seems to be enough (no big diff. with respect to 1000it)

    percent_envelope = 95.

    list_id_weekends_T3 = look_for_T3_weekends(
        G
    )  # T3 doesnt share fellows in the weekend  (but they are the exception)
    Nbins = 1000  # for the histogram of sum of distances

    all_team = "NO"  # as adopters or not

    dir_real_data = '../Results/'
    dir = "../Results/weight_shifts/infection/"

    delta_end = 3.  # >= than + or -  dr difference at the end of the evolution (NO realization ends up closer than this!!!! if 2, i get and empty list!!!)

    ######################################################################################
    #  I read the file of the actual evolution of the idea spreading in the hospital:   ##
    ######################################################################################

    filename_actual_evol = "../Data/Actual_evolution_adopters_NO_fellows_only_attendings_with_list_names.csv"  #   "../Results/Actual_evolution_adopters_from_inference.dat"

    file1 = open(
        filename_actual_evol, 'r'
    )  ## i read the file:  list_dates_and_names_current_adopters.txt  (created with: extract_real_evolution_number_adopters.py)
    list_lines_file = file1.readlines()

    # day -> list of attending adopter names (Title-cased) observed that day
    dict_days_list_empirical_att_adopters = {}
    list_actual_evol = []
    for line in list_lines_file:  # [1:]:   # i exclude the first row
        day = int(line.split(" ")[0])
        num_adopters = float(line.split(" ")[1])
        list_actual_evol.append(num_adopters)

        list_current_att_adopters = []
        for element in line.split(
                " "
        )[2:]:  # i need to ignore the empty columns from the original datafile
            if element:
                if element != '\n':
                    list_current_att_adopters.append(
                        element.strip('\n').title())

        dict_days_list_empirical_att_adopters[day] = list_current_att_adopters

    list_actual_evol_testing = list_actual_evol[cutting_day:]

    ##################################################################

    #../Results/weight_shifts/infection/Average_time_evolution_Infection_training_p0.8_Immune0.3_1000iter_2012_avg_ic_day125.dat ESTOS VALORES SON EL OPTIMUM FIT THE 152-DIAS
    # parameter "sweep" pinned to the single optimum: p=1.0, Immune=0.7
    prob_min = 1.00
    prob_max = 1.001
    delta_prob = 0.1

    prob_Immune_min = 0.700
    prob_Immune_max = 0.7001
    delta_prob_Immune = 0.1

    prob_Immune = prob_Immune_min
    while prob_Immune <= prob_Immune_max:

        print "prom Immune:", prob_Immune

        prob_infection = prob_min
        while prob_infection <= prob_max:

            print "  p:", prob_infection

            output_file2 = dir + "Average_time_evolution_Infection_p" + str(
                prob_infection) + "_" + "Immune" + str(
                    prob_Immune) + "_" + str(Niter) + "iter_day" + str(
                        cutting_day) + "_Att_only_middle_real_ic.dat"

            file2 = open(output_file2, 'wt')  # truncate now; averages appended after the Niter loop
            file2.close()

            ########## i read the list of frequent adopters from simulations, to estimate the ic for fellows

            filename_list_simu_adopt = "../Results/weight_shifts/infection/List_adopters_fellows_descending_frequency_Infection_training_p" + str(
                prob_infection) + "_Immune" + str(
                    prob_Immune) + "_1000iter_2012_avg_ic_day" + str(
                        cutting_day) + "_Att_only_middle.dat"

            # print filename_list_simu_adopt
            file_list_simu_adopt = open(filename_list_simu_adopt, 'r')
            list_lines_file = file_list_simu_adopt.readlines()

            list_sorted_fellow_adopters = []
            for line in list_lines_file[1:]:  # i exclude the first row
                adopter = line.split(" ")[0]
                list_sorted_fellow_adopters.append(adopter)

        # print  "list sorted fellows:",list_sorted_fellow_adopters

            # header row carries the average number of fellow adopters at cutting_day
            num_simu_Fellow_adopters_cutting_day = int(
                round(
                    float(list_lines_file[0].split("Avg # F adopters ")
                          [1].split(" ")[0])))
            #print "avg simu number Fellow adopters:",num_simu_Fellow_adopters_cutting_day,int( round(num_simu_Fellow_adopters_cutting_day))
            ################

            list_lists_t_evolutions = []  # one adoption curve per realization

            list_dist_fixed_parameters_testing_segment = []
            list_abs_dist_at_ending_point_fixed_parameters = []
            list_dist_at_ending_point_fixed_parameters = []
            list_final_num_infected = []
            list_abs_dist_point_by_point_indiv_simus_to_actual = []
            list_dist_point_by_point_indiv_simus_to_actual = []

            for iter in range(Niter):

                #   print "     iter:",iter

                ########### set I.C.  according to the empirical data
                dict_name_node = {}  # doctor label -> node id
                list_I = []          # labels of infected attendings (fellows excluded)
                max_order = 0        # last shift order = end of the simulated period

                for n in G.nodes():
                    G.node[n]["status"] = "S"  # all nodes are Susceptible
                    if G.node[n]['type'] == "shift":
                        if G.node[n]['order'] > max_order:
                            max_order = G.node[n][
                                'order']  # to get the last shift-order for the time loop
                    else:
                        dict_name_node[G.node[n]["label"]] = n
                        if G.node[n][
                                'label'] in dict_days_list_empirical_att_adopters[
                                    cutting_day]:
                            G.node[n]["status"] = "I"
                            list_I.append(G.node[n]['label'])

                # seed the top-frequency fellows from the training simulations
                list_fellows = []
                for i in range(num_simu_Fellow_adopters_cutting_day):
                    fellow_adopter = list_sorted_fellow_adopters[
                        i]  # this list is sorted from more to less frequent adopter
                    node = dict_name_node[fellow_adopter]
                    G.node[node]["status"] = "I"
                    list_fellows.append(fellow_adopter)

                list_single_t_evolution = []
                old_num_adopters = len(list_I)
                list_single_t_evolution.append(
                    old_num_adopters
                )  # I always start with TWO infected doctors!!

                for n in G.nodes(
                ):  # i make some DOCTORs INMUNE  (anyone except Weiss and Wunderink)
                    if (G.node[n]['type'] == "A") or (G.node[n]['type']
                                                      == "F"):
                        if (G.node[n]['label']
                                not in dict_days_list_empirical_att_adopters[
                                    cutting_day]) and (G.node[n]['label']
                                                       not in list_fellows):
                            rand = random.random()
                            if rand < prob_Immune:
                                G.node[n]["status"] = "Immune"

                ################# the dynamics starts:

                shift_length = 5  #i know the first shift (order 0) is of length 5

                t = cutting_day
                while t <= max_order:  # loop over shifts, in order

                    for n in G.nodes():
                        if G.node[n]['type'] == "shift" and G.node[n][
                                'order'] == t:

                            shift_length = int(G.node[n]['shift_length'])

                            if shift_length == 2 and n not in list_id_weekends_T3:
                                shift_length = 1  # because during weekends, the fellow does rounds one day with Att1 and the other day with Att2.  (weekend shifts for T3 are two day long, with no sharing fellows)
                            #  print "one-day weekend", G.node[n]['label'],G.node[n]['shift_length']

                            flag_possible_infection = 0
                            for doctor in G.neighbors(
                                    n
                            ):  #first i check if any doctor is infected in this shift
                                if G.node[doctor]["status"] == "I":
                                    flag_possible_infection = 1

                            if flag_possible_infection:

                                for doctor in G.neighbors(
                                        n
                                ):  # then the doctors in that shift, gets infected with prob_infection
                                    for i in range(
                                            shift_length
                                    ):  # i repeat the infection process several times, to acount for shift length
                                        if G.node[doctor]["status"] == "S":
                                            rand = random.random()
                                            if rand < prob_infection:
                                                G.node[doctor]["status"] = "I"

                                                if G.node[doctor][
                                                        "type"] == "A":  # fellows participate in the dynamics, but i only consider the attendings as real adopters
                                                    list_I.append(
                                                        G.node[doctor]
                                                        ["label"])

                    new_num_adopters = len(list_I)

                    # expand the per-shift count to per-day samples; adoption is
                    # credited to the middle of the shift (old count for the first
                    # days, new count for the rest)
                    if shift_length == 5:  # i estimate that adoption happens in the middle of the shift
                        if t + 5 < max_order:
                            list_single_t_evolution.append(old_num_adopters)
                        if t + 4 < max_order:
                            list_single_t_evolution.append(old_num_adopters)
                        if t + 3 < max_order:
                            list_single_t_evolution.append(new_num_adopters)
                        if t + 2 < max_order:
                            list_single_t_evolution.append(new_num_adopters)
                        if t + 1 < max_order:
                            list_single_t_evolution.append(new_num_adopters)
                        t += 5

                    elif shift_length == 4:
                        if t + 4 < max_order:
                            list_single_t_evolution.append(old_num_adopters)
                        if t + 3 < max_order:
                            list_single_t_evolution.append(old_num_adopters)

                        if t + 2 < max_order:
                            list_single_t_evolution.append(new_num_adopters)

                        if t + 1 < max_order:
                            list_single_t_evolution.append(new_num_adopters)
                        t += 4

                    elif shift_length == 3:
                        if t + 3 < max_order:
                            list_single_t_evolution.append(old_num_adopters)

                        if t + 2 < max_order:
                            list_single_t_evolution.append(new_num_adopters)

                        if t + 1 < max_order:
                            list_single_t_evolution.append(new_num_adopters)

                        t += 3

                    elif shift_length == 2:
                        if t + 2 < max_order:
                            list_single_t_evolution.append(old_num_adopters)

                        if t + 1 < max_order:
                            list_single_t_evolution.append(new_num_adopters)

                        t += 2

                    elif shift_length == 1:
                        if t + 1 < max_order:
                            list_single_t_evolution.append(new_num_adopters)

                        t += 1

                    old_num_adopters = new_num_adopters

                    ######## end t loop

                list_lists_t_evolutions.append(list_single_t_evolution)

                # now i only run the testing segment!
                list_dist_fixed_parameters_testing_segment.append(
                    compare_real_evol_vs_simus_to_be_called.compare_two_curves(
                        list_actual_evol_testing, list_single_t_evolution))

                list_abs_dist_at_ending_point_fixed_parameters.append(
                    abs(list_single_t_evolution[-1] -
                        list_actual_evol_testing[-1])
                )  # i save the distance at the ending point between the current simu and actual evol
                list_dist_at_ending_point_fixed_parameters.append(
                    list_single_t_evolution[-1] - list_actual_evol_testing[-1]
                )  # i save the distance at the ending point between the current simu and actual evol
                list_final_num_infected.append(list_single_t_evolution[-1])

                # NOTE(review): assumes the simulated curve is never longer than
                # list_actual_evol_testing, else this raises IndexError — confirm
                for index in range(len(list_single_t_evolution)):

                    list_abs_dist_point_by_point_indiv_simus_to_actual.append(
                        abs(list_single_t_evolution[index] -
                            list_actual_evol_testing[index]))
                    list_dist_point_by_point_indiv_simus_to_actual.append(
                        list_single_t_evolution[index] -
                        list_actual_evol_testing[index])

            ######## end loop Niter

            # average the Niter curves day by day and write them out
            file2 = open(output_file2, 'at')
            for s in range(len(list_single_t_evolution)):
                list_fixed_t = []
                for iter in range(Niter):
                    list_fixed_t.append(list_lists_t_evolutions[iter][s])
                print >> file2, s + cutting_day, numpy.mean(list_fixed_t)
            file2.close()

            print "printed out: ", output_file2

            if envelopes == "YES":
                calculate_envelope_set_curves.calculate_envelope(
                    list_lists_t_evolutions, percent_envelope, "Infection",
                    [prob_infection, prob_Immune])

            num_valid_endings = 0.
            for item in list_abs_dist_at_ending_point_fixed_parameters:
                if item <= delta_end:  # i count how many realizations i get close enough at the ending point
                    num_valid_endings += 1.

            print "average distance of the optimum in the testing segment:", numpy.mean(
                list_dist_fixed_parameters_testing_segment), numpy.std(
                    list_dist_fixed_parameters_testing_segment
                ), list_dist_fixed_parameters_testing_segment, "\n"
            print "fraction of realizations that end within delta_doctor:", num_valid_endings / Niter, "mean ending dist:", numpy.mean(
                list_dist_at_ending_point_fixed_parameters
            ), "SD final dist", numpy.std(
                list_dist_at_ending_point_fixed_parameters
            ), list_dist_at_ending_point_fixed_parameters, "\n"

            histogram_filename = "../Results/weight_shifts/histogr_raw_distances_ending_infection_p" + str(
                prob_infection) + "_" + "Immune" + str(
                    prob_Immune) + "_" + str(Niter) + "iter_day" + str(
                        cutting_day) + "_Att_only_middle_real_ic.dat"
            histograma_gral_negv_posit.histograma(
                list_dist_at_ending_point_fixed_parameters, histogram_filename)

            histogram_filename3 = "../Results/weight_shifts/histogr_sum_dist_testing_segment_infection_p" + str(
                prob_infection) + "_" + "Immune" + str(
                    prob_Immune) + "_" + str(Niter) + "iter_day" + str(
                        cutting_day) + "_Att_only_middle_real_ic.dat"

            histograma_bines_gral.histograma_bins_zero(
                list_dist_fixed_parameters_testing_segment, Nbins,
                histogram_filename3)

            print min(list_dist_fixed_parameters_testing_segment), max(
                list_dist_fixed_parameters_testing_segment)

            histogram_filename4 = "../Results/weight_shifts/histogr_abs_dist_point_by_point_infection_p" + str(
                prob_infection) + "_" + "Immune" + str(
                    prob_Immune) + "_" + str(Niter) + "iter_day" + str(
                        cutting_day) + "_Att_only_middle_real_ic.dat"
            histograma_gral_negv_posit.histograma(
                list_abs_dist_point_by_point_indiv_simus_to_actual,
                histogram_filename4)

            histogram_filename5 = "../Results/weight_shifts/histogr_dist_point_by_point_infection_p" + str(
                prob_infection) + "_" + "Immune" + str(
                    prob_Immune) + "_" + str(Niter) + "iter_day" + str(
                        cutting_day) + "_Att_only_middle_real_ic.dat"
            histograma_gral_negv_posit.histograma(
                list_dist_point_by_point_indiv_simus_to_actual,
                histogram_filename5)

            output_file10 = "../Results/weight_shifts/Summary_results_infection_p" + str(
                prob_infection) + "_" + "Immune" + str(
                    prob_Immune) + "_" + str(Niter) + "iter_day" + str(
                        cutting_day) + "_Att_only_middle_real_ic.dat"
            file10 = open(output_file10, 'wt')

            print >> file10, "Summary results from best fit infection with", Niter, "iter, and with values for the parameters:  prob_inf ", prob_infection, " prob immune: ", prob_Immune, "\n"

            print >> file10, "average distance of the optimum in the testing segment:", numpy.mean(
                list_dist_fixed_parameters_testing_segment), numpy.std(
                    list_dist_fixed_parameters_testing_segment
                ), list_dist_fixed_parameters_testing_segment, "\n"
            print >> file10, "fraction of realizations that end within delta_doctor:", num_valid_endings / Niter, "mean ending dist:", numpy.mean(
                list_dist_at_ending_point_fixed_parameters
            ), "SD final dist", numpy.std(
                list_dist_at_ending_point_fixed_parameters
            ), list_dist_at_ending_point_fixed_parameters, "\n"

            print >> file10, "written optimum best fit evolution file:", output_file2
            print >> file10, "written histogram file: ", histogram_filename

            file10.close()

            print "written Summary file: ", output_file10

            prob_infection += delta_prob
        prob_Immune += delta_prob_Immune
Exemplo n.º 12
0
def main(graph_name):

    G = nx.read_gml(
        graph_name
    )  # about the "order" in the shift nodes: not all orders exist, only every 5/2 days. thats why shifts have length that we use to weight the interactions accordingly

    list_id_weekends_T3 = look_for_T3_weekends(
        G
    )  # T3 doesnt share fellows in the weekend  (but they are the exception)

    Niter = 1000
    dir_real_data = '../Results/'

    time_window_ahead = 7  # number of days in which there will be no Adopters on call

    dict_num_Att_so_far = {
    }  # i get the dict of attendings on call until each day of the study period
    for t in range(0, 244):
        dict_num_Att_so_far[t] = find_num_Att_on_call_until_now(G, t)

    dict_num_F_so_far = {
    }  # i get the dict of attendings on call until each day of the study period
    for t in range(0, 244):
        dict_num_F_so_far[t] = find_num_Fellows_on_call_until_now(G, t)

    basic_intervention_start_day = 20  # and then plus minus a small random number

    random_start = "YES"  # if no, all iter with same initial re-seeding (intervention) day

    num_reseeds = 1  # per intervention

    min_bump = 0.0  # for the doctors that are re-seeded
    max_bump = 1.0  #same scale as the status, and the adoption threshold
    delta_bump = 0.005

    all_team = "NO"  # as adopters or not

    ######################################################################################
    #  I read the file of the actual evolution of the idea spreading in the hospital:   ##
    ######################################################################################

    if all_team == "YES":
        print "remember that now i use the file of adopters without fellows\n../Results/Actual_evolution_adopters_NO_fellows_only_attendings.dat"
        exit()

    else:
        filename_actual_evol = "../Results/Actual_evolution_adopters_NO_fellows_only_attendings.dat"

    file1 = open(
        filename_actual_evol, 'r'
    )  ## i read the file:  list_dates_and_names_current_adopters.txt  (created with: extract_real_evolution_number_adopters.py)
    list_lines_file = file1.readlines()

    list_actual_evol = []
    for line in list_lines_file:  # [1:]:   # i exclude the first row

        num_adopters = float(line.split(" ")[1])
        list_actual_evol.append(num_adopters)

##################################################################

# i use the best fit  (over the 250-day curve):
#  from only Att counted as adopters:  ../Results/weight_shifts/persuasion/alpha0.10_damping0.00/Time_evol_Persuasion_alpha0.1_damping0.0_mutual0.5_threshold0.3_1000iter.dat  THIS IS THE THIRD BEST SOLUTION, BUT ENDS UP CLOSER, SO I PREFER TO USE THIS, TO CHECK THE PERFORMANCE WHEN BUMP=0

    alpha_F = 0.10  # alpha=0: nobody changes their mind

    alpha_A = alpha_F

    damping = 0.0  #its harder to go back from YES to NO again. =1 means no effect, =0.5 half the movement from Y->N than the other way around, =0 never go back from Y to N

    mutual_encouragement = 0.50  # when two Adopters meet, they convince each other even more

    threshold = 0.50  # larger than, to be an Adopte

    print "\n\nPersuasion process on network, with Niter:", Niter, "\n"

    dir = "../Results/weight_shifts/persuasion/"

    output_file2 = "../Results/weight_shifts/Final_distance_vs_bump_alpha" + str(
        alpha_F) + "_damping" + str(damping) + "_mutual_encourg" + str(
            mutual_encouragement
        ) + "_threshold" + str(threshold) + "_num_reseed_per_shift" + str(
            num_reseeds) + "_" + str(Niter) + "iter_intervention_start" + str(
                basic_intervention_start_day) + "_window" + str(
                    time_window_ahead) + ".dat"
    file2 = open(output_file2, 'wt')
    file2.close()

    output_file5 = "../Results/weight_shifts/Landscape_intervention_alpha" + str(
        alpha_F) + "_damping" + str(damping) + "_mutual_encourg" + str(
            mutual_encouragement) + "_threshold" + str(
                threshold) + "_num_reseed_per_shift" + str(
                    num_reseeds) + "_" + str(Niter) + "iter_start" + str(
                        basic_intervention_start_day) + "_window" + str(
                            time_window_ahead) + ".dat"
    file5 = open(output_file5, 'wt')
    file5.close()

    bump = min_bump
    while bump <= max_bump:

        print "bump:", bump

        output_file = dir + "Time_evolutions_Persuasion_alpha" + str(
            alpha_F) + "_damping" + str(damping) + "_mutual_encourg" + str(
                mutual_encouragement
            ) + "_threshold" + str(threshold) + "_num_reseed_per_shift" + str(
                num_reseeds) + "_" + str(
                    Niter) + "iter_intervention_start" + str(
                        basic_intervention_start_day) + "_window" + str(
                            time_window_ahead) + "_bump" + str(bump) + ".dat"
        file = open(output_file, 'wt')
        file.close()

        list_intervention_days = []

        dict_days_list_distances_Att = {}
        for i in range(0, 244):
            dict_days_list_distances_Att[i] = []

        list_distances_150day_tot = []
        list_distances_150day_Att = []
        list_distances_150day_F = []

        list_distances_200day_tot = []
        list_distances_200day_Att = []
        list_distances_200day_F = []

        list_distances_at_end_tot = []
        list_distances_at_end_Att = []
        list_distances_at_end_F = []

        tot_number_interventions = 0
        tot_number_interventions_Att = 0
        tot_number_interventions_F = 0

        tot_number_successful_interventions = 0
        tot_number_successful_interventions_Att = 0
        tot_number_successful_interventions_F = 0

        time_evol_number_adopters_tot_ITER = [
        ]  # list of complete single realizations of the dynamics
        time_evol_number_adopters_Att_ITER = []
        time_evol_number_adopters_F_ITER = []

        for iter in range(Niter):

            print "   iter:   ", iter

            list_t = []
            time_evol_number_adopters_Att = [
            ]  # for a single realization of the dynamics ONLY ATTENDING ADOPTERS
            time_evol_number_adopters_F = [
            ]  # for a single realization of the dynamics ONLY fellows ADOPTERS
            time_evol_number_tot_adopters = [
            ]  # Attendings and fellows as adopters

            if random_start == "YES":  # i pick the first intervention day
                sign = random.random()
                if sign < 0.5:
                    sign = -1.
                else:
                    sign = 1.

                delta_day = random.random() * 5.
                start_intervention = int(
                    basic_intervention_start_day + sign * delta_day
                )  #    i let the system evolve freely for a some time  before i start re-seeding

            else:
                start_intervention = basic_intervention_start_day

            num_adopters, seed_shift, max_shift = set_ic(
                G, threshold
            )  # i establish who is Adopter and NonAdopter initially, and count how many shifts i have total

            time_evol_number_adopters_Att.append(
                float(num_adopters))  # the initial two adopters i know are att
            time_evol_number_adopters_F.append(float(0))
            time_evol_number_tot_adopters.append(
                float(num_adopters))  # Attendings and fellows as adopters

            list_t.append(0)

            next_intervention_day = start_intervention

            # the dynamics starts:
            t = int(seed_shift) + 1

            while t <= max_shift:  # loop over shifts, in chronological order  (the order is the day index since seeding_day)
                # print t,  time_evol_number_adopters_Att

                old_num_Att_Adopters = 0
                old_num_F_Adopters = 0
                old_num_Adopters = 0  #count number of adopters before an intervention
                for n in G.nodes():
                    try:
                        if G.node[n]["status"] == "Adopter":
                            old_num_Adopters += 1.
                            if G.node[n]["type"] == "A":
                                old_num_Att_Adopters += 1.
                            elif G.node[n]["type"] == "F":
                                old_num_F_Adopters += 1.

                    except KeyError:
                        pass  #to ignore the shift-nodes

                flag_future = look_ahead(
                    G, time_window_ahead, t
                )  # i evaluate the next few days, to see if any adopter will be on call: if not, i re-seed some more
                if flag_future == "YES" and t >= start_intervention and t == next_intervention_day:
                    flag_tot, flag_Att, flag_F = intervention(
                        G, t, bump, threshold, num_reseeds,
                        list_intervention_days)  # num of Att intervened

                    next_intervention_day += time_window_ahead  # i dont reseed everyday of the look_ahead window, just the first day of it

                    tot_number_interventions += flag_tot
                    tot_number_interventions_Att += flag_Att
                    tot_number_interventions_F += flag_F

                    num_Att_Adopters = 0
                    num_F_Adopters = 0
                    num_Adopters = 0
                    for n in G.nodes():
                        try:
                            if G.node[n][
                                    "status"] == "Adopter":  #first i check if any doctor is an adopter in this shift
                                num_Adopters += 1.
                                if G.node[n][
                                        "type"] == "A":  #first i check if any doctor is an adopter in this shift
                                    num_Att_Adopters += 1.
                                elif G.node[n][
                                        "type"] == "F":  #first i check if any doctor is an adopter in this shift
                                    num_F_Adopters += 1.

                        except KeyError:
                            pass

                    if old_num_Adopters < num_Adopters:

                        tot_number_successful_interventions += 1
                        if num_Adopters - old_num_Adopters > num_reseeds:
                            print "how did i bumped more doctors than", num_reseed, "?? ", t, ": ", num_Adopters - old_num_Adopters

                    if old_num_Att_Adopters < num_Att_Adopters:
                        tot_number_successful_interventions_Att += 1

                    if old_num_F_Adopters < num_F_Adopters:
                        tot_number_successful_interventions_F += 1

                list_t.append(t)
                for n in G.nodes():
                    if G.node[n]['type'] == "shift" and G.node[n][
                            'order'] == t:  # i look for the shift corresponding to that time step

                        shift_lenght = int(G.node[n]['shift_lenght'])

                        if shift_lenght == 2 and n not in list_id_weekends_T3:
                            shift_lenght = 1  # because during weekends, the fellow does rounds one day with Att1 and the other day with Att2.  (weekend shifts for T3 are two day long, with no sharing fellows)

                        flag_possible_persuasion = 0
                        for doctor in G.neighbors(n):
                            if G.node[doctor][
                                    "status"] == "Adopter":  #first i check if any doctor is an adopter in this shift
                                flag_possible_persuasion = 1
                                break

                        if flag_possible_persuasion == 1:
                            list_doctors = []
                            for doctor in G.neighbors(
                                    n):  # for all drs in that shift
                                list_doctors.append(doctor)

                            pairs = itertools.combinations(
                                list_doctors,
                                2)  # cos the shift can be 2 but also 3 doctors
                            for pair in pairs:
                                doctor1 = pair[0]
                                doctor2 = pair[1]

                                if G.node[doctor1]['status'] != G.node[doctor2][
                                        'status']:  # if they think differently,
                                    # there will be persuasion
                                    persuasion(
                                        G, damping, doctor1, doctor2, alpha_A,
                                        alpha_F, threshold, shift_lenght
                                    )  # i move their values of opinion
                                    update_opinions(
                                        G, threshold, doctor1, doctor2
                                    )  #  i update status and make sure the values of the vectors stay between [0,1]

                                else:  # if two Adopters meet, they encourage each other (if two NonAdopters, nothing happens)

                                    mutual_reinforcement(
                                        G, mutual_encouragement, doctor1,
                                        doctor2, shift_lenght)

                list_Att_Adopters = []
                list_F_Adopters = []
                list_Adopters = []  #count how many i have at this time
                for n in G.nodes():
                    try:
                        if G.node[n]["status"] == "Adopter":
                            list_Adopters.append(G.node[n]["label"])
                            if G.node[n]["type"] == "A":
                                list_Att_Adopters.append(G.node[n]["label"])
                            elif G.node[n]["type"] == "F":
                                list_F_Adopters.append(G.node[n]["label"])
                    except:
                        pass  # if the node is a shift, it doesnt have a 'status' attribute

                time_evol_number_adopters_Att.append(
                    float(len(list_Att_Adopters)))
                time_evol_number_adopters_F.append(float(len(list_F_Adopters)))
                time_evol_number_tot_adopters.append(float(
                    len(list_Adopters)))  # Attendings and fellows as adopters

                #print t, "num (att) adopters:",len(list_Att_Adopters),"num (f) adopters:",len(list_F_Adopters),"num (tot) adopters:",len(list_Adopters)

                dict_days_list_distances_Att[t].append(
                    time_evol_number_adopters_Att[t] - list_actual_evol[t])

                t += 1

                ############# end while loop over t

            time_evol_number_adopters_Att_ITER.append(
                time_evol_number_adopters_Att)
            time_evol_number_adopters_F_ITER.append(
                time_evol_number_adopters_F)
            time_evol_number_adopters_tot_ITER.append(
                time_evol_number_tot_adopters)

            list_distances_150day_Att.append(
                time_evol_number_adopters_Att[-93] -
                list_actual_evol[-93])  # because last day is 243
            list_distances_150day_tot.append(
                time_evol_number_tot_adopters[-93] - list_actual_evol[-93])

            list_distances_200day_Att.append(
                time_evol_number_adopters_Att[-43] - list_actual_evol[-43])
            list_distances_200day_tot.append(
                time_evol_number_tot_adopters[-43] - list_actual_evol[-43])

            list_distances_at_end_Att.append(
                time_evol_number_adopters_Att[-1] - list_actual_evol[-1])
            list_distances_at_end_tot.append(
                time_evol_number_tot_adopters[-1] - list_actual_evol[-1])

        #  print  "diff. at day 200:",time_evol_number_adopters[-43]-list_actual_evol[-43],time_evol_number_adopters[-43],list_actual_evol[-43],"diff. at the end:",time_evol_number_adopters[-1]-list_actual_evol[-1],time_evol_number_adopters[-1],list_actual_evol[-1] ," if i count tot Att+Fellows at end:",time_evol_number_tot_adopters[-1],"for bump:",bump

    ##############end loop Niter

        parameters = [
            alpha_F, damping, mutual_encouragement, threshold, bump,
            time_window_ahead
        ]
        calculate_envelope_set_curves.calculate_envelope(
            time_evol_number_adopters_Att_ITER, 95, "Persuasion_intervention",
            parameters)

        # print time_evol_number_adopters_Att_ITER

        file = open(output_file, 'wt')
        for i in range(
                len(time_evol_number_adopters_Att)):  #time step by time step
            list_fixed_t_Att = []
            list_fixed_t_F = []
            list_fixed_t_tot = []

            for iteracion in range(
                    Niter):  #loop over all independent iter of the process

                list_fixed_t_Att.append(
                    time_evol_number_adopters_Att_ITER[iteracion]
                    [i])  # i collect all values for the same t, different iter
                list_fixed_t_F.append(
                    time_evol_number_adopters_F_ITER[iteracion]
                    [i])  # i collect all values for the same t, different iter
                list_fixed_t_tot.append(
                    time_evol_number_adopters_tot_ITER[iteracion]
                    [i])  # i collect all values for the same t, different iter

            print >> file, list_t[i], numpy.mean(list_fixed_t_Att), numpy.std(
                list_fixed_t_Att
            ), numpy.mean(list_fixed_t_F), numpy.std(
                list_fixed_t_F
            ), numpy.mean(list_fixed_t_tot), numpy.std(
                list_fixed_t_tot
            ), dict_num_Att_so_far[i], numpy.mean(list_fixed_t_Att) / float(
                dict_num_Att_so_far[i]), dict_num_F_so_far[i], numpy.mean(
                    list_fixed_t_F) / float(
                        dict_num_F_so_far[i]
                    ), alpha_F, damping, mutual_encouragement, threshold
        file.close()

        print "   written:", output_file

        try:
            fraction_success_interv = float(
                tot_number_successful_interventions) / float(
                    tot_number_interventions)  #averages over Niter
        except ZeroDivisionError:
            fraction_success_interv = 0

        try:
            fraction_success_interv_Att = float(
                tot_number_successful_interventions_Att) / float(
                    tot_number_interventions_Att)  #averages over Niter
        except ZeroDivisionError:
            fraction_success_interv_Att = 0

        try:
            fraction_success_interv_F = float(
                tot_number_successful_interventions_F) / float(
                    tot_number_interventions_F)  #averages over Niter
        except ZeroDivisionError:
            fraction_success_interv_F = 0

        file2 = open(output_file2, 'at')
        print >> file2, bump, numpy.mean(list_distances_150day_Att),numpy.std(list_distances_150day_Att), \
            numpy.mean(list_distances_150day_tot),numpy.std(list_distances_150day_tot), \
            numpy.mean(list_distances_200day_Att),numpy.std(list_distances_200day_Att), \
            numpy.mean(list_distances_200day_tot),numpy.std(list_distances_200day_tot),\
            numpy.mean(list_distances_at_end_Att),numpy.std(list_distances_at_end_Att), \
            numpy.mean(list_distances_at_end_tot), numpy.std(list_distances_at_end_tot),\
            float(tot_number_interventions)/float(Niter) , float(tot_number_interventions_Att)/float(Niter) ,float(tot_number_interventions_F)/float(Niter) ,fraction_success_interv, fraction_success_interv_Att, fraction_success_interv_F, bump/float(threshold)
        file2.close()

        print "fraction successful interventions", fraction_success_interv, "fraction successful interventions on Att", fraction_success_interv_Att, "fraction successful interventions on F", fraction_success_interv_F

        print "   written:", output_file2

        if len(list_intervention_days) > 0:
            histogram_filename = "../Results/weight_shifts/histogr_interv_days_alpha" + str(
                alpha_F) + "_damping" + str(damping) + "_mutual_encourg" + str(
                    mutual_encouragement) + "_threshold" + str(
                        threshold
                    ) + "_num_reseed_per_shift" + str(num_reseeds) + "_" + str(
                        Niter) + "iter_intervention_start" + str(
                            basic_intervention_start_day) + "_window" + str(
                                time_window_ahead) + "_bump" + str(
                                    bump) + ".dat"

            #       print "list interv days:",list_intervention_days

            histograma_gral_negv_posit.histograma(
                list_intervention_days, histogram_filename, True
            )  # last parameter: whether i want to print out the values Prob=0 or not

            print "written histogram interv. days:", histogram_filename

        else:
            print "empty list_intervention_days"
            raw_input()

        file5 = open(output_file5, 'at')
        for t in range(0, 244):
            print >> file5, bump, t, numpy.mean(
                dict_days_list_distances_Att[t]), numpy.std(
                    dict_days_list_distances_Att[t]
                ), alpha_A, damping, mutual_encouragement, threshold
        # print bump, t, numpy.mean(dict_days_list_distances_Att[t]),dict_days_list_distances_Att[t]
        file5.close()

        bump += delta_bump
        ########################## end loop over bump

    print "\nwritten:", output_file2
    print "   written:", output_file5
Exemplo n.º 13
0
def main(graph_name):

    cutting_day = 125  # to separate   training-testing

    G = nx.read_gml(graph_name)

    all_team = "NO"  # as adopters or not

    dir_real_data = '../Results/'

    delta_end = 3  # >= than + or -  dr difference at the end of the evolution (NO realization ends up closer than this!!!! if 2, i get and empty list!!!)

    Niter_training = 1000
    Niter_testing = 1000

    ######################################################################################
    #  I read the file of the actual evolution of the idea spreading in the hospital:   ##
    ######################################################################################

    if all_team == "YES":
        filename_actual_evol = dir_real_data + "HospitalModel_august1_adoption_counts_all_team_as_adopters_SIMPLER.csv"

    else:
        filename_actual_evol = dir_real_data + "HospitalModel_august1_adoption_counts_SIMPLER.csv"
    #ya no necesito CAMBIAR TB EL NOMBRE DEL ARCHIVO EN EL CODIGO PARA COMPARAR CURVAs

    list_actual_evol = []
    result_actual_file = csv.reader(open(filename_actual_evol, 'rb'),
                                    delimiter=',')
    cont = 0
    for row in result_actual_file:
        if cont > 0:  # i ignore the first line with the headers

            num_adopters = row[3]

            list_actual_evol.append(float(num_adopters))

        cont += 1

    list_actual_evol_training = list_actual_evol[:cutting_day]
    list_actual_evol_testing = list_actual_evol[(cutting_day - 1):]

    ##################################################################

    #../Results/network_final_schedule_withTeam3/infection/Average_time_evolution_Infection_p0.9_Immune0.5_1000iter_2012.dat

    prob_min = 0.09
    prob_max = 1.01
    delta_prob = 0.1

    prob_Immune_min = 0.00
    prob_Immune_max = 1.01
    delta_prob_Immune = 0.1

    dir = "../Results/network_final_schedule_withTeam3_local/infection/"

    dict_filenames_tot_distance = {
    }  # i will save the filename as key and the tot distance from that curve to the original one

    dict_filenames_list_dict_network_states = {
    }  # i will save the filename as key and the list of networks at cutting day as value

    prob_Immune = prob_Immune_min
    while prob_Immune <= prob_Immune_max:

        print "prom Immune:", prob_Immune

        prob_infection = prob_min
        while prob_infection <= prob_max:

            print "  p:", prob_infection

            output_file2 = dir + "Average_time_evolution_Infection_training_p" + str(
                prob_infection) + "_" + "Immune" + str(
                    prob_Immune) + "_" + str(Niter_training) + "iter_2012.dat"
            file2 = open(output_file2, 'wt')
            file2.close()

            # i create the empty list of list for the Niter temporal evolutions
            num_shifts = 0
            for n in G.nodes():
                G.node[n]["status"] = "S"
                if G.node[n]['type'] == "shift":
                    num_shifts += 1

        #  list_final_I_values_fixed_p=[]  # i dont care about the final values right now, but about the whole time evol
            list_lists_t_evolutions = []

            list_dist_fixed_parameters = []
            list_dist_abs_at_ending_point_fixed_parameters = []

            list_dict_network_states = []

            for iter in range(Niter_training):

                print "     iter:", iter

                dict_network_states = {}

                #######OJO~!!!!!!!!!! COMENTAR ESTO CUANDO ESTOY BARRIENDO TOOOOOOOOOODO EL ESPACIO DE PARAMETROS
                #   file_name_indiv_evol=output_file2.strip("Average_").split('.dat')[0]+"_indiv_iter"+str(iter)+".dat"

                #  file4 = open(file_name_indiv_evol,'wt')
                # file4.close()
                ##########################################

                list_I = []  #list infected doctors
                list_ordering = []
                list_s = []
                list_A = []
                list_F = []

                ########### set I.C.

                max_order = 0
                for n in G.nodes():
                    G.node[n]["status"] = "S"  # all nodes are Susceptible
                    if G.node[n]['type'] == "shift":
                        list_s.append(n)
                        if G.node[n]['order'] > max_order:
                            max_order = G.node[n]['order']
                    else:
                        if G.node[n]['label'] == "Wunderink" or G.node[n][
                                "label"] == "Weiss":
                            G.node[n]["status"] = "I"
                            list_I.append(G.node[n]['label'])

                            ######################## WHAT ABOUT SMITH AND SPORN???

                        if G.node[n]['type'] == "A":
                            list_A.append(n)

                        if G.node[n]['type'] == "F":
                            list_F.append(n)

                list_single_t_evolution = []
                list_single_t_evolution.append(
                    2.0)  # I always start with TWO infected doctors!!

                for n in G.nodes(
                ):  # i make some DOCTORs INMUNE  (anyone except Weiss and Wunderink)
                    if (G.node[n]['type'] == "A") or (G.node[n]['type']
                                                      == "F"):
                        if G.node[n]['label'] != "Wunderink" and G.node[n][
                                "label"] != "Weiss":
                            rand = random.random()
                            if rand < prob_Immune:
                                G.node[n]["status"] = "Immune"

            #   print max_order

            ################# the dynamics starts:

                t = 1
                while t < cutting_day:  # loop over shifts, in order   just until cutting day (training segment)
                    for n in G.nodes():
                        if G.node[n]['type'] == "shift" and G.node[n][
                                'order'] == t:
                            flag_possible_infection = 0
                            for doctor in G.neighbors(
                                    n
                            ):  #first i check if any doctor is infected in this shift
                                if G.node[doctor]["status"] == "I":
                                    flag_possible_infection = 1

                            if flag_possible_infection:
                                for doctor in G.neighbors(
                                        n
                                ):  # then the doctors in that shift, gets infected with prob_infection
                                    if G.node[doctor]["status"] == "S":
                                        rand = random.random()
                                        if rand < prob_infection:
                                            G.node[doctor]["status"] = "I"
                                            list_I.append(
                                                G.node[doctor]["label"])

                    list_single_t_evolution.append(float(
                        len(list_I)))  #/(len(list_A)+len(list_F)))

                    t += 1

                ######## end t loop

                for n in G.nodes():
                    if G.node[n]['type'] != "shift":
                        dict_network_states[G.node[n]
                                            ["label"]] = G.node[n]["status"]

                list_dict_network_states.append(dict_network_states)

                # print "number infected at the cutting point:", len(list_I), list_I

                list_lists_t_evolutions.append(list_single_t_evolution)

                list_dist_fixed_parameters.append(
                    compare_real_evol_vs_simus_to_be_called.compare_two_curves(
                        list_actual_evol_training, list_single_t_evolution))

                list_dist_abs_at_ending_point_fixed_parameters.append(
                    abs(list_single_t_evolution[-1] -
                        list_actual_evol_training[-1])
                )  # i save the distance at the ending point between the current simu and actual evol

            # print "distance at ending point:", abs(list_single_t_evolution[-1]-list_actual_evol_training[-1])

            ######## end loop Niter for the training fase

            list_pair_dist_std_delta_end = []

            list_pair_dist_std_delta_end.append(
                numpy.mean(list_dist_fixed_parameters)
            )  # average dist between the curves over Niter
            list_pair_dist_std_delta_end.append(
                numpy.std(list_dist_fixed_parameters))

            list_pair_dist_std_delta_end.append(
                numpy.mean(list_dist_abs_at_ending_point_fixed_parameters))

            if (
                    numpy.mean(list_dist_abs_at_ending_point_fixed_parameters)
            ) <= delta_end:  # i only consider situations close enough at the ending point

                dict_filenames_tot_distance[
                    output_file2] = list_pair_dist_std_delta_end

                dict_filenames_list_dict_network_states[
                    output_file2] = list_dict_network_states

            file2 = open(output_file2, 'at')
            for s in range(len(list_single_t_evolution)):
                list_fixed_t = []
                for iter in range(Niter_training):
                    list_fixed_t.append(list_lists_t_evolutions[iter][s])
                print >> file2, s, numpy.mean(list_fixed_t)
            file2.close()

            prob_infection += delta_prob
        prob_Immune += delta_prob_Immune

    list_order_dict = compare_real_evol_vs_simus_to_be_called.pick_minimum_same_end(
        dict_filenames_tot_distance, "Infection_training", all_team,
        Niter_training)
    # it returns a list of tuples like this :  ('../Results/network_final_schedule_withTeam3_local/infection/Average_time_evolution_Infection_training_p0.7_Immune0.0_2iter_2012.dat', [2540.0, 208.0, 1.0])  the best set of parameters  being the fist one of the elements in the list.

    optimum_filename = list_order_dict[0][0]

    prob_infection = float(list_order_dict[0][0].split("_p")[1][0:3])
    prob_Immune = float(list_order_dict[0][0].split("_Immune")[1][0:3])

    #   raw_input()
    print "starting testing fase with:"
    print "p=", prob_infection, " and Pimmune=", prob_Immune

    #  i already know the optimum, now i run the dynamics with those values, starting from the average state on the cutting point, and test:

    list_dist_fixed_parameters = []
    list_dist_at_ending_point_fixed_parameters = []
    list_dist_abs_at_ending_point_fixed_parameters = []

    list_lists_t_evolutions = []

    for iter in range(Niter_testing):

        dict_dr_status_current_iter = dict_filenames_list_dict_network_states[
            optimum_filename][iter]

        #  print dict_dr_status_current_iter
        list_I = []  #list infected doctors
        list_Immune = []
        for node in G.nodes():
            if G.node[node]['type'] != "shift":
                label = G.node[node]['label']

                G.node[node]["status"] = "S"  #by default, all are susceptible

                G.node[node]["status"] = dict_dr_status_current_iter[label]
                if G.node[node]["status"] == "I":
                    list_I.append(G.node[node]["label"])
                elif G.node[node]["status"] == "Immune":
                    list_Immune.append(G.node[node]["label"])

        print "# I at the beginning of the testing fase:", len(list_I), float(
            len(list_I)) / 36., " and # Immune:", len(list_Immune), float(
                len(list_Immune)) / 36.

        #  print "     iter:",iter

        list_single_t_evolution = []
        list_single_t_evolution.append(len(list_I))

        t = cutting_day
        while t <= max_order:  # loop over shifts, in order   just until cutting day (training segment)

            for n in G.nodes():
                if G.node[n]['type'] == "shift" and G.node[n]['order'] == t:
                    flag_possible_infection = 0
                    for doctor in G.neighbors(
                            n
                    ):  #first i check if any doctor is infected in this shift
                        if G.node[doctor]["status"] == "I":
                            flag_possible_infection = 1

                    if flag_possible_infection:
                        for doctor in G.neighbors(
                                n
                        ):  # then the doctors in that shift, gets infected with prob_infection
                            if G.node[doctor]["status"] == "S":
                                rand = random.random()
                                if rand < prob_infection:
                                    G.node[doctor]["status"] = "I"
                                    list_I.append(G.node[doctor]["label"])

            list_single_t_evolution.append(float(len(list_I)))

            t += 1

        list_lists_t_evolutions.append(list_single_t_evolution)

        list_I = []  #list infected doctors
        list_Immune = []
        for node in G.nodes():
            if G.node[node]['type'] != "shift":
                label = G.node[node]['label']

                if G.node[node]["status"] == "I":
                    list_I.append(G.node[node]["label"])
                elif G.node[node]["status"] == "Immune":
                    list_Immune.append(G.node[node]["label"])

        print "  # I at the END of the testing fase:", len(list_I), float(
            len(list_I)) / 36., " and # Immune:", len(list_Immune), float(
                len(list_Immune)) / 36., "\n"

        list_dist_fixed_parameters.append(
            compare_real_evol_vs_simus_to_be_called.compare_two_curves(
                list_actual_evol_testing, list_single_t_evolution))

        list_dist_abs_at_ending_point_fixed_parameters.append(
            abs(list_single_t_evolution[-1] - list_actual_evol_testing[-1])
        )  # i save the distance at the ending point between the current simu and actual evol

        list_dist_at_ending_point_fixed_parameters.append(
            list_single_t_evolution[-1] - list_actual_evol_testing[-1]
        )  # i save the distance at the ending point between the current simu and actual evol

    ############### end loop Niter  for the testing

    num_valid_endings = 0.
    for item in list_dist_abs_at_ending_point_fixed_parameters:
        if item <= delta_end:  # i count how many realizations i get close enough at the ending point
            num_valid_endings += 1.

    print "average distance of the optimum in the testing segment:", numpy.mean(
        list_dist_fixed_parameters), numpy.std(
            list_dist_fixed_parameters), list_dist_fixed_parameters
    print "fraction of realizations that end within delta_doctor:", num_valid_endings / Niter_testing, list_dist_abs_at_ending_point_fixed_parameters

    histograma_gral_negv_posit.histograma(
        list_dist_at_ending_point_fixed_parameters,
        "../Results/histogr_raw_distances_ending_test_train_infection_p" +
        str(prob_infection) + "_" + "Immune" + str(prob_Immune) + "_" +
        str(Niter_training) + "iter_end_point.dat")

    output_file8 = "../Results/List_tot_distances_training_segment_infection_p" + str(
        prob_infection) + "_" + "Immune" + str(prob_Immune) + "_" + str(
            Niter_training) + "iter_end_point.dat"
    file8 = open(output_file8, 'wt')

    for item in list_dist_fixed_parameters:
        print >> file8, item
    file8.close()

    output_file9 = "../Results/List_distances_ending_training_segment_infection_p" + str(
        prob_infection) + "_" + "Immune" + str(prob_Immune) + "_" + str(
            Niter_training) + "iter_end_point.dat"
    file9 = open(output_file9, 'wt')

    for item in list_dist_abs_at_ending_point_fixed_parameters:
        print >> file9, item
    file9.close()

    output_file5 = dir + "Average_time_evolution_Infection_testing_p" + str(
        prob_infection) + "_" + "Immune" + str(prob_Immune) + "_" + str(
            Niter_testing) + "iter_2012.dat"

    file5 = open(output_file5, 'wt')
    for s in range(len(list_single_t_evolution)):
        list_fixed_t = []
        for iter in range(Niter_testing):
            list_fixed_t.append(list_lists_t_evolutions[iter][s])
        print >> file5, s + cutting_day, numpy.mean(list_fixed_t)
    #  print  s+cutting_day,numpy.mean(list_fixed_t)
    file5.close()

    print "written training segment file:", optimum_filename
    print "written testing segment file:", output_file5

    output_file10 = "../Results/Summary_results_training_segment_infection_p" + str(
        prob_infection) + "_" + "Immune" + str(prob_Immune) + "_" + str(
            Niter_training) + "iter.dat"
    file10 = open(output_file10, 'wt')

    print >> file10, "Summary results from train-testing persuasion with", Niter_training, Niter_testing, "iter (respectively), using all the individual cutting points as IC, and with values for the parameters:  prob_inf ", prob_infection, " prob immune: ", prob_Immune

    print >> file10, "average distance of the optimum in the testing segment:", numpy.mean(
        list_dist_fixed_parameters), numpy.std(
            list_dist_fixed_parameters), list_dist_fixed_parameters
    print >> file10, "fraction of realizations that end within delta_doctor:", num_valid_endings / Niter_testing, list_dist_at_ending_point_fixed_parameters

    print >> file10, "written training segment file:", optimum_filename
    print >> file10, "written testing segment file:", output_file5

    file10.close()
def main(graph_name):
    """Run an infection-with-memory (dose/threshold) contagion model on the
    doctor/shift network read from *graph_name* (GML) and compare it to the
    actual adopter evolution.

    Each risky contact during a shift adds `dose` to a susceptible doctor's
    "infec_value"; the doctor becomes infected ("I") once that value reaches
    `infect_threshold`.  For every (prob_infection, prob_Immune, threshold,
    dose) combination the function averages `Niter` stochastic runs, measures
    the distance between the simulated and the real adoption curves, and
    writes average-evolution, histogram and summary files under ../Results/.

    NOTE(review): Python 2 code (print statements, pre-2.0 networkx
    `G.node[...]` attribute access).  Depends on module-level helpers defined
    elsewhere in this file: look_for_T3_weekends,
    compare_real_evol_vs_simus_to_be_called, histograma_gral_negv_posit,
    histograma_bines_gral, calculate_envelope_set_curves.
    """

    G = nx.read_gml(graph_name)

    # "YES": evaluate one fixed parameter point and collect full statistics;
    # "NO": scan the whole parameter landscape (ranges below must be widened).
    for_testing_fixed_set = "YES"  # when YES, fixed values param, to get all statistics on final distances etc
    # change the range for the parameters accordingly

    envelopes = "NO"

    Niter = 1000

    percent_envelope = 95.

    list_id_weekends_T3 = look_for_T3_weekends(
        G
    )  # T3 doesnt share fellows in the weekend  (but they are the exception)

    cutting_day = 175

    all_team = "NO"  # as adopters or not

    dir_real_data = '../Results/'

    # NOTE(review): `dir` shadows the builtin of the same name; harmless in
    # this function but worth renaming (e.g. results_dir) on a future refactor.
    dir = "../Results/weight_shifts/infection/"

    delta_end = 3.  # >= than + or -  dr difference at the end of the evolution (NO realization ends up closer than this!!!! if 2, i get and empty list!!!)
    Nbins = 20  # for the histogram of sum of distances

    # Landscape output file is only created when scanning the parameter space.
    if for_testing_fixed_set == "NO":
        output_file3 = "../Results/weight_shifts/Landscape_parameters_infection_memory_fixed_dose_thr_" + str(
            Niter) + "iterFIXED_Thr0.2_Imm0.0.dat"
        file3 = open(output_file3, 'wt')
        file3.close()

######################################################################################
#  I read the file of the actual evolution of the idea spreading in the hospital:   ##
######################################################################################

    if all_team == "YES":
        print "remember that now i use the file of adopters without fellows\n../Results/Actual_evolution_adopters_NO_fellows_only_attendings.dat"
        exit()

    else:
        filename_actual_evol = "../Results/Actual_evolution_adopters_NO_fellows_only_attendings.dat"

    file1 = open(
        filename_actual_evol, 'r'
    )  ## i read the file:  list_dates_and_names_current_adopters.txt  (created with: extract_real_evolution_number_adopters.py)
    list_lines_file = file1.readlines()

    # One adopter count per line; second space-separated column is the count.
    list_actual_evol = []
    for line in list_lines_file:  # [1:]:   # i exclude the first row

        num_adopters = float(line.split(" ")[1])
        list_actual_evol.append(num_adopters)

################################################################################

    # Parameter ranges.  With for_testing_fixed_set == "YES" each range covers
    # a single value (max = min + epsilon).  The .x01 slack on the upper bounds
    # compensates for float accumulation in the `+= delta` while loops below.
    prob_min = 0.3
    prob_max = 0.301
    delta_prob = 0.1

    prob_Immune_min = 0.00
    prob_Immune_max = 0.001
    delta_prob_Immune = 0.1

    dose_min = 0.7  # of a single encounter with an infected  (starting from zero doesnt make sense)
    dose_max = 0.701
    delta_dose = 0.01

    ##########  KEEP FIXED TO ONE
    infect_threshold_min = 1.00  # i can define the dose in units of the threshold
    infect_threshold_max = 1.001
    delta_infect_threshold = 0.1
    ############

    dict_filenames_tot_distance = {
    }  # i will save the filename as key and the tot distance from that curve to the original one

    # 4-level scan: prob_Immune -> prob_infection -> infect_threshold -> dose.
    prob_Immune = prob_Immune_min
    while prob_Immune <= prob_Immune_max:

        print "prom Immune:", prob_Immune

        prob_infection = prob_min
        while prob_infection <= prob_max:

            print "  p:", prob_infection

            infect_threshold = infect_threshold_min
            while infect_threshold <= infect_threshold_max:

                print "  threshold:", infect_threshold

                dose = dose_min
                while dose <= dose_max:

                    print "  dose:", dose

                    # Output file for the Niter-averaged time evolution of
                    # this parameter combination (truncated before use).
                    if for_testing_fixed_set == "YES":
                        output_file2 = dir + "Average_time_evolution_Infection_memory_train_test_p" + str(
                            prob_infection) + "_Immune" + str(
                                prob_Immune) + "_FIXED_threshold" + str(
                                    infect_threshold) + "_dose" + str(
                                        dose) + "_" + str(Niter) + "iter.dat"
                    else:
                        output_file2 = dir + "Average_time_evolution_Infection_memory_p" + str(
                            prob_infection) + "_Immune" + str(
                                prob_Immune) + "_FIXED_threshold" + str(
                                    infect_threshold) + "_dose" + str(
                                        dose) + "_" + str(Niter) + "iter.dat"

                    file2 = open(output_file2, 'wt')
                    file2.close()

                    # Reset all node states and count shift nodes.
                    num_shifts = 0
                    for n in G.nodes():
                        G.node[n]["status"] = "S"
                        G.node[n][
                            "infec_value"] = 0.  # when this value goes over the infect_threshold, the dr is infected
                        if G.node[n]['type'] == "shift":
                            num_shifts += 1

                    list_lists_t_evolutions = [
                    ]  # i create the empty list of list for the Niter temporal evolutions

                    # Per-realization distance statistics for this parameter set.
                    list_dist_fixed_parameters = []
                    list_abs_dist_at_ending_point_fixed_parameters = []
                    list_dist_at_ending_point_fixed_parameters = []
                    list_final_num_infected = []

                    for iter in range(Niter):

                        #  print "     iter:",iter

                        list_I = []  #list infected doctors
                        # NOTE(review): list_ordering is never used below.
                        list_ordering = []
                        list_s = []

                        ########### set I.C.

                        # Initial condition: everybody susceptible except the
                        # two seed doctors (Wunderink, Weiss), seeded above the
                        # threshold so they start infected.  max_order = number
                        # of time steps (last shift index).
                        max_order = 0
                        for n in G.nodes():
                            G.node[n][
                                "status"] = "S"  # all nodes are Susceptible
                            if G.node[n]['type'] == "shift":
                                list_s.append(n)
                                if G.node[n]['order'] > max_order:
                                    max_order = G.node[n]['order']
                            else:
                                if G.node[n]['label'] == "Wunderink" or G.node[
                                        n]["label"] == "Weiss":
                                    G.node[n]["status"] = "I"
                                    G.node[n][
                                        "infec_value"] = infect_threshold + 1.
                                    list_I.append(G.node[n]['label'])

                        list_single_t_evolution = []
                        list_single_t_evolution.append(
                            2.0)  # I always start with TWO infected doctors!!

                        for n in G.nodes(
                        ):  # i make some DOCTORs INMUNE  (anyone except Weiss and Wunderink)
                            if (G.node[n]['type'] == "A") or (G.node[n]['type']
                                                              == "F"):
                                if G.node[n]['label'] != "Wunderink" and G.node[
                                        n]["label"] != "Weiss":
                                    rand = random.random()
                                    if rand < prob_Immune:
                                        G.node[n]["status"] = "Immune"

                        ################# the dynamics starts:

                        t = 1
                        while t <= max_order:  # loop over shifts, in order
                            for n in G.nodes():
                                if G.node[n]['type'] == "shift" and G.node[n][
                                        'order'] == t:
                                    shift_lenght = int(
                                        G.node[n]['shift_lenght'])

                                    if shift_lenght == 2 and n not in list_id_weekends_T3:
                                        shift_lenght = 1  # because during weekends, the fellow does rounds one day with Att1 and the other day with Att2.  (weekend shifts for T3 are two day long, with no sharing fellows)

                                    flag_possible_infection = 0
                                    for doctor in G.neighbors(
                                            n
                                    ):  #first i check if any doctor is infected in this shift
                                        if G.node[doctor]["status"] == "I":
                                            flag_possible_infection = 1

                                    if flag_possible_infection:
                                        for doctor in G.neighbors(
                                                n
                                        ):  # then the doctors in that shift, gets infected with prob_infection

                                            # One exposure chance per day of the
                                            # shift; each success adds `dose`.
                                            for i in range(shift_lenght):
                                                if G.node[doctor][
                                                        "status"] == "S":
                                                    rand = random.random()
                                                    if rand < prob_infection:  # with prob p the infection occurres

                                                        G.node[doctor][
                                                            "infec_value"] += dose  # and bumps the infection_value of that susceptible dr

                                                        if G.node[doctor][
                                                                "infec_value"] >= infect_threshold:  # becomes  infected

                                                            G.node[doctor][
                                                                "status"] = "I"
                                                            if G.node[doctor][
                                                                    "type"] == "A":  # fellows participate in the dynamics, but i only consider the attendings as real adopters
                                                                list_I.append(
                                                                    G.node[
                                                                        doctor]
                                                                    ["label"])

                        # for node in G.nodes():
                        #   if G.node[node]['type']!="shift":
                        #     print t, G.node[node]['label'], G.node[node]["infec_value"]
                        #raw_input()
                            list_single_t_evolution.append(float(len(list_I)))

                            t += 1
                            ######## end t loop

                        list_lists_t_evolutions.append(list_single_t_evolution)

                        # Whole-trajectory distance to the real curve, plus the
                        # absolute and signed gaps at the final time step.
                        list_dist_fixed_parameters.append(
                            compare_real_evol_vs_simus_to_be_called.
                            compare_two_curves(list_actual_evol,
                                               list_single_t_evolution))

                        list_abs_dist_at_ending_point_fixed_parameters.append(
                            abs(list_single_t_evolution[-1] -
                                list_actual_evol[-1])
                        )  # i save the distance at the ending point between the current simu and actual evol

                        list_dist_at_ending_point_fixed_parameters.append(
                            list_single_t_evolution[-1] - list_actual_evol[-1]
                        )  # i save the distance at the ending point between the current simu and actual evol

                        list_final_num_infected.append(
                            list_single_t_evolution[-1])

                        ######## end loop Niter

                    # Summary triple for this parameter set:
                    # [mean curve distance, its SD, mean |final gap|].
                    list_pair_dist_std_delta_end = []

                    list_pair_dist_std_delta_end.append(
                        numpy.mean(list_dist_fixed_parameters)
                    )  # average dist between the curves over Niter
                    list_pair_dist_std_delta_end.append(
                        numpy.std(list_dist_fixed_parameters))

                    list_pair_dist_std_delta_end.append(
                        numpy.mean(
                            list_abs_dist_at_ending_point_fixed_parameters))

                    if for_testing_fixed_set == "NO":
                        file3 = open(output_file3,
                                     'at')  # i print out the landscape
                        print >> file3, prob_infection, prob_Immune, numpy.mean(
                            list_abs_dist_at_ending_point_fixed_parameters
                        ), numpy.mean(list_dist_fixed_parameters), numpy.mean(
                            list_final_num_infected), numpy.std(
                                list_final_num_infected
                            ), numpy.std(list_final_num_infected) / numpy.mean(
                                list_final_num_infected)
                        file3.close()

                    if (
                            numpy.mean(
                                list_abs_dist_at_ending_point_fixed_parameters)
                    ) <= delta_end:  # i only consider situations close enough at the ending point

                        dict_filenames_tot_distance[
                            output_file2] = list_pair_dist_std_delta_end

                    # Write the Niter-averaged evolution: one line per time
                    # step, "step mean_num_infected".
                    file2 = open(output_file2, 'at')
                    for s in range(len(list_single_t_evolution)):
                        list_fixed_t = []
                        for iter in range(Niter):
                            list_fixed_t.append(
                                list_lists_t_evolutions[iter][s])
                        print >> file2, s, numpy.mean(list_fixed_t)
                    file2.close()

                    print "printed out: ", output_file2

                    if for_testing_fixed_set == "YES":

                        num_valid_endings = 0.
                        for item in list_abs_dist_at_ending_point_fixed_parameters:
                            if item <= delta_end:  # i count how many realizations i get close enough at the ending point
                                num_valid_endings += 1.

                        print "average distance of the optimum in the testing segment:", numpy.mean(
                            list_dist_fixed_parameters), numpy.std(
                                list_dist_fixed_parameters
                            ), list_dist_fixed_parameters, "\n"
                        print "fraction of realizations that end within delta_doctor:", num_valid_endings / Niter, "mean ending dist:", numpy.mean(
                            list_dist_at_ending_point_fixed_parameters
                        ), "SD final dist", numpy.std(
                            list_dist_at_ending_point_fixed_parameters
                        ), list_dist_at_ending_point_fixed_parameters, "\n"

                        histogram_filename = "../Results/weight_shifts/histogr_raw_distances_ending_infection_memory_p" + str(
                            prob_infection
                        ) + "_Immune" + str(prob_Immune) + "_threshold" + str(
                            infect_threshold) + "_dose" + str(
                                dose) + "_" + str(Niter) + "iter_day" + str(
                                    cutting_day) + ".dat"
                        histograma_gral_negv_posit.histograma(
                            list_dist_at_ending_point_fixed_parameters,
                            histogram_filename)

                        histogram_filename2 = "../Results/weight_shifts/histogr_sum_dist_traject_infection_memory_p" + str(
                            prob_infection
                        ) + "_Immune" + str(prob_Immune) + "_threshold" + str(
                            infect_threshold) + "_dose" + str(
                                dose) + "_" + str(Niter) + "iter_day" + str(
                                    cutting_day) + ".dat"
                        histograma_bines_gral.histograma_bins(
                            list_dist_fixed_parameters, Nbins,
                            histogram_filename2)

                    output_file10 = "../Results/weight_shifts/Summary_results_training_segment_infection_memory_distrib_p" + str(
                        prob_infection) + "_" + "FIXED_Immune" + str(
                            prob_Immune) + "_FIXED_threshold" + str(
                                infect_threshold
                            ) + "_dose" + str(dose) + "_" + str(
                                Niter) + "iter_day" + str(cutting_day) + ".dat"
                    file10 = open(output_file10, 'wt')

                    print >> file10, "Summary results from train-testing infection with", Niter, "iter, and with values for the parameters:  prob_inf ", prob_infection, " prob immune: ", prob_Immune, "infect. threshold:", infect_threshold, "dose:", dose, "\n"

                    # NOTE(review): num_valid_endings and histogram_filename are
                    # only defined in the for_testing_fixed_set == "YES" branch
                    # above; the prints below would raise NameError in landscape
                    # mode ("NO").  Harmless with the current fixed setting, but
                    # confirm before re-enabling the scan.
                    print >> file10, "average distance of the optimum in the testing segment:", numpy.mean(
                        list_dist_fixed_parameters), numpy.std(
                            list_dist_fixed_parameters
                        ), list_dist_fixed_parameters, "\n"
                    print >> file10, "fraction of realizations that end within delta_doctor:", num_valid_endings / Niter, "mean ending dist:", numpy.mean(
                        list_dist_at_ending_point_fixed_parameters
                    ), "SD final dist", numpy.std(
                        list_dist_at_ending_point_fixed_parameters
                    ), list_dist_at_ending_point_fixed_parameters, "\n"

                    print >> file10, "written optimum train_test evolution file:", output_file2
                    print >> file10, "written histogram file: ", histogram_filename

                    file10.close()

                    print "written Summary file: ", output_file10
                    print "written histogram file: ", histogram_filename

                    if envelopes == "YES":
                        calculate_envelope_set_curves.calculate_envelope(
                            list_lists_t_evolutions, percent_envelope,
                            "Infection_memory_fixed", [
                                prob_infection, prob_Immune, infect_threshold,
                                dose
                            ])

                    dose += delta_dose
                infect_threshold += delta_infect_threshold
            prob_infection += delta_prob
        prob_Immune += delta_prob_Immune

        # NOTE(review): this block is indented inside the prob_Immune while
        # loop; it looks intended to run once, after the full scan.  With the
        # current settings (for_testing_fixed_set == "YES") it never executes,
        # so the misplacement is latent — confirm before enabling "NO" mode.
        if for_testing_fixed_set == "NO":  # only if i am exploring the whole landscape, i need to call this function, otherwise, i already know the optimum
            compare_real_evol_vs_simus_to_be_called.pick_minimum_same_end(
                dict_filenames_tot_distance, "Infection_memory", all_team,
                Niter, None)
            print "written landscape file:", output_file3