def Sim(data,VARS=VARS):
    F =[]
    beta=rgh.beta
    total_wt=0
    domain={}

    for i in range(DOMAIN_NUMBER):
        #do this part by fitting u and OC instead
        #extract the fitting parameter values from the associated attribute and then do the scaling (initiation+processing, i.e. actually update the fitting parameter values)
        #VARS['domain_class_'+str(int(i+1))].init_sim_batch(batch_path_head+VARS['sim_batch_file_domain'+str(int(i+1))])
        #VARS['domain_class_'+str(int(i+1))].scale_opt_batch(batch_path_head+VARS['scale_operation_file_domain'+str(int(i+1))])
        
        #create matching lib dynamically during fitting
        if USE_BV:
            vars()['match_lib_fitting_'+str(i+1)+'A'],vars()['match_lib_fitting_'+str(i+1)+'B']=deepcopy(VARS['match_lib_'+str(i+1)+'A']),deepcopy(VARS['match_lib_'+str(i+1)+'B'])
            create_match_lib_during_fitting(domain_class=VARS['domain_class_'+str(int(i+1))],domain=VARS['domain'+str(int(i+1))+'A'],atm_list=VARS['atm_list_'+str(int(i+1))+'A'],pb_list=VARS['pb_list_domain'+str(int(i+1))+'a'],HO_list=VARS['HO_list_domain'+str(int(i+1))+'a'],match_lib=vars()['match_lib_fitting_'+str(int(i+1))+'A'])
        
        #grab the wt for each domain and calculate the total wt
        vars()['wt_domain'+str(int(i+1))]=VARS['rgh_domain'+str(int(i+1))].wt
        total_wt=total_wt+vars()['wt_domain'+str(int(i+1))]
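        #total_wt is used after this loop to normalize the per-domain weights so that they sum to 1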

        #update sorbates
        for j in range(VARS['Pb_NUMBER'][i]):
            pb_coors_a=[]
            O_coors_a=[]
            if len(VARS['Pb_ATTACH_ATOM'][i][j])==1:#monodentate case
                phi=getattr(VARS['rgh_domain'+str(int(i+1))],'phi')
                r=getattr(VARS['rgh_domain'+str(int(i+1))],'r')
                ids=[VARS['Pb_ATTACH_ATOM'][i][j][0]+'_D'+str(int(i+1))+'A']
                offset=VARS['Pb_ATTACH_ATOM_OFFSET'][i][j][0]
                pb_id=VARS['pb_list_domain'+str(int(i+1))+'a'][j]#pb_id is a str NOT list
                O_index=[0]+[sum(VARS['O_NUMBER'][i][0:ii+1]) for ii in range(len(VARS['O_NUMBER'][i]))]
                #e.g. O_NUMBER[i]=[1,2,2] means that inside this domain there is 1 OH corresponding to pb1, 2 OH's corresponding to pb2 and so on
                #O_index will then be [0,1,3,5], and O_id extracts the OH's belonging to sorbate j according to O_index
                O_id=VARS['HO_list_domain'+str(int(i+1))+'a'][O_index[j]:O_index[j+1]]#O_id is a list of str
                sorbate_coors=VARS['domain_class_'+str(int(i+1))].adding_sorbate_bipyramid_monodentate(domain=VARS['domain'+str(int(i+1))+'A'],phi=phi,r=r,attach_atm_id=ids,offset=offset,pb_id=pb_id,O_id=O_id)           
                pb_coors_a.append(sorbate_coors[0])
                O_coors_a.extend(sorbate_coors[1:])#the remaining coordinates belong to the sorbate oxygens
                pb_id_B=VARS['pb_list_domain'+str(int(i+1))+'b'][j]
                O_id_B=VARS['HO_list_domain'+str(int(i+1))+'b'][O_index[j]:O_index[j+1]]
                #now put the sorbate on the symmetrically related domain
                sorbate_ids=[pb_id_B]+O_id_B
                sorbate_els=['Pb']+['O']*(len(O_id_B))
                add_atom(domain=VARS['domain'+str(int(i+1))+'B'],ref_coor=np.array(pb_coors_a+O_coors_a)*[-1,1,1]-[-1.,0.06955,0.5],ids=sorbate_ids,els=sorbate_els)
            elif len(VARS['Pb_ATTACH_ATOM'][i][j])==2:#bidentate case
                theta=getattr(VARS['rgh_domain'+str(int(i+1))],'theta')
                phi=getattr(VARS['rgh_domain'+str(int(i+1))],'phi')
                ids=[VARS['Pb_ATTACH_ATOM'][i][j][0]+'_D'+str(int(i+1))+'A',VARS['Pb_ATTACH_ATOM'][i][j][1]+'_D'+str(int(i+1))+'A']
                offset=VARS['Pb_ATTACH_ATOM_OFFSET'][i][j]
                pb_id=VARS['pb_list_domain'+str(int(i+1))+'a'][j]
                O_index=[0]+[sum(VARS['O_NUMBER'][i][0:ii+1]) for ii in range(len(VARS['O_NUMBER'][i]))]
                O_id=VARS['HO_list_domain'+str(int(i+1))+'a'][O_index[j]:O_index[j+1]]
                sorbate_coors=VARS['domain_class_'+str(int(i+1))].adding_sorbate_trigonal_bipyramid(domain=VARS['domain'+str(int(i+1))+'A'],theta=theta,phi=phi,flag=VARS['FLAG'][i][j],extend_flag=VARS['ED_FLAG'][i][j],attach_atm_ids=ids,offset=offset,pb_id=pb_id,O_id=O_id,mirror=VARS['MIRROR'])
                pb_coors_a.append(sorbate_coors[0])
                O_coors_a.extend(sorbate_coors[1:])#the remaining coordinates belong to the sorbate oxygens
                pb_id_B=VARS['pb_list_domain'+str(int(i+1))+'b'][j]
                O_id_B=VARS['HO_list_domain'+str(int(i+1))+'b'][O_index[j]:O_index[j+1]]
                #now put the sorbate on the symmetrically related domain
                sorbate_ids=[pb_id_B]+O_id_B
                sorbate_els=['Pb']+['O']*(len(O_id_B))
                add_atom(domain=VARS['domain'+str(int(i+1))+'B'],ref_coor=np.array(pb_coors_a+O_coors_a)*[-1,1,1]-[-1.,0.06955,0.5],ids=sorbate_ids,els=sorbate_els)
            elif len(VARS['Pb_ATTACH_ATOM'][i][j])==3:#tridentate case (no oxygen sorbate here since it is a trigonal pyramid structure)
                ids=[VARS['Pb_ATTACH_ATOM'][i][j][0]+'_D'+str(int(i+1))+'A',VARS['Pb_ATTACH_ATOM'][i][j][1]+'_D'+str(int(i+1))+'A',VARS['Pb_ATTACH_ATOM'][i][j][2]+'_D'+str(int(i+1))+'A']
                offset=VARS['Pb_ATTACH_ATOM_OFFSET'][i][j]
                pb_id=VARS['pb_list_domain'+str(int(i+1))+'a'][j]
                O_index=[0]+[sum(VARS['O_NUMBER'][i][0:ii+1]) for ii in range(len(VARS['O_NUMBER'][i]))]
                O_id=VARS['HO_list_domain'+str(int(i+1))+'a'][O_index[j]:O_index[j+1]]
                sorbate_coors=VARS['domain_class_'+str(int(i+1))].adding_share_triple_trigonal_bipyramid(domain=VARS['domain'+str(int(i+1))+'A'],attach_atm_ids_ref=ids[0:2],attach_atm_id_third=[ids[-1]],offset=offset,sorbate_id=pb_id,sorbate_oxygen_ids=O_id)
                pb_coors_a.append(sorbate_coors[0])
                O_coors_a.extend(sorbate_coors[1:])#the remaining coordinates belong to the sorbate oxygens
                pb_id_B=VARS['pb_list_domain'+str(int(i+1))+'b'][j]
                O_id_B=VARS['HO_list_domain'+str(int(i+1))+'b'][O_index[j]:O_index[j+1]]
                #now put the sorbate on the symmetrically related domain
                sorbate_ids=[pb_id_B]+O_id_B
                sorbate_els=['Pb']+['O']*(len(O_id_B))
                add_atom(domain=VARS['domain'+str(int(i+1))+'B'],ref_coor=np.array(pb_coors_a+O_coors_a)*[-1,1,1]-[-1.,0.06955,0.5],ids=sorbate_ids,els=sorbate_els)    
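    #NOTE on the add_atom calls above: the domain-A sorbate coordinates are mapped onto the symmetry-related
    #domain B via (x,y,z)->(-x+1,y-0.06955,z-0.5) in fractional coordinates; the numerical offsets are
    #specific to the unit cell set up earlier in this script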
    #set up multiple domains
    #note for each domain there are two sub-domains which are symmetrically related to each other and therefore carry equal wt
    for i in range(DOMAIN_NUMBER):
        domain['domain'+str(int(i+1))+'A']={'slab':VARS['domain'+str(int(i+1))+'A'],'wt':0.5*vars()['wt_domain'+str(int(i+1))]/total_wt}
        domain['domain'+str(int(i+1))+'B']={'slab':VARS['domain'+str(int(i+1))+'B'],'wt':0.5*vars()['wt_domain'+str(int(i+1))]/total_wt}
    
    #set up sample
    sample = model.Sample(inst, bulk, domain, unitcell,coherence=False,surface_parms={'delta1':0.,'delta2':0.1391})
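    #coherence=False presumably makes the domain contributions add incoherently (weighted intensities rather than summed amplitudes)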

    #bond valence is not integrated into the data sets, but you can print it out to check the bv values as part of the post-fitting work
    if USE_BV:
        bond_valence=[domain_class_1.cal_bond_valence3(domain=VARS['domain'+str(i+1)+'A'],match_lib=vars()['match_lib_fitting_'+str(i+1)+'A']) for i in range(DOMAIN_NUMBER)]
        for bv in bond_valence:
            for key in bv.keys():
                print "%s\t%5.3f\n"%(key,bv[key])
    
    #calculate the structure factor for each data set in this loop
    for data_set in data:
        h = data_set.extra_data['h']
        k = data_set.extra_data['k']
        l = data_set.extra_data['l']
        LB = data_set.extra_data['LB']
        dL = data_set.extra_data['dL']
        rough = (1-beta)/((1-beta)**2 + 4*beta*np.sin(np.pi*(l-LB)/dL)**2)**0.5#beta roughness model; double check that the LB and dL values are set up correctly in the data file
        f = rough*sample.calc_f(h, k, l,f1f2,res_el)
        F.append(abs(f))
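    #F now holds one array of |F| per data set, in the same order as the data sets in 'data'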
    #print_data(N_sorbate=4,N_atm=40,domain=domain1A,z_shift=1,save_file='D://model.xyz')    
    #export the model results for plotting if PLOT is set to True
    if PLOT:
        plot_data_container_experiment={}
        plot_data_container_model={}
        for data_set in data:
            h = data_set.extra_data['h']
            k = data_set.extra_data['k']
            l = data_set.x
            LB = data_set.extra_data['LB']
            dL = data_set.extra_data['dL']
            I=data_set.y
            eI=data_set.error
            #make dummy hkl arrays on a finer l grid so the plotted model curve looks smoother
            l_dumy=np.arange(l[0],l[-1],0.1)
            N=len(l_dumy)
            h_dumy=np.array([h[0]]*N)
            k_dumy=np.array([k[0]]*N)
            LB_dumy=[]
            dL_dumy=[]
            for i in range(N):
                index=np.argmin(np.abs(l-l_dumy[i]))#index of the measured l value closest to l_dumy[i]
                LB_dumy.append(LB[index])
                dL_dumy.append(dL[index])
            LB_dumy=np.array(LB_dumy)
            dL_dumy=np.array(dL_dumy)
            rough_dumy = (1-beta)/((1-beta)**2 + 4*beta*np.sin(np.pi*(l_dumy-LB_dumy)/dL_dumy)**2)**0.5
            f_dumy = rough_dumy*sample.calc_f(h_dumy, k_dumy, l_dumy)
            
            label=str(int(h[0]))+str(int(k[0]))+'L'
            plot_data_container_experiment[label]=np.concatenate((l[:,np.newaxis],I[:,np.newaxis],eI[:,np.newaxis]),axis=1)
            plot_data_container_model[label]=np.concatenate((l_dumy[:,np.newaxis],f_dumy[:,np.newaxis]),axis=1)
        hkls=['00L','02L','10L','11L','20L','22L','30L','2-1L','21L']
        plot_data_list=[]
        for hkl in hkls:
            plot_data_list.append([plot_data_container_experiment[hkl],plot_data_container_model[hkl]])
        pickle.dump(plot_data_list,open("D:\\Google Drive\\useful codes\\plotting\\temp_plot","wb"))
    return F
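
#The Sim function above folds the beta roughness model and the nearest-point LB/dL lookup directly into
#its loops.  Below is a minimal standalone sketch of both pieces so they can be sanity-checked outside of
#a fit.  The function names and the demo numbers are illustrative only, not part of the fitting machinery.
import numpy as np#already imported in the full script; repeated here so the sketch is self-contained

def beta_roughness(beta,l,LB,dL):
    #approximate roughness factor for a CTR: equal to 1 on the Bragg condition (l-LB a multiple of dL)
    #and damped in between; beta=0 corresponds to an ideally flat surface (factor of 1 everywhere)
    return (1-beta)/((1-beta)**2+4*beta*np.sin(np.pi*(l-LB)/dL)**2)**0.5

def nearest_index(l,l0):
    #index of the measured l value closest to l0, as used when borrowing LB and dL for the dummy l grid
    return np.argmin(np.abs(np.asarray(l)-l0))

#quick check with made-up numbers: the factor is 1 at l=0 and l=2 (Bragg) and ~0.82 at the anti-Bragg l=1
#print beta_roughness(0.1,np.array([0.,1.,2.]),0.,2.)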