def main(args):
    mc_small = read_file(L=args.L // 2, n_samples=args.nTE)
    mc_large = read_file(L=args.L, n_samples=args.nTE)

    set_GPU_memory(fraction=args.GPU)
    model = ModelLoader(list_ind=args.Mind)

    if args.TEST > args.nTE:
        args.TEST = args.nTE

    obs = np.zeros([len(T_list), 5, 7])

    ## First upsampling
    T_ren = 2.0 / np.arccosh(np.exp(2.0 / T_list))
    for (iT, T) in enumerate(T_ren):
        ## Update model temperature ##
        T_model = T_list[np.abs(T - T_list).argmin()]
        model.update_temperature(T=T_model)

        ## Make predictions ##
        data_in = temp_partition(mc_small, iT, n_samples=args.nTE)
        pred = model.graph.predict(add_index(data_in))
        ## Calculate observables ##
        obs[iT] = calculate_observables_real(
            temp_partition(mc_large, iT, n_samples=args.nTE),
            data_in,
            pred[:, :, 0],
            T=T_list[iT],
            Tr=T)

        print('Temperature %d / %d done!' % (iT + 1, len(T_list)))

    ## Save observables ##
    create_directory(quantities_real_dir)
    np.save(quantities_real_dir + '/%s.npy' % (model.name), obs)
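## A minimal sketch of the renormalized-temperature relation used in the loop
## above, T_ren = 2 / arccosh(exp(2 / T)). Plain numpy; `T_demo` is an
## illustrative array, not the project's T_list.
import numpy as np

def renormalize_temperature(T):
    """Map temperatures T to their renormalized values 2 / arccosh(exp(2 / T))."""
    return 2.0 / np.arccosh(np.exp(2.0 / np.asarray(T, dtype=float)))

T_demo = np.array([1.5, 2.0, 2.269, 2.5, 3.0])
print(renormalize_temperature(T_demo))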
## Example 2 ##
def main(args):
    set_GPU_memory(fraction=args.GPU)

    ### HF, K default values ###
    if args.HF is None:
        args.HF = [64, 32]
    if args.K is None:
        args.K = [5, 1, 3]

    ### Check data sizes ###
    if args.TRS > args.nTR:
        args.TRS = args.nTR
    if args.VALS > args.nTE:
        args.VALS = args.nTE

    print('Hidden Filters: ' + str(args.HF))
    print('Kernels: ' + str(args.K))

    data = TrainingData(args)
    args.model_dir = models_save_dir
    args.metrics_dir = metrics_save_dir

    if args.Tind is None:
        args.Tind = range(len(T_list))

    args.T_list = T_list[args.Tind]

    trainer = TrainerTemp(args)

    trainer.train(data)
    return
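## This entry point assumes an argparse namespace carrying the fields read
## above. A hypothetical parser covering them might look as follows; -GPU,
## -HF, -K and -nTE mirror the parser fragment near the end of this file,
## while the remaining flags and all defaults are illustrative assumptions.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-GPU', type=float, default=0.3, help='GPU memory fraction')
parser.add_argument('-HF', nargs='+', type=int, default=None, help='hidden filters list')
parser.add_argument('-K', nargs='+', type=int, default=None, help='kernels list')
parser.add_argument('-nTR', type=int, default=10000, help='training samples (assumed)')
parser.add_argument('-nTE', type=int, default=100000, help='test samples')
parser.add_argument('-TRS', type=int, default=10000, help='training set size (assumed)')
parser.add_argument('-VALS', type=int, default=5000, help='validation set size (assumed)')
parser.add_argument('-Tind', nargs='+', type=int, default=None, help='temperature indices')
## main(parser.parse_args())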
def main(args):    
    ## Renormalized temperature (inverse)    
    T_ren_inv = np.array([0.01, 0.01, 0.01, 0.01, 0.01,
       1.21835191, 1.22976684, 1.39674347, 1.51484435, 1.65761354,
       1.75902208, 1.85837041, 1.95260925, 2.07132396, 2.13716533,
       2.25437054, 2.29606717, 2.38018868, 2.44845189, 2.51316151,
       2.58725426, 2.6448879 , 2.7110948 , 2.74426717, 2.81525268,
       2.87031377, 2.90806294, 2.98742994, 3.03780331, 3.10501399,
       3.17323991, 3.19663683])
    
    ## Read data ##
    L_list = [args.L, args.L//2, args.L]
    data_or = temp_partition(add_index(read_file(L=args.L, n_samples=args.nTE)),
                             args.iT, n_samples=args.nTE)
    data_in = temp_partition(add_index(read_file(L=args.L//2, n_samples=args.nTE)),
                             args.iT, n_samples=args.nTE)
    
    ## Set model ##
    set_GPU_memory(fraction=args.GPU)
    model = ModelLoader(list_ind=args.Mind, critical=args.CR)
    print('\nModel %s loaded.'%model.name)
    print('Temperature = %.4f\n'%T_list[args.iT])

    if args.TEST > args.nTE:
        args.TEST = args.nTE
    
    ## Find transformed temperatures ##
    Tr = T_ren_inv[args.iT]
        
    ## Find the closest value in T_list to update the model temperature ##
    iT_closer = np.abs(T_list - Tr).argmin()
    model.update_temperature(T=T_list[iT_closer])
    extrapolated_model = duplicate(model.graph, data_in.shape,
                                   hid_filters=args.HF, kernels=args.K, hid_act=args.ACT)
    
    print('\nModel %s loaded.'%model.name)
    print('Temperature = %.4f'%T_list[args.iT])
    print('Renormalized temperature = %.4f\n'%T_list[iT_closer])
    
    ## Make predictions ##
    pred_cont = extrapolated_model.predict(data_in)
    
    ## Calculate observables ##
    data_list = [temp_partition(read_file(L=args.L, n_samples=args.nTE), iT_closer),
                 temp_partition(read_file(L=args.L//2, n_samples=args.nTE), iT_closer),
                 pred_cont[:,:,:,0] > np.random.random(pred_cont.shape[:-1])]
    
    obj_list = [Ising(2 * x - 1) for x in data_list]
    obs = np.zeros([3, 2, args.nTE])
    for (i, obj) in enumerate(obj_list):
        obj._calculate_magnetization()
        obj._calculate_energy()
        obs[i, 0] = np.abs(obj.sample_mag) * 1.0 / L_list[i]**2
        obs[i, 1] = obj.sample_energy * 1.0 / L_list[i]**2
        
    create_directory(seaborn_dir)
    np.save(seaborn_dir + '/%s_extr_iT%d.npy'%(model.name, args.iT), np.array(obs))
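## The Ising helper used above is defined elsewhere; a minimal sketch of the
## per-site magnetization and energy it appears to provide, for a batch of
## ±1 square-lattice configurations with periodic boundaries. This is an
## assumption about its internals, not the project's actual implementation.
import numpy as np

def magnetization_density(spins):
    """|mean spin| per configuration, for spins of shape (n_samples, L, L) in {-1, +1}."""
    return np.abs(spins.mean(axis=(1, 2)))

def energy_density(spins):
    """Nearest-neighbour Ising energy per site (J = 1, periodic boundaries)."""
    right = np.roll(spins, -1, axis=2)
    down = np.roll(spins, -1, axis=1)
    return -(spins * right + spins * down).mean(axis=(1, 2))

## Example: binarize a continuous network output and measure both densities.
pred = np.random.random((10, 16, 16))                    # stand-in for pred_cont[:, :, :, 0]
spins = 2 * (pred > np.random.random(pred.shape)) - 1    # stochastic binarization to ±1
print(magnetization_density(spins), energy_density(spins))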
def main(args):
    args.CR = True
    args.model_dir = models_critical_save_dir
    args.metrics_dir = metrics_critical_save_dir
    set_GPU_memory(fraction=args.GPU)
    
    ### HF, K default values ###
    if args.HF is None:
        args.HF = [64, 32]
    if args.K is None:
        args.K = [5, 1, 3]
    
    if args.PRreg:
        L0 = int(np.log2(args.L))
        L_list = 2**np.arange(L0, L0+1+args.UP)
        
    if args.CB:
        from networks.consecutive import upsampling_batches as upsampling
        print('Upsampling with batches')
    else:
        from networks.consecutive import upsampling
    
    data = TrainingData(args)
    trainer = TrainerCritical(args)
    observables = []
    for iC in range(args.C):
        trainer.compiler(data)
        trainer.train(data, run_time=iC)
        obs = upsampling(data.test_out, trainer.model, args)
        observables.append(obs)
        
        if args.PRreg:
            print('Beta:')
            print(linregress(np.log10(L_list), np.log10(obs[0])))
            print('Gamma:')
            print(linregress(np.log10(L_list), np.log10(obs[2])))
            if args.TPF:
                print('Eta1:')
                print(linregress(np.log10(L_list/2.0), np.log10(obs[7])))
                print('Eta2:')
                print(linregress(np.log10(L_list/4.0), np.log10(obs[8])))
    
    create_directory(multiple_exponents_dir)
    np.save('%s/%s_C%dUP%dVER%d.npy'%(multiple_exponents_dir, trainer.name,
                             args.C, args.UP, args.VER), np.array(observables))
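## The finite-size-scaling prints above fit log10(observable) against log10(L)
## with scipy's linregress, reading the critical exponent off the slope. A
## minimal sketch on synthetic data (the values below are made up; for the 2D
## Ising model the magnetization scales as M ~ L^(-beta/nu) with beta/nu = 1/8):
import numpy as np
from scipy.stats import linregress

L_fit = 2.0 ** np.arange(4, 8)            # 16, 32, 64, 128
m_fit = 1.2 * L_fit ** (-0.125)           # synthetic M ~ L^(-1/8)
fit = linregress(np.log10(L_fit), np.log10(m_fit))
print('slope = %.4f, stderr = %.4f' % (fit.slope, fit.stderr))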
## Example 5 ##
def main(args):
    set_GPU_memory(fraction=args.GPU)

    ### HF, K default values ###
    if args.HF is None:
        args.HF = [64, 32]
    if args.K is None:
        args.K = [5, 1, 3]

    ### Check data sizes ###
    if args.TRS > args.nTR:
        args.TRS = args.nTR
    if args.VALS > args.nTE:
        args.VALS = args.nTE

    print('Hidden Filters: ' + str(args.HF))
    print('Kernels: ' + str(args.K))

    data = TrainingData(args)
    if args.CR:
        from data.directories import models_critical_save_dir, metrics_critical_save_dir
        from networks.train import TrainerCritical
        args.model_dir = models_critical_save_dir
        args.metrics_dir = metrics_critical_save_dir

        trainer = TrainerCritical(args)
        trainer.compiler(data)

    else:
        from data.directories import models_save_dir, metrics_save_dir, T_list
        from networks.train import TrainerTemp
        args.model_dir = models_save_dir
        args.metrics_dir = metrics_save_dir

        if args.Tind is None:
            args.Tind = range(len(T_list))

        args.T_list = T_list[args.Tind]

        trainer = TrainerTemp(args)

    trainer.train(data)
    return
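## set_GPU_memory is imported from elsewhere in the project; one plausible
## TF1-style implementation that caps the per-process GPU memory fraction.
## This is an assumption about its behaviour, shown only for context.
import tensorflow as tf

def set_GPU_memory(fraction=0.3):
    """Limit the fraction of GPU memory TensorFlow is allowed to allocate."""
    gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=fraction)
    config = tf.compat.v1.ConfigProto(gpu_options=gpu_options)
    tf.compat.v1.keras.backend.set_session(tf.compat.v1.Session(config=config))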
## Example 6 ##
def main(args):
    data = TestData(args)
    set_GPU_memory(fraction=args.GPU)

    model = ModelLoader(list_ind=args.Mind)

    if args.TEST > args.nTE:
        args.TEST = args.nTE

    if args.OUT:
        from data.directories import output_dir
        create_directory(output_dir + '/' + model.name)

    if args.Tind is None:
        args.Tind = np.arange(len(T_list))

    obs = np.zeros([len(args.Tind), 5, 7])
    for (iT, T) in enumerate(T_list[args.Tind]):
        ## Update model temperature ##
        model.update_temperature(T=T)

        ## Make predictions ##
        data_in = temp_partition(data.test_in, iT, n_samples=args.nTE)
        pred_cont = model.graph.predict(data_in)

        ## Calculate observables ##
        obs[iT] = calculate_observables(temp_partition(data.test_out[:, :, 0],
                                                       iT,
                                                       n_samples=args.nTE),
                                        data_in[:, :, 0],
                                        pred_cont[:, :, 0],
                                        T=T)

        ## Save network output ##
        if args.OUT:
            np.save(output_dir + '/%s/T%.4f.npy' % (model.name, T), pred_cont)

        print('Temperature %d / %d done!' % (iT + 1, len(args.Tind)))

    ## Save observables ##
    create_directory(quantities_dir)
    np.save(quantities_dir + '/%s.npy' % model.name, np.array(obs))
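## temp_partition(data, iT, n_samples=...) is used throughout to pull out the
## samples belonging to temperature index iT. A plausible implementation,
## assuming the data are stacked temperature by temperature in contiguous
## blocks of n_samples; this is inferred from the call sites, not the
## project's actual loader.
def temp_partition(data, iT, n_samples=10000):
    """Return the contiguous block of samples for temperature index iT."""
    return data[iT * n_samples:(iT + 1) * n_samples]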
## Example 7 ##
def main(args):
    data = TestData(args)
    set_GPU_memory(fraction=args.GPU)
    
    model = ModelLoader(list_ind=args.Mind, critical=args.CR)
    
    print('\nInitiating testing with %s' % model.name)
    print('Output is %s.\n' % ('saved' if args.OUT else 'not saved'))
    
    if args.TEST > args.nTE:
        args.TEST = args.nTE
        
    if args.CR:
        from data.directories import quantities_critical_dir
        
        ## Make predictions ##
        pred_cont = model.graph.predict(data.test_in[:args.TEST])
        
        ## Calculate observables ##
        obs = calculate_observables(data.test_out[:args.TEST,:,:,0], 
                                    data.test_in[:args.TEST,:,:,0], 
                                    pred_cont[:,:,:,0],
                                    T = 2 / np.log(1 + np.sqrt(2)))
        
        ## Save observables ##
        create_directory(quantities_critical_dir)
        np.save(quantities_critical_dir + '/%s_samples%d.npy'%(model.name, 
                                                               args.TEST), obs)
        
        ## Save network output ##
        if args.OUT:
            from data.directories import output_critical_dir
            create_directory(output_critical_dir)
            np.save(output_critical_dir + '/%s_samples%d.npy'%(
                    model.name, args.TEST), pred_cont)
    
    else:
        from data.directories import quantities_dir, T_list
        from data.loaders import temp_partition
                
        if args.OUT:
            from data.directories import output_dir
            create_directory(output_dir + '/' + model.name)
        
        if args.Tind is None:
            args.Tind = np.arange(len(T_list))
        
        obs = np.zeros([len(args.Tind), 5, 12])
        for (iT, T) in enumerate(T_list[args.Tind]):
            ## Update model temperature ##
            model.update_temperature(T=T)
            
            ## Make predictions ##
            data_in = temp_partition(data.test_in, iT, n_samples=args.nTE)
            pred_cont = model.graph.predict(data_in)
            
            ## Calculate observables ##
            obs[iT] = calculate_observables(
                    temp_partition(data.test_out[:,:,:,0], iT, n_samples=args.nTE),
                    data_in[:,:,:,0], pred_cont[:,:,:,0], T=T)
                    
            ## Save network output ##
            if args.OUT:
                np.save(output_dir + '/%s/T%.4f.npy'%(model.name, T), pred_cont)
                
            print('Temperature %d / %d done!'%(iT+1, len(args.Tind)))
        
        ## Save observables ##
        create_directory(quantities_dir)
        np.save(quantities_dir + '/%s.npy'%model.name, np.array(obs))
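## The critical branch above evaluates observables at the exact critical
## temperature of the 2D Ising model, T_c = 2 / ln(1 + sqrt(2)) ≈ 2.2692
## (in units J = k_B = 1). A one-line check:
import numpy as np
print(2 / np.log(1 + np.sqrt(2)))    # ≈ 2.269185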
## Example 8 ##
def main(args):
    if args.PBC:
        from networks.architectures import duplicate_simple2D_pbc as duplicate
    else:
        from networks.architectures import duplicate_simple2D as duplicate

    ## Renormalized temperature (inverse)
    T_ren_inv = np.array([
        0.01, 0.01, 0.01, 0.01, 0.01, 1.21835191, 1.22976684, 1.39674347,
        1.51484435, 1.65761354, 1.75902208, 1.85837041, 1.95260925, 2.07132396,
        2.13716533, 2.25437054, 2.29606717, 2.38018868, 2.44845189, 2.51316151,
        2.58725426, 2.6448879, 2.7110948, 2.74426717, 2.81525268, 2.87031377,
        2.90806294, 2.98742994, 3.03780331, 3.10501399, 3.17323991, 3.19663683
    ])

    ## Read data ##
    data_or = read_file(L=args.L, n_samples=args.nTE)
    data_in = add_index(read_file(L=args.L // 2, n_samples=args.nTE))

    ## Set model ##
    set_GPU_memory(fraction=args.GPU)
    model = ModelLoader(list_ind=args.Mind, critical=args.CR)

    if args.TEST > args.nTE:
        args.TEST = args.nTE

    if args.OUT:
        from data.directories import output_dir
        create_directory(output_dir + '/' + model.name)

    if args.Tind is None:
        args.Tind = np.arange(len(T_list))

    obs = np.zeros([len(args.Tind), 5, 12])
    for (iT, T) in enumerate(T_list[args.Tind]):
        ## Find transformed temperatures ##
        Tr = T_ren_inv[iT]

        ## Find the closest value in T_list to update the model temperature ##
        T_closer = T_list[np.abs(T_list - Tr).argmin()]
        model.update_temperature(T=T_closer)
        extrapolated_model = duplicate(model.graph,
                                       data_in.shape,
                                       hid_filters=args.HF,
                                       kernels=args.K,
                                       hid_act=args.ACT)

        ## Make predictions ##
        data_in_T = temp_partition(data_in, iT, n_samples=args.nTE)
        pred_cont = extrapolated_model.predict(data_in_T)

        ## Calculate observables ##
        if iT > 5:
            Tr_calc = Tr
        else:
            Tr_calc = T
        obs[iT] = calculate_observables_real(
            temp_partition(data_or, iT, n_samples=args.nTE),
            data_in_T[:, :, :, 0],
            pred_cont[:, :, :, 0],
            T=T,
            Tr=Tr_calc)

        ## Save network output ##
        if args.OUT:
            np.save(output_dir + '/%s/T%.4f.npy' % (model.name, T), pred_cont)

        print('Temperature %d / %d done!' % (iT + 1, len(args.Tind)))

    ## Save observables ##
    create_directory(quantities_real_dir)
    np.save(quantities_real_dir + '/%s_extr.npy' % model.name, np.array(obs))
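## duplicate_simple2D(_pbc) is defined elsewhere; the pattern it appears to
## implement is rebuilding the same fully convolutional architecture at a
## larger input size and copying the trained weights across, which works
## because convolution kernels do not depend on the spatial extent. A hedged
## Keras sketch of that idea; the layer stack below is an illustrative guess,
## not the project's actual architecture.
from tensorflow import keras

def build_fcn(input_shape, hid_filters=(64, 32), kernels=(5, 1, 3), hid_act='relu'):
    """Fully convolutional upsampler: hidden convolutions, 2x upsampling, sigmoid output."""
    inputs = keras.Input(shape=input_shape)
    x = inputs
    for filters, kernel in zip(hid_filters, kernels[:-1]):
        x = keras.layers.Conv2D(filters, kernel, padding='same', activation=hid_act)(x)
    x = keras.layers.UpSampling2D(size=2)(x)
    outputs = keras.layers.Conv2D(1, kernels[-1], padding='same', activation='sigmoid')(x)
    return keras.Model(inputs, outputs)

small = build_fcn((8, 8, 1))              # input shape the network was trained at
large = build_fcn((16, 16, 1))            # same layers, larger input
large.set_weights(small.get_weights())    # kernel shapes match, so the weights transfer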
parser.add_argument('-HF', nargs='+', type=int, default=None, help='hidden filters list')
parser.add_argument('-K', nargs='+', type=int, default=None, help='kernels list')

parser.add_argument('-GPU', type=float, default=0.3, help='GPU memory fraction')
parser.add_argument('-L', type=int, default=16, help='output size')
parser.add_argument('-nTE', type=int, default=100000, help='test samples')
parser.add_argument('-VER', type=int, default=1, help='version for name')

args = parser.parse_args()
args.CR = True

from data.directories import models_critical_save_dir as basic_dir
name = [o for o in os.listdir(basic_dir)
        if os.path.isdir(os.path.join(basic_dir, o))][args.NET]

set_GPU_memory(fraction=args.GPU)
if args.PRreg:
    L0 = int(np.log2(args.L))
    L_list = 2**np.arange(L0, L0+1+args.UP)

## Load model ##
#model = ModelLoader(args.NET, critical=True)

data = add_index(read_file_GPU(L=args.L))
#data = add_index(read_file_critical(L=args.L, n_samples=args.nTE))
observables = []
for model_name in os.listdir(os.path.join(basic_dir, name)):
    print(data.shape)
    ## Load model ##
    model = critical_model_from_file(os.path.join(basic_dir, name, model_name))
    print('\n%s\n'%model_name)
## Example 10 ##
def main(args):
    data_mc = read_file(L=args.L, n_samples=args.nTE)
    
    set_GPU_memory(fraction=args.GPU)
    model = ModelLoader(list_ind=args.Mind)
    
    if args.TEST > args.nTE:
        args.TEST = args.nTE
        
    tpf = np.zeros([args.UP, len(T_list)])
    obs = np.zeros([args.UP, len(T_list), 3, 7])
    ## WARNING: Does not contain original MC observables
    
    pred_cont = []
    ## First upsampling
    T_ren = 2.0 / np.arccosh(np.exp(2.0 / T_list))
    for (iT, T) in enumerate(T_ren):
        ## Update model temperature ##
        T_model = T_list[np.abs(T - T_list).argmin()]
        model.update_temperature(T=T_model)
                
        ## Make predictions ##
        pred_cont.append(make_prediction(
                data_in=add_index(temp_partition(data_mc, iT, n_samples=args.nTE)),
                graph=model.graph, hid_filters=args.HF, kernels=args.K, hid_act=args.ACT))
        
        if iT < args.TS:
            pred_cont[iT] = np.round(pred_cont[iT])
        else:
            pred_cont[iT] = (pred_cont[iT] > np.random.random(pred_cont[iT].shape)).astype(int)
                
                
        ## Calculate observables ##
        obs[0, iT] = calculate_observables_rep(pred_cont[iT][:,:,0], Tr=T)
        tpf[0, iT] = two_point_function(2 * pred_cont[iT][:,:,0] - 1, k=int(pred_cont[iT].shape[1]**0.8/5))
        
        print('Temperature %d / %d done!'%(iT+1, len(T_list)))
        
    print('\nUpsampling 1 / %d completed!\n'%args.UP)
    
    for iUP in range(1, args.UP):
        T_ren = 2.0 / np.arccosh(np.exp(2.0 / T_ren))
        for (iT, T) in enumerate(T_ren):
            T_model = T_list[np.abs(T - T_list).argmin()]
            model.update_temperature(T=T_model)
                    
            ## Make predictions ##
            if iT < args.TS:
                pred_cont[iT] = np.round(pred_cont[iT])
            else:
                pred_cont[iT] = (pred_cont[iT] > np.random.random(pred_cont[iT].shape)).astype(int)

            pred_cont[iT] = make_prediction(data_in=pred_cont[iT], graph=model.graph,
                                            hid_filters=args.HF, kernels=args.K,
                                            hid_act=args.ACT)

            ## Calculate observables ##
            obs[iUP, iT] = calculate_observables_rep(pred_cont[iT][:,:,0], Tr=T)
            tpf[iUP, iT] = two_point_function(2 * pred_cont[iT][:,:,0] - 1, k=int(pred_cont[iT].shape[1]**0.8/5))
            
            print('Temperature %d / %d done!'%(iT+1, len(T_list)))
        
        print('\nUpsampling %d / %d completed!\n'%(iUP+1, args.UP))
            
    ## Save observables ##
    create_directory(quantities_rep_dir)
    np.save(quantities_rep_dir + '/%s_TS%d_UP%d_VER%d.npy'%(model.name, args.TS, args.UP, args.VER), obs)
    np.save(quantities_rep_dir + '/%s_TS%d_TPF_N085.npy'%(model.name, args.TS), tpf)
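## two_point_function(spins, k) is called above on configurations rescaled to
## ±1; one plausible definition is the spin-spin correlation at separation k
## along the last axis, averaged over sites and samples. This is an assumption
## about the helper, shown only for orientation.
import numpy as np

def two_point_function(spins, k):
    """<s(x) s(x + k)> averaged over sites and samples, for ±1 configurations."""
    spins = np.asarray(spins)
    return np.mean(spins * np.roll(spins, -k, axis=-1))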