Example 1
def main(args):
    mc_small = read_file(L=args.L // 2, n_samples=args.nTE)
    mc_large = read_file(L=args.L, n_samples=args.nTE)

    set_GPU_memory(fraction=args.GPU)
    model = ModelLoader(list_ind=args.Mind)

    if args.TEST > args.nTE:
        args.TEST = args.nTE

    obs = np.zeros([len(T_list), 5, 7])

    ## First upsampling
    T_ren = 2.0 / np.arccosh(np.exp(2.0 / T_list))
    for (iT, T) in enumerate(T_ren):
        ## Update model temperature ##
        T_model = T_list[np.abs(T - T_list).argmin()]
        model.update_temperature(T=T_model)

        ## Make predictions ##
        data_in = temp_partition(mc_small, iT, n_samples=args.nTE)
        pred = model.graph.predict(add_index(data_in))
        ## Calculate observables ##
        obs[iT] = calculate_observables_real(
            temp_partition(mc_large, iT, n_samples=args.nTE),
            data_in,
            pred[:, :, 0],
            T=T_list[iT],
            Tr=T)

        print('Temperature %d / %d done!' % (iT + 1, len(T_list)))

    ## Save observables ##
    create_directory(quantities_real_dir)
    np.save(quantities_real_dir + '/%s.npy' % (model.name), obs)
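The map `T_ren = 2.0 / np.arccosh(np.exp(2.0 / T_list))` above matches the inverse of the 1D Ising decimation relation K' = (1/2) ln cosh(2K) with K = 1/T: given a coarse temperature T, it returns the finer-lattice temperature whose one-step renormalization lands back on T. A minimal self-contained check of that inversion (all names here are local to this sketch):

import numpy as np

def renormalize(T):
    # One decimation step, K' = 0.5 * ln cosh(2K), written in temperatures.
    return 2.0 / np.log(np.cosh(2.0 / T))

def upsample_temperature(T):
    # Inverse map used above: solve exp(2/T) = cosh(2/T_ren) for T_ren.
    return 2.0 / np.arccosh(np.exp(2.0 / T))

T = 2.5
assert np.isclose(renormalize(upsample_temperature(T)), T)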
Example 2
def main(args):
    ## Renormalized temperatures (inverse map); the leading 0.01 entries are
    ## apparently placeholders for the lowest temperatures.
    T_ren_inv = np.array([
        0.01, 0.01, 0.01, 0.01, 0.01, 1.21835191, 1.22976684, 1.39674347,
        1.51484435, 1.65761354, 1.75902208, 1.85837041, 1.95260925, 2.07132396,
        2.13716533, 2.25437054, 2.29606717, 2.38018868, 2.44845189, 2.51316151,
        2.58725426, 2.6448879, 2.7110948, 2.74426717, 2.81525268, 2.87031377,
        2.90806294, 2.98742994, 3.03780331, 3.10501399, 3.17323991, 3.19663683
    ])
    
    ## Read data ##
    L_list = [args.L, args.L//2, args.L]
    data_or = temp_partition(add_index(read_file(L=args.L, n_samples=args.nTE)),
                             args.iT, n_samples=args.nTE)
    data_in = temp_partition(add_index(read_file(L=args.L // 2, n_samples=args.nTE)),
                             args.iT, n_samples=args.nTE)
    
    ## Set model ##
    set_GPU_memory(fraction=args.GPU)
    model = ModelLoader(list_ind=args.Mind, critical=args.CR)
    print('\nModel %s loaded.'%model.name)
    print('Temperature = %.4f\n'%T_list[args.iT])

    if args.TEST > args.nTE:
        args.TEST = args.nTE
    
    ## Find transformed temperatures ##
    Tr = T_ren_inv[args.iT]
        
    ## Find closer value from T_list to update model temperature ##
    iT_closer = np.abs(T_list - Tr).argmin()
    model.update_temperature(T=T_list[iT_closer])
    extrapolated_model = duplicate(model.graph, data_in.shape,
                                   hid_filters=args.HF, kernels=args.K, hid_act=args.ACT)
    
    print('\nModel %s loaded.'%model.name)
    print('Temperature = %.4f'%T_list[args.iT])
    print('Renormalized temperature = %.4f\n' % T_list[iT_closer])
    
    ## Make predictions ##
    pred_cont = extrapolated_model.predict(data_in)
    
    ## Calculate observables ##
    data_list = [temp_partition(read_file(L=args.L, n_samples=args.nTE), iT_closer),
                 temp_partition(read_file(L=args.L//2, n_samples=args.nTE), iT_closer),
                 pred_cont[:,:,:,0] > np.random.random(pred_cont.shape[:-1])]
    
    obj_list = [Ising(2 * x - 1) for x in data_list]
    obs = np.zeros([3, 2, args.nTE])
    for (i, obj) in enumerate(obj_list):
        obj._calculate_magnetization()
        obj._calculate_energy()
        obs[i, 0] = np.abs(obj.sample_mag) * 1.0 / L_list[i]**2
        obs[i, 1] = obj.sample_energy * 1.0 / L_list[i]**2
        
    create_directory(seaborn_dir)
    np.save(seaborn_dir + '/%s_extr_iT%d.npy'%(model.name, args.iT), np.array(obs))
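The third entry of `data_list` above binarizes the network's continuous outputs by comparing each site against an independent uniform draw, i.e. each site becomes a Bernoulli sample with the predicted probability. A minimal sketch of that step in isolation (shapes are hypothetical):

import numpy as np

probs = np.random.random((4, 8, 8, 1))                   # stand-in for pred_cont
spins01 = probs[:, :, :, 0] > np.random.random(probs.shape[:-1])
spins_pm1 = 2 * spins01 - 1                              # map {0,1} to {-1,+1}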
Example 3
def main(args):
    ## Decide if deep or not
    assert len(args.K) == len(args.Nw)
    if len(args.K) > 1:
        from networks.trainer_deep import Trainer
        args.WAVEP = None  ## NOTE: weight averaging is not yet implemented for the deep model

    else:
        from networks.trainer import Trainer
        # Make Nw and K integers
        args.Nw, args.K = args.Nw[0], args.K[0]

    ## Load data
    train_data = dl.add_index(
        dl.Z2_zero_majority(dl.read_file(L=args.L, n_samples=10000,
                                         train=True)))
    val_data = dl.add_index(
        dl.Z2_zero_majority(
            dl.read_file(L=args.L, n_samples=10000, train=False)))

    for (iT, T) in enumerate(T_list):
        ## Prepare RBM
        args.iT = iT
        rbm = Trainer(args)

        print('\nTemperature: %.4f  -  Critical: %s' % (T, str(args.CR)))
        if isinstance(args.K, int):
            print('RBM with %d visible units and %d hidden units.' %
                  (rbm.Nv**2, rbm.Nh**2 * rbm.K))
            print('Number of weight parameters: %d.\n' % (rbm.Nw**2 * rbm.K))

        else:
            print('Created RBM with %d visible units.' % (rbm.Nv**2))
            print(args.K)
            print('\n')

        rbm.prepare_training()

        ## Set up directory for saving
        save_dir = 'Trained_Models/MultipleTemps/%s' % (rbm.name)
        dl.create_directory(save_dir)
        save_dir += '/T%.4f' % T
        dl.create_directory(save_dir)

        ## Train and save
        rbm.fit_and_save(train_data=dl.temp_partition(train_data, iT),
                         val_data=dl.temp_partition(val_data, iT)[:args.nVAL],
                         directory=save_dir)

        print('Temperature %d / %d done!' % (iT + 1, len(T_list)))

    return
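`dl.Z2_zero_majority` is defined elsewhere in the repo; judging from its name, it presumably fixes the Z2 symmetry of the Ising data by flipping each configuration so that the majority of its spins is 0. A hypothetical sketch of such a helper, assuming 0/1-encoded configurations of shape (N, L, L):

import numpy as np

def z2_zero_majority_sketch(configs):
    flip = configs.mean(axis=(1, 2)) > 0.5   # configurations with a 1-majority
    configs = configs.copy()
    configs[flip] = 1 - configs[flip]        # apply the Z2 spin flip
    return configs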
Example 4
def main(args):
    if args.CR:
        critical_n_samples = {8: 40000, 16: 100000}
        T = 2 / np.log(1 + np.sqrt(2))

        data = dl.read_file_critical(L=args.L,
                                     n_samples=critical_n_samples[args.L],
                                     train=False)

    else:
        from data.directories import T_list
        T = T_list[args.iT]

        data = dl.temp_partition(
            dl.read_file(L=args.L, n_samples=10000, train=False), args.iT)

    ## Initialize
    rbm = Tester(args, T=T)
    print('\nInitiating testing with %s.\n' % rbm.name)

    ## Test
    rbm.prepare_testing(data)
    obs_list = rbm.test()

    ## Save
    save_dir = 'Observables/%s' % ['T%.4f' % T, 'Critical'][int(args.CR)]
    dl.create_directory(save_dir)
    save_dir += '/%s' % rbm.name
    dl.create_directory(save_dir)
    np.save('%s/k%d_%d.npy' % (save_dir, args.kI, args.kF), obs_list)

    ## Saved .npy format: Array (different k (0 = correct), 12 observables)
    return
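A minimal sketch of reading the saved array back, following the format note above (the file name is hypothetical):

import numpy as np

obs = np.load('k1_32.npy')   # hypothetical file written by np.save above
correct = obs[0]             # row 0: exact MC observables
per_k = obs[1:]              # remaining rows: one per Gibbs step k
rel_err = np.abs(per_k - correct) / np.abs(correct)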
Example 5
def main(args):
    data = dl.read_file(L=args.L, n_samples=10000, train=False)

    obs_list = []
    for (iT, T) in enumerate(T_list):
        ## Initialize
        args.iT = iT
        rbm = TesterTemps(args, T=T)
        print('\nInitiating testing with %s.\n' % rbm.name)

        ## Test
        rbm.prepare_testing(dl.temp_partition(data, iT))
        obs_list.append(rbm.test())

        print('Temperature %d / %d done!' % (iT + 1, len(T_list)))

    obs_list = np.array(obs_list)

    ## Save
    save_dir = 'Observables/%s' % rbm.name
    dl.create_directory(save_dir)

    np.save('%s/k%d_%d.npy' % (save_dir, args.kI, args.kF), obs_list)
    ## Saved .npy format: Array (temps, different k (0 = correct), 12 observables)

    # Save only the correct row and the last (converged) k. np.stack keeps the
    # documented (2, temps, 12) layout; np.concatenate would flatten it to
    # (2 * temps, 12).
    np.save('%s/k%d_converged.npy' % (save_dir, args.kF),
            np.stack((obs_list[:, 0, :], obs_list[:, -1, :])))
    ## Saved .npy format: Array ((0=correct, 1=converged),temps, 12 observables)

    return
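np.stack is used in the converged-k save above (rather than np.concatenate) so that the array keeps the documented (2, temps, 12) layout; concatenating two (temps, 12) arrays along axis 0 would flatten them to (2 * temps, 12). A quick check:

import numpy as np

a, b = np.zeros((32, 12)), np.ones((32, 12))
assert np.stack((a, b)).shape == (2, 32, 12)
assert np.concatenate((a, b)).shape == (64, 12)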
Example 6
def main(args):
    if args.PBC:
        from networks.architectures import duplicate_simple2D_pbc as duplicate
    else:
        from networks.architectures import duplicate_simple2D as duplicate

    ## Renormalized temperature (inverse)
    T_ren_inv = np.array([
        0.01, 0.01, 0.01, 0.01, 0.01, 1.21835191, 1.22976684, 1.39674347,
        1.51484435, 1.65761354, 1.75902208, 1.85837041, 1.95260925, 2.07132396,
        2.13716533, 2.25437054, 2.29606717, 2.38018868, 2.44845189, 2.51316151,
        2.58725426, 2.6448879, 2.7110948, 2.74426717, 2.81525268, 2.87031377,
        2.90806294, 2.98742994, 3.03780331, 3.10501399, 3.17323991, 3.19663683
    ])

    ## Read data ##
    data_or = read_file(L=args.L, n_samples=args.nTE)
    data_in = add_index(read_file(L=args.L // 2, n_samples=args.nTE))

    ## Set model ##
    set_GPU_memory(fraction=args.GPU)
    model = ModelLoader(list_ind=args.Mind, critical=args.CR)

    if args.TEST > args.nTE:
        args.TEST = args.nTE

    if args.OUT:
        from data.directories import output_dir
        create_directory(output_dir + '/' + model.name)

    if args.Tind is None:
        args.Tind = np.arange(len(T_list))

    obs = np.zeros([len(args.Tind), 5, 12])
    # Enumerate the selected temperature indices so that iT always indexes the
    # full T_list (and T_ren_inv), while i indexes the output array.
    for (i, iT) in enumerate(args.Tind):
        T = T_list[iT]
        ## Find transformed temperatures ##
        Tr = T_ren_inv[iT]

        ## Find closer value from T_list to update model temperature ##
        T_closer = T_list[np.abs(T_list - Tr).argmin()]
        model.update_temperature(T=T_closer)
        extrapolated_model = duplicate(model.graph,
                                       data_in.shape,
                                       hid_filters=args.HF,
                                       kernels=args.K,
                                       hid_act=args.ACT)

        ## Make predictions ##
        data_in_T = temp_partition(data_in, iT, n_samples=args.nTE)
        pred_cont = extrapolated_model.predict(data_in_T)

        ## Calculate observables ##
        ## For the lowest temperatures T_ren_inv holds placeholder values,
        ## so fall back to the original temperature there.
        if iT > 5:
            Tr_calc = Tr
        else:
            Tr_calc = T
        obs[i] = calculate_observables_real(
            temp_partition(data_or, iT, n_samples=args.nTE),
            data_in_T[:, :, :, 0],
            pred_cont[:, :, :, 0],
            T=T,
            Tr=Tr_calc)

        ## Save network output ##
        if args.OUT:
            np.save(output_dir + '/%s/T%.4f.npy' % (model.name, T), pred_cont)

        print('Temperature %d / %d done!' % (i + 1, len(args.Tind)))

    ## Save observables ##
    create_directory(quantities_real_dir)
    np.save(quantities_real_dir + '/%s_extr.npy' % model.name, np.array(obs))
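`duplicate_simple2D` (and its PBC variant) are imported from networks.architectures and not shown here. Assuming the network is fully convolutional, its trained kernels can be reapplied to a larger lattice by rebuilding the graph with a new input shape and reusing the trained layers; a minimal Keras sketch of that idea, not the repo's actual implementation:

from tensorflow import keras

def duplicate_sketch(trained_model, data_shape):
    # data_shape = (n_samples, L, L, 1); conv layers accept any spatial size.
    inputs = keras.layers.Input(shape=data_shape[1:])
    x = inputs
    for layer in trained_model.layers[1:]:   # assumes layer 0 is the input layer
        x = layer(x)                         # reusing a layer shares its weights
    return keras.Model(inputs, x)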
Example 7
def main(args):
    if args.CR:
        T = 2 / np.log(1 + np.sqrt(2))

        rbm, rbm_name = get_model(args.Mind, Nv=args.L, critical=True)
        data = dl.read_file_critical(L=args.L, n_samples=100000, train=False)

    else:
        from data.directories import T_list
        T = T_list[args.iT]

        rbm, rbm_name = get_model(args.Mind, Nv=args.L, critical=False, T=T)
        data = dl.temp_partition(
            dl.read_file(L=args.L, n_samples=10000, train=False), args.iT)

    ## Calculate MC observables
    obs_correct = get_observables_with_corr_and_tpf(data, T)

    n_batches = args.nTE // args.BSC

    ## Create RBM ops
    if args.REC:
        data = dl.add_index(data)[:args.nTE]
        ops = [
            rbm.create_gibbs_sampler_random(args.BSC, k=args.GBk),
            rbm.create_gibbs_sampler(k=args.GBk)
        ]
        obs_prediction = np.zeros([n_batches, 2, len(obs_correct)])
    else:
        ops = rbm.create_gibbs_sampler_random(args.BSC, k=args.GBk)
        obs_prediction = np.zeros([n_batches, len(obs_correct)])

    print('\nInitiating testing with %s.\n' % rbm_name)

    ## Define Session
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.GPU)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    sess.run(tf.global_variables_initializer())

    if args.REC:
        for iB in range(n_batches):
            sampled, recon = sess.run(ops,
                                      feed_dict={
                                          rbm.visible:
                                          data[iB * args.BSC:(iB + 1) *
                                               args.BSC]
                                      })
            obs_prediction[iB] = np.array([
                get_observables_with_corr_and_tpf(sampled[:, :, :, 0], T),
                get_observables_with_corr_and_tpf(recon[:, :, :, 0], T)
            ])

        obs_pred_all = obs_prediction.mean(axis=0)
        sampling_error = np.abs(obs_pred_all[0] -
                                obs_correct) * 100 / obs_correct
        reconstr_error = np.abs(obs_pred_all[1] -
                                obs_correct) * 100 / obs_correct
        # Use the sampled observables in the comparison printed below.
        obs_pred = obs_pred_all[0]

    else:
        for iB in range(n_batches):
            sampled = sess.run(ops)
            obs_prediction[iB] = get_observables_with_corr_and_tpf(
                sampled[:, :, :, 0], T)

            ## Debugging
            print('\n')
            print(sampled[:, :, :, 0])
            print('\n')

        obs_pred = obs_prediction.mean(axis=0)
        sampling_error = np.abs(obs_pred - obs_correct) * 100 / obs_correct

    print('\nGibbs k=%d' % args.GBk)
    print('\nSampling:')
    print(sampling_error)
    if args.REC:
        print('\nReconstruction:')
        print(reconstr_error)

    ## Print observables
    print('\nCorrect vs Predicted Observables:')
    for (cor, pred) in zip(obs_correct, obs_pred):
        print('%.6f  -  %.6f' % (cor, pred))

    return
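The `create_gibbs_sampler*` ops are built inside the RBM class and not shown; block Gibbs sampling in a binary RBM alternates Bernoulli draws of hidden and visible units. A self-contained numpy sketch of k such steps (W, a, b are hypothetical weight and bias arrays; v has shape (batch, n_visible)):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def gibbs_k(v, W, a, b, k, rng=np.random):
    for _ in range(k):
        ph = sigmoid(v @ W + b)                         # P(h = 1 | v)
        h = (rng.random(ph.shape) < ph).astype(v.dtype)
        pv = sigmoid(h @ W.T + a)                       # P(v = 1 | h)
        v = (rng.random(pv.shape) < pv).astype(v.dtype)
    return v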
Example 8
def main(args):
    critical_n_samples = {8: 40000, 16: 100000}

    ## Decide if deep or not
    assert len(args.K) == len(args.Nw)
    if len(args.K) > 1:
        from networks.trainer_deep import Trainer
        args.WAVEP = None  ## NOTE: weight averaging is not yet implemented for the deep model

    else:
        from networks.trainer import Trainer
        # Make Nw and K integers
        args.Nw, args.K = args.Nw[0], args.K[0]

    ## Load data
    if args.CR:
        T = 2 / np.log(1 + np.sqrt(2))
        save_dir = 'Trained_Models/Critical'

        data = dl.add_index(
            dl.read_file_critical(L=args.L,
                                  n_samples=critical_n_samples[args.L]))
        train_data = data[:args.nTR]
        val_data = data[args.nTR:args.nTR + args.nVAL]

    else:
        from data.directories import T_list
        T = T_list[args.iT]
        save_dir = 'Trained_Models/T%.4f' % T

        train_data = dl.add_index(
            dl.temp_partition(
                dl.read_file(L=args.L, n_samples=10000, train=True), args.iT))
        val_data = dl.add_index(
            dl.temp_partition(
                dl.read_file(L=args.L, n_samples=10000, train=False), args.iT))

    ## Prepare RBM
    rbm = Trainer(args)
    print('\nTemperature: %.4f  -  Critical: %s' % (T, str(args.CR)))
    if isinstance(args.K, int):
        print('RBM with %d visible units and %d hidden units.' %
              (rbm.Nv**2, rbm.Nh**2 * rbm.K))
        print('Number of weight parameters: %d.\n' % (rbm.Nw**2 * rbm.K))

    else:
        print('Created RBM with %d visible units.' % (rbm.Nv**2))
        print(args.K)
        print('\n')

    rbm.prepare_training()

    ## Set up directory for saving
    dl.create_directory(save_dir)
    save_dir += '/%s' % (rbm.name)
    dl.create_directory(save_dir)

    ## Train and save
    rbm.fit_and_save(train_data, val_data, directory=save_dir)

    return
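`Trainer.fit_and_save` lives in networks/trainer.py and is not shown; RBMs like this are typically trained with contrastive divergence (CD-k), where positive statistics come from the data and negative statistics from a k-step Gibbs chain. A schematic numpy update for the weight matrix only (hypothetical names, biases omitted, flattened binary data):

import numpy as np

def cd_weight_update(v0, vk, W, lr=1e-3):
    # p(h = 1 | v) under the current weights for data (v0) and chain (vk)
    ph0 = 1.0 / (1.0 + np.exp(-(v0 @ W)))
    phk = 1.0 / (1.0 + np.exp(-(vk @ W)))
    grad = (v0.T @ ph0 - vk.T @ phk) / len(v0)   # <v h>_data - <v h>_model
    return W + lr * grad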
Example 9
def main(args):
    data_mc = read_file(L=args.L, n_samples=args.nTE)
    
    set_GPU_memory(fraction=args.GPU)
    model = ModelLoader(list_ind=args.Mind)
    
    if args.TEST > args.nTE:
        args.TEST = args.nTE
        
    tpf = np.zeros([args.UP, len(T_list)])
    obs = np.zeros([args.UP, len(T_list), 3, 7])
    ## WARNING: Does not contain original MC observables
    
    pred_cont = []
    ## First upsampling
    T_ren = 2.0 / np.arccosh(np.exp(2.0 / T_list))
    for (iT, T) in enumerate(T_ren):
        ## Update model temperature ##
        T_model = T_list[np.abs(T - T_list).argmin()]
        model.update_temperature(T=T_model)
                
        ## Make predictions ##
        pred_cont.append(make_prediction(
            data_in=add_index(temp_partition(data_mc, iT, n_samples=args.nTE)),
            graph=model.graph, hid_filters=args.HF, kernels=args.K,
            hid_act=args.ACT))

        if iT < args.TS:
            pred_cont[iT] = np.round(pred_cont[iT])
        else:
            pred_cont[iT] = (pred_cont[iT] >
                             np.random.random(pred_cont[iT].shape)).astype(int)
                
                
        ## Calculate observables ##
        obs[0, iT] = calculate_observables_rep(pred_cont[iT][:, :, 0], Tr=T)
        tpf[0, iT] = two_point_function(2 * pred_cont[iT][:, :, 0] - 1,
                                        k=int(pred_cont[iT].shape[1]**0.8 / 5))
        
        print('Temperature %d / %d done!'%(iT+1, len(T_list)))
        
    print('\nUpsampling 1 / %d completed!\n'%args.UP)
    
    for iUP in range(1, args.UP):
        T_ren = 2.0 / np.arccosh(np.exp(2.0 / T_ren))
        for (iT, T) in enumerate(T_ren):
            T_model = T_list[np.abs(T - T_list).argmin()]
            model.update_temperature(T=T_model)
                    
            ## Make predictions ##
            if iT < args.TS:
                pred_cont[iT] = np.round(pred_cont[iT])
            else:
                pred_cont[iT] = (pred_cont[iT] >
                                 np.random.random(pred_cont[iT].shape)).astype(int)

            pred_cont[iT] = make_prediction(
                data_in=pred_cont[iT], graph=model.graph,
                hid_filters=args.HF, kernels=args.K, hid_act=args.ACT)

            ## Calculate observables ##
            obs[iUP, iT] = calculate_observables_rep(pred_cont[iT][:, :, 0], Tr=T)
            tpf[iUP, iT] = two_point_function(2 * pred_cont[iT][:, :, 0] - 1,
                                              k=int(pred_cont[iT].shape[1]**0.8 / 5))
            
            print('Temperature %d / %d done!'%(iT+1, len(T_list)))
        
        print('\nUpsampling %d / %d completed!\n'%(iUP+1, args.UP))
            
    ## Save observables ##
    create_directory(quantities_rep_dir)
    np.save(quantities_rep_dir + '/%s_TS%d_UP%d_VER%d.npy'%(model.name, args.TS, args.UP, args.VER), obs)
    np.save(quantities_rep_dir + '/%s_TS%d_TPF_N085.npy'%(model.name, args.TS), tpf)
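Each pass of the outer loop above pushes the temperatures through the inverse-RG map again, so the sequence of renormalized temperatures can be tabulated independently of the network; a short sketch (starting temperatures are hypothetical):

import numpy as np

T = np.array([1.0, 2.0, 2.269, 3.0, 3.5])
for step in range(3):                        # stands in for args.UP
    T = 2.0 / np.arccosh(np.exp(2.0 / T))
    print('upsampling %d:' % (step + 1), np.round(T, 4))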