os.system("ls -l N%dx%dx%d_L%d_U%d_Mu%s_T*.HSF.stream | awk '{print $9}' | sed -e s/N%dx%dx%d_L%d_U%d_Mu%s_T//g -e s/.HSF.stream//g > dtau2.dat" %(n_x,n_x,n_x,L,U2,Mu,n_x,n_x,n_x,L,U2,Mu))
    dtau2 = np.genfromtxt("dtau2.dat")
    dtau = np.hstack((dtau1,dtau2))
    os.remove("dtau1.dat")
    os.remove("dtau2.dat")

  # Array of file numbers of the shuffled files
  filenumber = np.arange(1+File_index_offset,len(dtau)+1,1)
  if len(filenumber) > Max_nfile :
    filenumber = filenumber[:Max_nfile]
  #if not(use_single_U) and np.mod(len(filenumber),2) == 1:
  #  print 'Attention! When using data set with two Us for training, make sure that there are EVEN number of files. Exiting...'
  #  sys.exit()

  # Provide file information to the data_reader module.
  HSF = data_reader.insert_file_info(filename,filenumber, load_test_data_only=load_test_data_only)
  # Load and categorize data into either training data, test data, validation data, or
  # all of them. If validation data is needed, set include_validation_data to True
  # in the insert_file_info() call above.
  HSF = HSF.categorize_data(make_spin_down_negative=True)


elif not(perform_classification_with_label) :
  # Get temperature and save them to a file.
  os.system("ls -l N%dx%dx%d_L%d_U%d_Mu%s_T*.HSF.stream | awk '{print $9}' | sed -e s/N%dx%dx%d_L%d_U%d_Mu%s_T//g -e s/.HSF.stream//g > dtau.dat" %(n_x,n_x,n_x,L,U,Mu,n_x,n_x,n_x,L,U,Mu))
  # Load temperatures into a list of strings.
  dtau = np.genfromtxt("dtau.dat",dtype='str')
  os.remove("dtau.dat")
  # The number of lines to skip at the beginning of the file if not all of the data is
  # to be loaded.
  sh = SKIPHEADER_tmp #ndata_per_temp  - classification_data_per_temp
  if perform_classification_with_label :
    print 'Process: classification with label.'
  else :
    print 'Process: classification without label.'
    print 'Using %d data per temperature.' % classification_data_per_temp
  # xxxxx Don't change the following variable. xxxxx
  # When perform_classification is set to True, only test data will be loaded.
  perform_classification = True

if perform_classification_with_label == True :
  # Get temperature and save them to a file.
  os.system("ls -l N%dx%dx%d_L%d_U%d_Mu0_T*.HSF.stream | awk '{print $9}' | sed -e s/N%dx%dx%d_L%d_U%d_Mu0_T//g -e s/.HSF.stream//g > dtau.dat" %(n_x,n_x,n_x,L,U,n_x,n_x,n_x,L,U))
  dtau = np.genfromtxt("dtau.dat")
  # Array of file numbers of the shuffled files
  filenumber = np.arange(1,len(dtau)+1,1)
  # Provide file information to the data_reader module.
  HSF = data_reader.insert_file_info(filename,filenumber, performing_classification=perform_classification)
  # Load and categorize data into either training data, test data, validation data, or
  # all of them. If validation data is needed, set include_validation_data to True
  # in the insert_file_info() call above.
  HSF = HSF.categorize_data()

else :
  # Get temperature and save them to a file.
  os.system("ls -l N%dx%dx%d_L%d_U%d_Mu0_T*.HSF.stream | awk '{print $9}' | sed -e s/N%dx%dx%d_L%d_U%d_Mu0_T//g -e s/.HSF.stream//g > dtau.dat" %(n_x,n_x,n_x,L,U,n_x,n_x,n_x,L,U))
  # Load temperatures into a list of strings.
  dtau = np.genfromtxt("dtau.dat",dtype='str')
  # The number of lines to skip at the beginning of the file if not all of the data is
  # to be loaded.
  sh = ndata_per_temp - classification_data_per_temp
  while ( ndata_per_temp - sh - classification_data_per_temp ) < 0 :
    print 'Sum of the classification data per temperature and the number of lines skipped at the beginning of the file must equal the number of data per temperature.'
    sys.exit()
  print 'Process: classification without label.'
  print 'Using %d data per temperature.' % classification_data_per_temp
  # xxxxx Don't change the following variable. xxxxx
  # When perform_classification is set to True, only test data will be loaded.
  perform_classification = True

if perform_classification_with_label == True:
    # Get temperature and save them to a file.
    os.system(
        "ls -l N%dx%dx%d_L%d_U%d_Mu0_T*.HSF.stream | awk '{print $9}' | sed -e s/N%dx%dx%d_L%d_U%d_Mu0_T//g -e s/.HSF.stream//g > dtau.dat"
        % (n_x, n_x, n_x, L, U, n_x, n_x, n_x, L, U))
    dtau = np.genfromtxt("dtau.dat")
    # Array of file numbers of the shuffled files
    filenumber = np.arange(1, len(dtau) + 1, 1)
    # Provide file information to the data_reader module.
    HSF = data_reader.insert_file_info(
        filename, filenumber, performing_classification=perform_classification)
    # Load and categorize data into either training data, test data, validation data, or
    # all of them. If validation data is needed, set include_validation_data to True
    # in the insert_file_info() call above.
    HSF = HSF.categorize_data()

else:
    # Get temperature and save them to a file.
    os.system(
        "ls -l N%dx%dx%d_L%d_U%d_Mu0_T*.HSF.stream | awk '{print $9}' | sed -e s/N%dx%dx%d_L%d_U%d_Mu0_T//g -e s/.HSF.stream//g > dtau.dat"
        % (n_x, n_x, n_x, L, U, n_x, n_x, n_x, L, U))
    # Load temperatures into a list of strings.
    dtau = np.genfromtxt("dtau.dat", dtype='str')
    # The number of lines to skip at the beginning of the file if not all of the data is
    # to be loaded.
    sh = ndata_per_temp - classification_data_per_temp
    def __init__(self, use_single_U = True, U = 5, U1 = 5, U2 = 20, name_output_file_by_date_first = True): 
        self.U = U
        self.U1 = U1
        self.U2 = U2
        self.use_single_U = use_single_U
        self.name_output_file_by_date_first = name_output_file_by_date_first

        if use_single_U :
          print 'Training using U = %d' % U
        else :
          print 'Training using U = %d and U = %d.' % (U1,U2) 

        # System size
        #   number of spins along each dimension of the cube
        self.n_x = 4
        n_x = self.n_x
        #   number of imaginary-time slices
        self.L = 200
        L = self.L
        #   Volume of tesseract
        self.V4d = L*(n_x)**3
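        #   e.g. with L = 200 and n_x = 4, V4d = 200 * 4**3 = 12800 space-time
        #   lattice sites per HSF configuration (illustrative arithmetic only).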

        # Maximum number of data files to be used for training and testing.
        Max_nfile = 100
        # Offset added to the first file index to be loaded.
        File_index_offset = 0

        if use_single_U :
            # Input labelled and shuffled filename for training and performing classification
            # with labels.
            filename = './N%dx%dx%d_L%d_U%d_Mu0_T_shuffled' % (n_x,n_x,n_x,L,U) + '_%.2d.dat'

            # Input raw filename for performing classification without labels.
            rawdata_filename = './N%dx%dx%d_L%d_U%d_Mu0_T' % (n_x,n_x,n_x,L,U) + '%s.HSF.stream'
        else :
            # Input labelled and shuffled filename for training and performing classification
            # with labels.
            if U1 < U2 :
                filename = './N%dx%dx%d_L%d_U%d+U%d_Mu0_T_shuffled' % (n_x,n_x,n_x,L,U1,U2) + '_%.2d.dat'
            else :
                filename = './N%dx%dx%d_L%d_U%d+U%d_Mu0_T_shuffled' % (n_x,n_x,n_x,L,U2,U1) + '_%.2d.dat'
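            # e.g. with the defaults n_x = 4, L = 200, U1 = 5, U2 = 20 the pattern
            # above resolves to './N4x4x4_L200_U5+U20_Mu0_T_shuffled_%.2d.dat',
            # where '%.2d' is presumably filled in later (by data_reader) with a
            # zero-padded file number (01, 02, ...).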

        # Load data
        if use_single_U :
          # Get temperature and save them to a file.
          os.system("ls -l N%dx%dx%d_L%d_U%d_Mu0_T*.HSF.stream | awk '{print $9}' | sed -e s/N%dx%dx%d_L%d_U%d_Mu0_T//g -e s/.HSF.stream//g > dtau.dat" %(n_x,n_x,n_x,L,U,n_x,n_x,n_x,L,U))
          self.dtau = np.genfromtxt("dtau.dat")
          os.remove("dtau.dat")
        # Array of file numbers of the shuffled files
        else :
          # Get temperature and save them to a file.
          os.system("ls -l N%dx%dx%d_L%d_U%d_Mu0_T*.HSF.stream | awk '{print $9}' | sed -e s/N%dx%dx%d_L%d_U%d_Mu0_T//g -e s/.HSF.stream//g > dtau1.dat" %(n_x,n_x,n_x,L,U1,n_x,n_x,n_x,L,U1))
          dtau1 = np.genfromtxt("dtau1.dat")
          # Get temperature and save them to a file.
          os.system("ls -l N%dx%dx%d_L%d_U%d_Mu0_T*.HSF.stream | awk '{print $9}' | sed -e s/N%dx%dx%d_L%d_U%d_Mu0_T//g -e s/.HSF.stream//g > dtau2.dat" %(n_x,n_x,n_x,L,U2,n_x,n_x,n_x,L,U2))
          dtau2 = np.genfromtxt("dtau2.dat")
          self.dtau = np.hstack((dtau1,dtau2))
          os.remove("dtau1.dat")
          os.remove("dtau2.dat")

        # Array of file numbers of the shuffled files
        filenumber = np.arange(1+File_index_offset,len(self.dtau)+1,1)
        if len(filenumber) > Max_nfile :
          filenumber = filenumber[:Max_nfile]

        # Provide file information to the data_reader module.
        HSF = data_reader.insert_file_info(filename,filenumber, load_test_data_only=False)
        # Load and categorize data into either training data, test data, validation data, or
        # all of them. If validation data is needed, set include_validation_data to True
        # in the insert_file_info() call above.
        self.HSF = HSF.categorize_data()
Example #5
import sys
import numpy as np
import data_reader
import time

filename = '/home/kelvin/Desktop/Theano test/HSF_N4x4x4_L200_U9_Mu0_UniformTGrid/N4x4x4_L200_U9_Mu0_T_shuffled_%.2d.HSF.stream'
filename_weight_bias = "./wb.ckpt"
filename_measure = "./HSF_measure.dat"
filenumber = np.arange(1, 41, 1)
HSF = data_reader.insert_file_info(filename, filenumber)
HSF = HSF.categorize_data()

i_training = 6400
n_feature_map1 = 32
n_feature_map2 = 32
n_feature_map3 = 8
n_feature_map4 = 8
n_spin = 4
n_time_dimension = 200
n_output_neuron = 2
n_fully_connected_neuron = 30

filter_d = 2
filter_h = filter_d
filter_w = filter_d

import tensorflow as tf
# If you are not using an InteractiveSession, then you should build
# the entire computation graph before starting a session and
# launching the graph.
sess = tf.InteractiveSession()
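# A minimal illustrative continuation (a sketch, not code from the original
# example): with an InteractiveSession the graph can keep growing after the
# session exists. The shapes below are an assumption, flattening each HSF
# configuration to n_spin**3 * n_time_dimension = 4**3 * 200 = 12800 values
# with one-hot labels of length n_output_neuron.
x = tf.placeholder(tf.float32, shape=[None, n_spin**3 * n_time_dimension])
y_ = tf.placeholder(tf.float32, shape=[None, n_output_neuron])
# Because the session is interactive, ops can also be evaluated as they are
# built, e.g. tf.shape(x).eval(feed_dict={x: np.zeros((1, 12800))}).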
Example #6
        dtau2 = np.genfromtxt("dtau2.dat")
        dtau = np.hstack((dtau1, dtau2))
        os.remove("dtau1.dat")
        os.remove("dtau2.dat")

    # Array of file numbers of the shuffled files
    filenumber = np.arange(1 + File_index_offset, len(dtau) + 1, 1)
    if len(filenumber) > Max_nfile:
        filenumber = filenumber[:Max_nfile]
    #if not(use_single_U) and np.mod(len(filenumber),2) == 1:
    #  print 'Attention! When using data set with two Us for training, make sure that there are EVEN number of files. Exiting...'
    #  sys.exit()

    # Provide file information to the data_reader module.
    HSF = data_reader.insert_file_info(filename,
                                       filenumber,
                                       load_test_data_only=load_test_data_only)
    # Load and categorize data into either training data, test data, validation data, or
    # all of them. If validation data is needed, set include_validation_data to True
    # in the insert_file_info() call above.
    HSF = HSF.categorize_data(make_spin_down_negative=True)

elif not (perform_classification_with_label):
    # Get temperature and save them to a file.
    os.system(
        "ls -l N%dx%dx%d_L%d_U%d_Mu%s_T*.HSF.stream | awk '{print $9}' | sed -e s/N%dx%dx%d_L%d_U%d_Mu%s_T//g -e s/.HSF.stream//g > dtau.dat"
        % (n_x, n_x, n_x, L, U, Mu, n_x, n_x, n_x, L, U, Mu))
    # Load temperatures into a list of strings.
    dtau = np.genfromtxt("dtau.dat", dtype='str')
    os.remove("dtau.dat")
    # The number of lines to skip at the beginning of the file if not all of the data is
    # to be loaded.
import sys
import numpy as np
import data_reader
import time

filename = '/home/kelvin/Desktop/Theano test/HSF_N4x4x4_L200_U9_Mu0_UniformTGrid/N4x4x4_L200_U9_Mu0_T_shuffled_%.2d.HSF.stream'
filename_weight_bias = "./wb.ckpt"
filename_measure = "./HSF_measure.dat"
filenumber = np.arange(1,41,1)
HSF = data_reader.insert_file_info( filename, filenumber )
HSF = HSF.categorize_data()

i_training = 6400
n_feature_map1 = 32
n_feature_map2 = 32
n_feature_map3 = 8
n_feature_map4 = 8
n_spin = 4
n_time_dimension = 200
n_output_neuron = 2
n_fully_connected_neuron = 30

filter_d = 2
filter_h = filter_d
filter_w = filter_d

import tensorflow as tf
# If you are not using an InteractiveSession, then you should build
# the entire computation graph before starting a session and 
# launching the graph.
sess = tf.InteractiveSession()
Example #8
    "If you set your logs directory manually make sure"
    "to use /logs/ when running on ClusterOne cloud.")
flags.DEFINE_string(
    "log_dir", get_logs_path(root=PATH_TO_LOCAL_LOGS),
    "Path to dataset. It is recommended to use get_data_path()"
    "to define your data directory.so that you can switch "
    "from local to clusterone without changing your code."
    "If you set the data directory manually makue sure to use"
    "/data/ as root path when running on ClusterOne cloud.")

FLAGS = flags.FLAGS

n_x = 10

Ising = data_reader.insert_file_info(FLAGS.data_dir + "2D%d" % (n_x) +
                                     "_p_%.1d.txt",
                                     np.arange(1, 3),
                                     load_test_data_only=False)
Ising = Ising.categorize_data()
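# With n_x = 10 and np.arange(1, 3) == array([1, 2]), the file pattern above
# resolves to "<FLAGS.data_dir>2D10_p_1.txt" and "<FLAGS.data_dir>2D10_p_2.txt"
# (a note on the '%'-formatting only; the actual paths depend on FLAGS.data_dir).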

n_output_neuron = 2

filter_d = 2
filter_h = 2

n_feature_map1 = 16
n_feature_map2 = 8
n_fully_connected_neuron = 16

batch_size = 50
n_train_data = 50000
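# Illustrative sketch (not part of the original fragment; the variable names
# below are new): one way the filter and feature-map sizes above could be wired
# into a first TensorFlow 1.x convolution layer, assuming the 2D Ising
# configurations arrive as [batch, n_x, n_x, 1] images.
import tensorflow as tf

x_image = tf.placeholder(tf.float32, shape=[None, n_x, n_x, 1])
W_conv1 = tf.Variable(
    tf.truncated_normal([filter_d, filter_h, 1, n_feature_map1], stddev=0.1))
b_conv1 = tf.Variable(tf.constant(0.1, shape=[n_feature_map1]))
h_conv1 = tf.nn.relu(
    tf.nn.conv2d(x_image, W_conv1, strides=[1, 1, 1, 1], padding='SAME') + b_conv1)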
    def __init__(self,
                 use_single_U=True,
                 U=5,
                 U1=5,
                 U2=20,
                 name_output_file_by_date_first=True):
        self.U = U
        self.U1 = U1
        self.U2 = U2
        self.use_single_U = use_single_U
        self.name_output_file_by_date_first = name_output_file_by_date_first

        if use_single_U:
            print 'Training using U = %d' % U
        else:
            print 'Training using U = %d and U = %d.' % (U1, U2)

        # System size
        #   number of spins along each dimension of the cube
        self.n_x = 4
        n_x = self.n_x
        #   number of imaginary-time slices
        self.L = 200
        L = self.L
        #   Volume of tesseract
        self.V4d = L * (n_x)**3

        # Maximum number of data files to be used for training and testing.
        Max_nfile = 100
        # Offset added to the first file index to be loaded.
        File_index_offset = 0

        if use_single_U:
            # Input labelled and shuffled filename for training and performing classification
            # with labels.
            filename = './N%dx%dx%d_L%d_U%d_Mu0_T_shuffled' % (
                n_x, n_x, n_x, L, U) + '_%.2d.dat'

            # Input raw filename for performing classification without labels.
            rawdata_filename = './N%dx%dx%d_L%d_U%d_Mu0_T' % (
                n_x, n_x, n_x, L, U) + '%s.HSF.stream'
        else:
            # Input labelled and shuffled filename for training and performing classification
            # with labels.
            if U1 < U2:
                filename = './N%dx%dx%d_L%d_U%d+U%d_Mu0_T_shuffled' % (
                    n_x, n_x, n_x, L, U1, U2) + '_%.2d.dat'
            else:
                filename = './N%dx%dx%d_L%d_U%d+U%d_Mu0_T_shuffled' % (
                    n_x, n_x, n_x, L, U2, U1) + '_%.2d.dat'

        # Load data
        if use_single_U:
            # Get temperature and save them to a file.
            os.system(
                "ls -l N%dx%dx%d_L%d_U%d_Mu0_T*.HSF.stream | awk '{print $9}' | sed -e s/N%dx%dx%d_L%d_U%d_Mu0_T//g -e s/.HSF.stream//g > dtau.dat"
                % (n_x, n_x, n_x, L, U, n_x, n_x, n_x, L, U))
            self.dtau = np.genfromtxt("dtau.dat")
            os.remove("dtau.dat")
        # Array of file numbers of the shuffled files
        else:
            # Get temperature and save them to a file.
            os.system(
                "ls -l N%dx%dx%d_L%d_U%d_Mu0_T*.HSF.stream | awk '{print $9}' | sed -e s/N%dx%dx%d_L%d_U%d_Mu0_T//g -e s/.HSF.stream//g > dtau1.dat"
                % (n_x, n_x, n_x, L, U1, n_x, n_x, n_x, L, U1))
            dtau1 = np.genfromtxt("dtau1.dat")
            # Get temperature and save them to a file.
            os.system(
                "ls -l N%dx%dx%d_L%d_U%d_Mu0_T*.HSF.stream | awk '{print $9}' | sed -e s/N%dx%dx%d_L%d_U%d_Mu0_T//g -e s/.HSF.stream//g > dtau2.dat"
                % (n_x, n_x, n_x, L, U2, n_x, n_x, n_x, L, U2))
            dtau2 = np.genfromtxt("dtau2.dat")
            self.dtau = np.hstack((dtau1, dtau2))
            os.remove("dtau1.dat")
            os.remove("dtau2.dat")

        # Array of file numbers of the shuffled files
        filenumber = np.arange(1 + File_index_offset, len(self.dtau) + 1, 1)
        if len(filenumber) > Max_nfile:
            filenumber = filenumber[:Max_nfile]

        # Provide file information to the data_reader module.
        HSF = data_reader.insert_file_info(filename,
                                           filenumber,
                                           load_test_data_only=False)
        # Load and categorize data into either training data, test data, validation data, or
        # all of them. If validation data is needed, set include_validation_data to True
        # in the insert_file_info() call above.
        self.HSF = HSF.categorize_data()