Example #1
    def load_graph(self):
        # Read the quantum numbers and physical constants from the UI fields.
        n = int(self.n.get())
        l = int(self.l.get())
        m = int(self.m.get())
        Z = int(self.Z.get())
        a0 = int(self.a0.get())

        model.Z = Z
        model.a0 = Z * a0

        model.validate_quantum_numbers(n, l, m)

        # Only pass an explicit resolution when the field is filled in;
        # otherwise fall back to the model's default.
        if len(self.res.get()) > 0:
            res = int(self.res.get())
            model.graph(n, l, m, resolution=res)
        else:
            model.graph(n, l, m)
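
For reference, the constraints that `validate_quantum_numbers` is expected to enforce are the standard hydrogen-like ones: n >= 1, 0 <= l <= n-1 and -l <= m <= l. A minimal sketch of such a check (an assumption, not the original implementation):

def validate_quantum_numbers(n, l, m):
    # n is the principal, l the azimuthal and m the magnetic quantum number.
    if n < 1:
        raise ValueError("n must be >= 1")
    if not 0 <= l <= n - 1:
        raise ValueError("l must satisfy 0 <= l <= n - 1")
    if abs(m) > l:
        raise ValueError("m must satisfy -l <= m <= l")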
Example #2
def train(batch_size, learning_rate, x, y):
	logits = graph(x)
	_y = tf.one_hot(indices=tf.cast(y, tf.int32), depth=10)
	loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=_y, logits=logits), axis=0)
	acc = tf.equal(tf.argmax(logits, 1), tf.argmax(_y, 1))
	acc = tf.reduce_mean(tf.cast(acc, tf.float32))
	global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name="global_step")
	optimizer = tf.train.AdamOptimizer(learning_rate)
	# Pass the step variable created above so it is actually incremented by minimize().
	train_op = optimizer.minimize(loss=loss, global_step=global_step)
	return loss, acc, train_op, global_step
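
A hypothetical way to wire this function up, assuming the `graph` builder from the snippet is in scope; the placeholder shapes and the random batch below are illustrative assumptions, not part of the original code:

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

x = tf.placeholder(tf.float32, shape=[None, 784])  # assumed flattened input images
y = tf.placeholder(tf.int32, shape=[None])         # assumed integer class labels (10 classes)
loss, acc, train_op, global_step = train(batch_size=128, learning_rate=1e-3, x=x, y=y)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    x_batch = np.random.rand(128, 784).astype(np.float32)  # dummy batch for illustration
    y_batch = np.random.randint(0, 10, size=128)
    _, batch_loss, batch_acc = sess.run([train_op, loss, acc],
                                        feed_dict={x: x_batch, y: y_batch})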
Example #3
    def __init__(self, model_name=None, max_sentence_length=400):
        self.max_sentence_length = max_sentence_length
        # Load the character and label mappings from the JSON config file.
        with open('config.txt', 'r', encoding='utf-8') as config_file:
            config = json.load(config_file)
        self.char_dict = config['c_mapping']
        self.label_dict = config['l_mapping']
        self.reverse_labels_mapping = {v: k for k, v in self.label_dict.items()}

        # Build the inference graph and restore the trained weights.
        self.graph = m.graph(max_sentence_length=max_sentence_length)
        with tf.variable_scope('mode'):
            self.graph.build_graph(is_train=False)
            saver = tf.train.Saver()
            self.sess = tf.Session()
            # Prefer an explicit model_name when given, otherwise fall back to
            # the module-level default path.
            saver.restore(self.sess, model_name or default_model_path)
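
The reverse label mapping built above would typically be used at inference time to turn predicted class indices back into their string labels; a tiny illustrative sketch (the predicted indices are made up):

# e.g. inside a predict() method of the same class
predicted_ids = [3, 0, 7]  # hypothetical argmax outputs from self.sess.run(...)
predicted_labels = [self.reverse_labels_mapping[i] for i in predicted_ids]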
Example #4
import model
import data_reader
import tensorflow.compat.v1 as tf
import numpy as np

tf.disable_eager_execution()

#load graph
mlp = model.graph(data_reader.VOCAB_SIZE, hidden_size=50)
loss, predict_label = mlp.buid_graph()
train = mlp.trainer(loss, 0.1)

#load data
train_data_reader = data_reader.data_reader(data_reader.VOCAB_SIZE, 128,
                                            'tf_idf.txt')

# train
saver = tf.train.Saver()  # object used to save/restore the graph variables
with tf.Session() as sess:
    sess.run(
        tf.global_variables_initializer())  # initialize all tensor variables
    # saver.restore(sess, 'saved_variable')  # restore previously saved weights
    step, MAX_STEP = 0, 10
    while step < MAX_STEP:
        batch_data, batch_label = train_data_reader.next_batch()
        label_eval, loss_eval, _ = sess.run([predict_label, loss, train],
                                            feed_dict={
                                                mlp.input: batch_data,
                                                mlp.output: batch_label
                                            })
        step += 1
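    # NOTE: the Saver created above is never used in this snippet; the trained
    # weights could be persisted here with e.g. saver.save(sess, 'saved_variable')
    # (the checkpoint path is an assumption, mirroring the commented-out restore
    # call above).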
Example #5
def main(args):
    bs = args.batch_size
    trainpath = args.train_csv
    validationpath = args.validation_csv
    learning_rate = args.lr
    logdir = args.log_dir

    if not os.path.exists(logdir):
        os.makedirs(logdir)

    default_values = [[0.0] for _ in range(785)]
    
    def decode(line):
        item = tf.decode_csv(line, default_values)
        return item[0], item[1:]
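    # `normalize`, used with tf.py_func below, is not shown in this snippet; a
    # plausible implementation (an assumption, not from the original) would cast
    # both outputs to float32 and scale the pixels into the 0-1 range:
    #     def normalize(label, pixels):
    #         return np.float32(label), np.float32(pixels) / 255.0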
    
    
    # Skip the header row and filter out any comment lines.
    training_dataset = (tf.data.TextLineDataset(trainpath)
                        .skip(1)
                        .filter(lambda line: tf.not_equal(tf.substr(line, 0, 1), "#")))
    validation_dataset = (tf.data.TextLineDataset(validationpath)
                          .skip(1)
                          .filter(lambda line: tf.not_equal(tf.substr(line, 0, 1), "#")))
    
    # The Dataset API reads the csv as plain text; the `decode` function defined
    # above splits each line into a label and its pixel values.
    training_dataset = training_dataset.cache().map(decode)
    #training_dataset = training_dataset.map(
    #    lambda x: tf.py_func(splitter, [x], [tf.float32, tf.float32]))
    validation_dataset = validation_dataset.cache().map(decode)
    


    # Normalize the dataset to 0-1 range
    training_dataset = training_dataset.map(
        lambda label, pixel: tf.py_func(normalize, [label, pixel], [tf.float32, tf.float32]))
    validation_dataset = validation_dataset.map(
        lambda label, pixel: tf.py_func(normalize, [label, pixel], [tf.float32, tf.float32]))
    
    # Randomly shuffles the dataset
    training_dataset = training_dataset.shuffle(buffer_size=args.buffer_size)
    
    # Create batches for training and validation
    training_dataset = training_dataset.batch(bs)
    validation_dataset = validation_dataset.batch(bs)
    
    # A feedable iterator is defined by a handle placeholder and its structure. We
    # could use the `output_types` and `output_shapes` properties of either
    # `training_dataset` or `validation_dataset` here, because they have
    # identical structure.
    handle = tf.placeholder(tf.string, shape=[])
    iterator = tf.data.Iterator.from_string_handle(
        handle, training_dataset.output_types, training_dataset.output_shapes)
    
    next_element = iterator.get_next()
    
    # You can use feedable iterators with a variety of different kinds of iterator
    # (such as one-shot and initializable iterators).
    training_iterator = training_dataset.make_initializable_iterator()
    validation_iterator = validation_dataset.make_initializable_iterator()
    
    # Define training op here
    #train_graph = tf.Graph()
    #tf.reset_default_graph()
    
    x = tf.placeholder('float32',shape=[bs,None])
    y = tf.placeholder('int32',shape=[bs])
    #optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    logits = graph(x) # training mode

    _y = tf.one_hot(indices=tf.cast(y, tf.int32), depth=10)
    #loss = create_loss(logits, _y)
    loss = tf.nn.softmax_cross_entropy_with_logits(labels=_y, logits=logits)
    loss = tf.reduce_mean(loss,axis=0)
    tf.summary.scalar('loss', loss)

    global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name="global_step")    
    optimizer = tf.train.AdamOptimizer(learning_rate)
    # Pass the step variable created above so it is incremented on every update.
    train_op = optimizer.minimize(loss=loss, global_step=global_step)

    #train_op, global_step = create_optimizer(logits, learning_rate=learning_rate)
    training_loss = []
    epoch_loss = []
    
    with tf.Session() as sess:
        
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(logdir, sess.graph)

        sess.run(tf.group(tf.global_variables_initializer(), 
                          tf.local_variables_initializer()))

        # The `Iterator.string_handle()` method returns a tensor that can be evaluated
        # and used to feed the `handle` placeholder.
        training_handle = sess.run(training_iterator.string_handle())
        validation_handle = sess.run(validation_iterator.string_handle())
        
        for i in range(args.nrof_epochs):
            sess.run(training_iterator.initializer)
            while True:
                try:
                    label_batch, image_batch = sess.run(next_element, feed_dict={handle: training_handle})
                    #np.save('x.npy',image_batch)
                    #print(label_batch)
                    # xx = sess.run([_y], feed_dict = {y:label_batch})
                    summary, _loss,_, g = sess.run([merged, loss,train_op,global_step], feed_dict = {x:image_batch, y:label_batch})

                    training_loss.append(_loss)
                    epoch_loss.append(_loss)
                    if g % 10 == 0:
                        train_writer.add_summary(summary, g)
                        print_results(g, training_loss)
                        training_loss = []
                except tf.errors.OutOfRangeError:
                    print("Reached the end of the training data for this epoch.")
                    break
                #finally:
                #    sess.run(validation_iterator.initializer)
                #    sess.run(next_element, feed_dict={handle: validation_handle})
            print_results_epoch(i, epoch_loss)
            epoch_loss = []
                #sess.run(validation_iterator.initializer)
                #sess.run(next_element, feed_dict={handle: validation_handle})
        train_writer.close()
    return True
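
main() only reads a handful of attributes from args, so a minimal command-line wiring might look like the sketch below; every default value here is an assumption made for illustration:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--train_csv', default='mnist_train.csv')
    parser.add_argument('--validation_csv', default='mnist_test.csv')
    parser.add_argument('--lr', type=float, default=1e-3)
    parser.add_argument('--log_dir', default='./logs')
    parser.add_argument('--buffer_size', type=int, default=10000)
    parser.add_argument('--nrof_epochs', type=int, default=10)
    main(parser.parse_args())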
Example #6

			print(str(i)+'-'+str(j)+'-'+str(k))
			
			#features
			coo, feat_type = data_preprocessing.cooccurrence(dataset_detection_video, k)

			coint, feat_type = data_preprocessing.cointersection(dataset_detection_video, k)

			for index, video in enumerate(coint):
				video['sequence'] = np.concatenate((video['sequence'], coo[index]['sequence']),axis=1)

			#splitting train & test
			splitted_data = data_preprocessing.split_data(coint)

			# create the graph
			model.graph(splitted_data,i,j)

			# train & save 
			model.train(splitted_data, classlbl_to_classid, 60, 32, feat_type, k)
	



'''
#========PREDICTION============

# data loading (pickle)
dataset_detection_video, classlbl_to_classid = data_preprocessing.load_data()

rnd_video_index = np.random.choice(len(dataset_detection_video),1)[0]