Example #1
    def __init__(self, batch_size):

#        NUM_CORES = 4
#        session = tf.Session(config=tf.ConfigProto(inter_op_parallelism_threads=NUM_CORES, intra_op_parallelism_threads=NUM_CORES))

        num_hidden_units = 2048
        session = tf.InteractiveSession()
        input_units = 28
        output_units = 1
        x = tf.placeholder("float", shape=[None, input_units], name='x')
        true_y = tf.placeholder("float", shape=[None, output_units], name='y_')

        W_fc1 = weight_variable([input_units, num_hidden_units])
        b_fc1 = bias_variable([num_hidden_units])
        h_fc1 = tf.nn.relu(tf.matmul(x, W_fc1) + b_fc1)

        W_fc2 = weight_variable([num_hidden_units, num_hidden_units])
        b_fc2 = bias_variable([num_hidden_units])
        h_fc2 = tf.nn.relu(tf.matmul(h_fc1, W_fc2) + b_fc2)

        W_fc3 = weight_variable([num_hidden_units, num_hidden_units])
        b_fc3 = bias_variable([num_hidden_units])
        h_fc3 = tf.nn.relu(tf.matmul(h_fc2, W_fc3) + b_fc3)

        W_fc4 = weight_variable([num_hidden_units, output_units])
        b_fc4 = bias_variable([output_units])

        keep_prob = tf.Variable(0.5, name='keep_prob', trainable=False)
        h_fc3_dropout = tf.nn.dropout(h_fc3, keep_prob)

        guess_y = tf.matmul(h_fc3, W_fc4) + b_fc4
        guess_y_dropout = tf.matmul(h_fc3_dropout, W_fc4) + b_fc4

        variables = [W_fc1, b_fc1, W_fc2, b_fc2, W_fc3, b_fc3, W_fc4, b_fc4]
        # loss = tf.nn.l2_loss(guess_y_dropout - true_y)
        # loss = tf.reduce_mean(tf.square(tf.sign(guess_y_dropout) - tf.sign(true_y - 0.5)))
        loss = tf.reduce_mean(tf.square(guess_y_dropout - true_y))
        # optimizer = tf.train.AdamOptimizer(learning_rate=0.00001, beta1=0.99, beta2=0.999, epsilon=1e-06, use_locking=False, name='Adam')
        # optimizer = tf.train.MomentumOptimizer(learning_rate=0.0000001, momentum=0.9, use_locking=False, name='Momentum')
        optimizer = tf.train.MomentumOptimizer(learning_rate=0.0000001, momentum=0.9995, use_locking=False, name='Momentum')
        # optimizer = tf.train.RMSPropOptimizer(1e-4, decay=0.9, momentum=0.0, epsilon=1e-10, use_locking=False, name='RMSProp')
        compute_gradients = optimizer.compute_gradients(loss, variables)
        apply_gradients = optimizer.apply_gradients(compute_gradients)
        minimize = optimizer.minimize(loss)
        # correct_prediction = tf.equal(tf.clip_by_value(tf.round(guess_y), 0.0, 1.0), true_y)
        # correct_prediction = tf.equal(tf.round(guess_y), true_y)
        # correct_prediction = tf.equal((tf.sign(guess_y) + 1) / 2., true_y)
        # correct_prediction = tf.equal(tf.argmax(guess_y, 1), tf.argmax(true_y, 1))
        # error_rate = 1 - tf.reduce_mean(tf.cast(correct_prediction, "float"))
        error_rate = loss

        ParameterServerModel.__init__(self, x, true_y, compute_gradients, apply_gradients, minimize, error_rate, session, batch_size)
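
Every example on this page calls weight_variable and bias_variable without defining them. Below is a minimal sketch of what such helpers typically look like, following the conventions of the old TensorFlow MNIST tutorials; the truncated-normal stddev, the bias constant, and the optional name parameter are assumptions, since the snippets never show the real definitions.

import tensorflow as tf

# Hypothetical helpers assumed by the examples; the init values are illustrative.
def weight_variable(shape, name=None):
    # Small random initialization; stddev=0.1 is an assumption, not from the source.
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial, name=name)

def bias_variable(shape, name=None):
    # Small positive constant keeps ReLU units active at the start; 0.1 is an assumption.
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial, name=name)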
Example #2
    def __init__(self, batch_size, gpu=True):
        #NUM_CORES = 4
        self.gpu = gpu
        self.batch_size = batch_size
        session = tf.InteractiveSession()
        #        session = tf.Session(config=tf.ConfigProto(log_device_placement=True))
        #session = tf.Session(config=tf.ConfigProto(inter_op_parallelism_threads=NUM_CORES, intra_op_parallelism_threads=NUM_CORES))
        #        with session.graph.device(self.device_for_node):
        input_units = 784
        output_units = 10
        hidden_units = 1024
        x = tf.placeholder("float", shape=[None, input_units], name='x')
        #x_image = tf.reshape(x, [-1,28,28,1], name='reshape')
        true_y = tf.placeholder("float", shape=[None, output_units], name='y_')

        W_fc0 = weight_variable([input_units, hidden_units], 'W_fc0')
        b_fc0 = bias_variable([hidden_units], 'b_fc0')
        h_fc0 = tf.nn.relu(tf.matmul(x, W_fc0) + b_fc0)

        W_fc1 = weight_variable([hidden_units, hidden_units], 'W_fc1')
        b_fc1 = bias_variable([hidden_units], 'b_fc1')
        h_fc1 = tf.nn.relu(tf.matmul(h_fc0, W_fc1) + b_fc1)

        W_fc2 = weight_variable([hidden_units, output_units], 'W_fc2')
        b_fc2 = bias_variable([output_units], 'b_fc2')

        keep_prob = tf.Variable(0.5, name='keep_prob', trainable=False)
        h_fc1_dropout = tf.nn.dropout(h_fc1, keep_prob)

        guess_y = tf.matmul(h_fc1, W_fc2) + b_fc2
        guess_y_dropout = tf.matmul(h_fc1_dropout, W_fc2) + b_fc2

        variables = [W_fc0, b_fc0, W_fc1, b_fc1, W_fc2, b_fc2]
        # Reduce to a scalar; the original minimized the unreduced per-example
        # vector, which TensorFlow implicitly sums when differentiating.
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(guess_y_dropout, true_y))

        optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
        compute_gradients = optimizer.compute_gradients(loss, variables)
        apply_gradients = optimizer.apply_gradients(compute_gradients)
        minimize = optimizer.minimize(loss)
        correct_prediction = tf.equal(tf.argmax(guess_y, 1),
                                      tf.argmax(true_y, 1))
        error_rate = 1 - tf.reduce_mean(tf.cast(correct_prediction, "float"))

        ParameterServerModel.__init__(self, x, true_y, compute_gradients,
                                      apply_gradients, minimize, error_rate,
                                      session, batch_size)
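
Because keep_prob is a non-trainable tf.Variable rather than a fed placeholder, dropout can be toggled by assigning to it. A minimal usage sketch under that assumption; the evaluation step itself is elided.

# Hypothetical: turn dropout off for evaluation, then restore it for training.
session.run(keep_prob.assign(1.0))
# ... evaluate error_rate on held-out data ...
session.run(keep_prob.assign(0.5))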
Example #3
 def __init__(self):
     session = tf.InteractiveSession()
     x = tf.placeholder("float", shape=[None, 784], name='x')
     x_image = tf.reshape(x, [-1,28,28,1], name='reshape')
     y_ = tf.placeholder("float", shape=[None, 10], name='y_')
     W_conv1 = weight_variable([5, 5, 1, 32], 'W_conv1')
     b_conv1 = bias_variable([32], 'b_conv1')
     h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
     h_pool1 = max_pool_2x2(h_conv1)
     W_conv2 = weight_variable([5, 5, 32, 64], 'W_conv2')
     b_conv2 = bias_variable([64], 'b_conv2')
     h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
     h_pool2 = max_pool_2x2(h_conv2)
     W_fc1 = weight_variable([7 * 7 * 64, 1024], 'W_fc1')
     b_fc1 = bias_variable([1024], 'b_fc1')
     h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
     h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
     keep_prob = tf.Variable(0.5, name='keep_prob', trainable=False)
     h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
     W_fc2 = weight_variable([1024, 10], 'W_fc2')
     b_fc2 = bias_variable([10], 'b_fc2')
     
     # not using dropout for testing, only training
     y_conv_dropout = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
     y_conv = tf.matmul(h_fc1, W_fc2) + b_fc2
     
     variables = [W_conv1, b_conv1, W_conv2, b_conv2, W_fc1, b_fc1, W_fc2, b_fc2]
     # Reduce to a scalar; the original minimized the unreduced per-example vector.
     loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_conv_dropout, y_))
     
     optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
     compute_gradients = optimizer.compute_gradients(loss, variables)
     apply_gradients = optimizer.apply_gradients(compute_gradients)
     minimize = optimizer.minimize(loss)
     correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
     accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
     
     ParameterServerModel.__init__(self,
             x,
             y_,
             compute_gradients,
             apply_gradients,
             minimize,
             accuracy,
             session)
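
This convolutional example additionally leans on conv2d and max_pool_2x2 helpers that the snippet never defines. A plausible sketch in the style of the old TensorFlow MNIST tutorial; the stride and padding choices are assumptions, though stride-1 SAME convolutions with 2x2 pooling are exactly what take the 28x28 input down to the 7*7*64 flatten size used above.

# Hypothetical helpers assumed by the convolutional examples.
def conv2d(x, W):
    # Stride 1 with SAME padding preserves the spatial dimensions.
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    # 2x2 max pooling halves each spatial dimension: 28 -> 14 -> 7.
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')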
Example #4
    def __init__(self, batch_size, gpu=True):
        # NUM_CORES = 4
        self.gpu = gpu
        self.batch_size = batch_size
        session = tf.InteractiveSession()
        # session = tf.Session(config=tf.ConfigProto(log_device_placement=True))
        # session = tf.Session(config=tf.ConfigProto(inter_op_parallelism_threads=NUM_CORES, intra_op_parallelism_threads=NUM_CORES))
        # with session.graph.device(self.device_for_node):
        input_units = 784
        output_units = 10
        hidden_units = 1024
        x = tf.placeholder("float", shape=[None, input_units], name='x')
        # x_image = tf.reshape(x, [-1, 28, 28, 1], name='reshape')
        true_y = tf.placeholder("float", shape=[None, output_units], name='y_')

        W_fc0 = weight_variable([input_units, hidden_units], 'W_fc0')
        b_fc0 = bias_variable([hidden_units], 'b_fc0')
        h_fc0 = tf.nn.relu(tf.matmul(x, W_fc0) + b_fc0)

        W_fc1 = weight_variable([hidden_units, hidden_units], 'W_fc1')
        b_fc1 = bias_variable([hidden_units], 'b_fc1')
        h_fc1 = tf.nn.relu(tf.matmul(h_fc0, W_fc1) + b_fc1)

        W_fc2 = weight_variable([hidden_units, output_units], 'W_fc2')
        b_fc2 = bias_variable([output_units], 'b_fc2')

        keep_prob = tf.Variable(0.5, name='keep_prob', trainable=False)
        h_fc1_dropout = tf.nn.dropout(h_fc1, keep_prob)

        guess_y = tf.matmul(h_fc1, W_fc2) + b_fc2
        guess_y_dropout = tf.matmul(h_fc1_dropout, W_fc2) + b_fc2

        variables = [W_fc0, b_fc0, W_fc1, b_fc1, W_fc2, b_fc2]
        # Reduce to a scalar; the original minimized the unreduced per-example vector.
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(guess_y_dropout, true_y))

        optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
        compute_gradients = optimizer.compute_gradients(loss, variables)
        apply_gradients = optimizer.apply_gradients(compute_gradients)
        minimize = optimizer.minimize(loss)
        correct_prediction = tf.equal(tf.argmax(guess_y, 1), tf.argmax(true_y, 1))
        error_rate = 1 - tf.reduce_mean(tf.cast(correct_prediction, "float"))

        ParameterServerModel.__init__(self, x, true_y, compute_gradients, apply_gradients, minimize, error_rate, session, batch_size)
Example #5
    def __init__(self):
        session = tf.InteractiveSession()
        x = tf.placeholder("float", shape=[None, 784], name='x')
        x_image = tf.reshape(x, [-1, 28, 28, 1], name='reshape')
        y_ = tf.placeholder("float", shape=[None, 10], name='y_')
        W_conv1 = weight_variable([5, 5, 1, 32], 'W_conv1')
        b_conv1 = bias_variable([32], 'b_conv1')
        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
        h_pool1 = max_pool_2x2(h_conv1)
        W_conv2 = weight_variable([5, 5, 32, 64], 'W_conv2')
        b_conv2 = bias_variable([64], 'b_conv2')
        h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
        h_pool2 = max_pool_2x2(h_conv2)
        W_fc1 = weight_variable([7 * 7 * 64, 1024], 'W_fc1')
        b_fc1 = bias_variable([1024], 'b_fc1')
        h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
        keep_prob = tf.Variable(0.5, name='keep_prob', trainable=False)
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
        W_fc2 = weight_variable([1024, 10], 'W_fc2')
        b_fc2 = bias_variable([10], 'b_fc2')

        # not using dropout for testing, only training
        y_conv_dropout = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
        y_conv = tf.matmul(h_fc1, W_fc2) + b_fc2

        variables = [
            W_conv1, b_conv1, W_conv2, b_conv2, W_fc1, b_fc1, W_fc2, b_fc2
        ]
        # Reduce to a scalar; the original minimized the unreduced per-example vector.
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(y_conv_dropout, y_))

        optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
        compute_gradients = optimizer.compute_gradients(loss, variables)
        apply_gradients = optimizer.apply_gradients(compute_gradients)
        minimize = optimizer.minimize(loss)
        correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

        ParameterServerModel.__init__(self, x, y_, compute_gradients,
                                      apply_gradients, minimize, accuracy,
                                      session)
Example #6
    def __init__(self, batch_size, gpu=False):
        session = tf.Session()

        self.gpu = gpu
        #self.batch_size = batch_size
        input_units = 784
        output_units = 10
        hidden_units = 100  #700

        x = tf.placeholder('float32', shape=[None, input_units], name='x')
        true_y = tf.placeholder('float32',
                                shape=[None, output_units],
                                name='y')

        W_fc0 = weight_variable([input_units, hidden_units], 'W_fc0')
        b_fc0 = bias_variable([hidden_units], 'b_fc0')
        h_fc0 = tf.nn.relu(tf.matmul(x, W_fc0) + b_fc0)

        W_fc1 = weight_variable([hidden_units, output_units], 'W_fc1')
        b_fc1 = bias_variable([output_units], 'b_fc1')

        guess_y = tf.matmul(h_fc0, W_fc1) + b_fc1

        variables = [W_fc0, b_fc0, W_fc1, b_fc1]
        # Reduce to a scalar; the original minimized the unreduced per-example vector.
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(guess_y, true_y))

        optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
        compute_gradients = optimizer.compute_gradients(loss, variables)
        apply_gradients = optimizer.apply_gradients(compute_gradients)
        minimize = optimizer.minimize(loss)
        correct_prediction = tf.equal(tf.argmax(guess_y, 1),
                                      tf.argmax(true_y, 1))
        error_rate = 1 - tf.reduce_mean(tf.cast(correct_prediction, 'float32'))

        ParameterServerModel.__init__(self, x, true_y, compute_gradients,
                                      apply_gradients, minimize, error_rate,
                                      session, batch_size)
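
Every constructor hands ParameterServerModel both compute_gradients and apply_gradients rather than just minimize. That split is what makes parameter-server-style training possible: a worker evaluates gradients on its own minibatch, ships the numeric values elsewhere, and the receiving side feeds them back into the apply step. A rough sketch of the idea; batch_xs and batch_ys are hypothetical minibatch arrays, and the real exchange is presumably wrapped inside ParameterServerModel rather than written out like this.

# Worker side: evaluate the symbolic gradients on a local minibatch.
grad_tensors = [grad for grad, var in compute_gradients]
numeric_grads = session.run(grad_tensors,
                            feed_dict={x: batch_xs, true_y: batch_ys})

# Server side: apply the received values by feeding them in place of the
# symbolic gradient tensors (feed_dict may override any tensor, not just placeholders).
session.run(apply_gradients, feed_dict=dict(zip(grad_tensors, numeric_grads)))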
Example #7
   def __init__(self, input_size, output_size, conv_filter_sizes, dense_layer_sizes, image_width,
                image_height, dropout=0.5, learning_rate=1e-4, channels=1, l2norm=1e-5, gpu=False,
                recall='input_x', dropname='drop_pb', predict='predictions', batch_size=64, dump=sys.stdout):
      session = tf.InteractiveSession()
      
      training_accuracies = []
      validation_accuracies = []
      training_steps = []
      features_weights = []
      test_accuracy = 0
      
      x = tf.placeholder('float', shape=[None, image_width, image_height, channels], name=recall)
      y_ = tf.placeholder('float', shape=[None, output_size])
      keep_prob = tf.placeholder('float', name=dropname)
      
      with tf.device('/gpu:0' if gpu else '/cpu:0'):
         # Build the network structure
         previous_inputs_size = input_size
         inputs_next = x
         for conv_layer in conv_filter_sizes:
            cur_filter = weight_variable(conv_layer)
            cur_conv_bias = bias_variable([conv_layer[-1]])
            out_conv = tf.nn.conv2d(inputs_next, cur_filter, strides=[1, 1, 1, 1], padding='SAME')
            inputs_next = tf.nn.relu(out_conv + cur_conv_bias)

         # Note: this flattens out_conv (the last pre-activation), so the final
         # ReLU stored in inputs_next is not used by the layers below.
         new_width = out_conv.get_shape()[1]
         new_height = out_conv.get_shape()[2]
         new_channels = out_conv.get_shape()[3]
         out_pool_flat = tf.reshape(out_conv, [-1, int(new_channels * new_height * new_width)])

         # build some densely connected layers
         inputs_next = out_pool_flat
         previous_inputs_size = int(new_height*new_width*new_channels)
         for layer_size in dense_layer_sizes[1:-1]:
            cur_weights = weight_variable([previous_inputs_size, layer_size])
            cur_bias = bias_variable([layer_size])
            cur_neurons = tf.nn.relu(tf.matmul(inputs_next, cur_weights) + cur_bias)
            cur_dropout = tf.nn.dropout(cur_neurons, keep_prob = keep_prob)
            inputs_next = cur_dropout
            previous_inputs_size = layer_size

         final_inputs = inputs_next
         prev_add_size = 0

         # This is the last layer
         out_weights = weight_variable([previous_inputs_size + prev_add_size, output_size])
         out_bias = bias_variable([output_size])
         out_final = tf.nn.softmax(tf.matmul(final_inputs, out_weights) + out_bias, name=predict)
         # define cost function
         # Cross-entropy on clipped probabilities plus a regularization term.
         # Note: the second term only involves cur_weights (the last dense layer
         # built in the loop above) and is not a conventional squared L2 penalty.
         cost_entropy = (-tf.reduce_sum(y_ * tf.log(tf.clip_by_value(out_final, 1e-10, 1.0)))
                         + tf.reduce_sum(tf.clip_by_value(l2norm * cur_weights, 1e-10, 1.0)))

         # define optimiser
         opt = tf.train.AdamOptimizer(learning_rate)
         compute_gradients = opt.compute_gradients(cost_entropy, tf.trainable_variables())
         apply_gradients = opt.apply_gradients(compute_gradients)
         minimize = opt.minimize(cost_entropy)

         # define accuracy
         correct_prediction = tf.equal(tf.argmax(out_final,1), tf.argmax(y_,1))
         accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))

      ParameterServerModel.__init__(self, x, y_, keep_prob, dropout, compute_gradients, apply_gradients, minimize, accuracy, session, batch_size, out_final, gpu, dump)
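
Since this constructor is fully parameterised, a concrete call makes the expected shapes easier to see. A hypothetical instantiation for 28x28 grayscale images with ten classes follows; the class name ConvNet and every size below are illustrative, not from the source. Note that the loop over dense_layer_sizes[1:-1] means the first and last entries of that list never build a layer.

# Hypothetical instantiation; all names and sizes are illustrative.
model = ConvNet(
    input_size=28 * 28,
    output_size=10,
    conv_filter_sizes=[[5, 5, 1, 32], [5, 5, 32, 64]],  # [h, w, in, out] per conv layer
    dense_layer_sizes=[50176, 1024, 512, 10],           # only 1024 and 512 create layers here
    image_width=28,
    image_height=28,
    channels=1,
    gpu=False)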
Example #8
    def __init__(self, batch_size):

        #        NUM_CORES = 4
        #        session = tf.Session(config=tf.ConfigProto(inter_op_parallelism_threads=NUM_CORES, intra_op_parallelism_threads=NUM_CORES))

        num_hidden_units = 2048
        session = tf.InteractiveSession()
        input_units = 28
        output_units = 1
        x = tf.placeholder("float", shape=[None, input_units], name='x')
        true_y = tf.placeholder("float", shape=[None, output_units], name='y_')

        W_fc1 = weight_variable([input_units, num_hidden_units])
        b_fc1 = bias_variable([num_hidden_units])
        h_fc1 = tf.nn.relu(tf.matmul(x, W_fc1) + b_fc1)

        W_fc2 = weight_variable([num_hidden_units, num_hidden_units])
        b_fc2 = bias_variable([num_hidden_units])
        h_fc2 = tf.nn.relu(tf.matmul(h_fc1, W_fc2) + b_fc2)

        W_fc3 = weight_variable([num_hidden_units, num_hidden_units])
        b_fc3 = bias_variable([num_hidden_units])
        h_fc3 = tf.nn.relu(tf.matmul(h_fc2, W_fc3) + b_fc3)

        W_fc4 = weight_variable([num_hidden_units, output_units])
        b_fc4 = bias_variable([output_units])

        keep_prob = tf.Variable(0.5, name='keep_prob', trainable=False)
        h_fc3_dropout = tf.nn.dropout(h_fc3, keep_prob)

        guess_y = tf.matmul(h_fc3, W_fc4) + b_fc4
        guess_y_dropout = tf.matmul(h_fc3_dropout, W_fc4) + b_fc4

        variables = [W_fc1, b_fc1, W_fc2, b_fc2, W_fc3, b_fc3, W_fc4, b_fc4]
        # loss =  tf.nn.l2_loss(guess_y_dropout - true_y)
        #loss = tf.reduce_mean(tf.square(tf.sign(guess_y_dropout) - tf.sign(true_y - 0.5)))
        loss = tf.reduce_mean(tf.square(guess_y_dropout - true_y))
        #        optimizer = tf.train.AdamOptimizer(learning_rate=0.00001, beta1=0.99, beta2=0.999, epsilon=1e-06, use_locking=False, name='Adam')

        #optimizer = tf.train.MomentumOptimizer(learning_rate=0.0000001, momentum=0.9, use_locking=False, name='momentum')
        optimizer = tf.train.MomentumOptimizer(learning_rate=0.0000001,
                                               momentum=0.9995,
                                               use_locking=False,
                                               name='Momentum')
        # optimizer = tf.train.RMSPropOptimizer(1e-4, decay=0.9, momentum=0.0, epsilon=1e-10, use_locking=False, name='RMSProp')
        compute_gradients = optimizer.compute_gradients(loss, variables)
        apply_gradients = optimizer.apply_gradients(compute_gradients)
        minimize = optimizer.minimize(loss)
        #correct_prediction = tf.equal(tf.clip_by_value(tf.round(guess_y), 0.0, 1.0), true_y)
        #correct_prediction = tf.equal(tf.round(guess_y), true_y)
        #correct_prediction = tf.equal((tf.sign(guess_y) + 1)/2., true_y)
        error_rate = loss
        #error_rate = 1 - tf.reduce_mean(tf.cast(correct_prediction, "float"))
        #               correct_prediction = tf.equal(tf.argmax(guess_y,1), tf.argmax(true_y,1))

        ParameterServerModel.__init__(self, x, true_y, compute_gradients,
                                      apply_gradients, minimize, error_rate,
                                      session, batch_size)
Example #9

# console model parameters
hyperparam_header = ("Learning rate: " + str(PARAM_LEARNING_RATE) +
                     "  Momentum:  " + str(PARAM_MOMENTUM) +
                     "  Cost Weight Strength:  " + str(PARAM_COST_WEIGHT_STRENGTH) +
                     "  Random Seed:  " + str(PARAM_RANDOM_SEED) + "\r\n")
layer_header = ("Layer_1: " + str(PARAM_HIDDEN_LAYER_1) +
                "  Layer_2:  " + str(PARAM_HIDDEN_LAYER_2) +
                "  Layer_3:  " + str(PARAM_HIDDEN_LAYER_3) +
                "  Layer_4:  " + str(PARAM_HIDDEN_LAYER_4) + "\r\n")
for log_file in (log_step_cost_object, log_epoch_cost_object,
                 statistic_file_test_object, statistic_file_train_object):
    log_file.write(hyperparam_header)
    log_file.write(layer_header)

merged_summary_op = tf.merge_all_summaries()
summary_writer = tf.train.SummaryWriter(LOG_ADDRESS, sess.graph.as_graph_def())

p = ParameterServerModel(X, Y, PARAM_MOMENTUM, PARAM_COST_WEIGHT_STRENGTH,
                         variables_shapes, compute_gradients, apply_gradients,
                         minimize, error_rate, sess, 128)

for epoch in range(350):
    # print "Training in epoch: ", epoch
    # log_step_cost_object.write("Training in epoch: " + str(epoch) + "\r\n")
    # Note: this start/end pairing drops the final partial minibatch.
    for start, end in zip(range(0, len(trX), 128), range(128, len(trX), 128)):
        minibatch_features = trX[start:end]
        minibatch_activity = trY[start:end]
        p.train(minibatch_activity, minibatch_features)
        p.apply(p.gradients)
        step_cost = sess.run(cost, feed_dict={X: trX[start:end], Y: trY[start:end]})
        log_step_cost_object.write(str(step_cost) + "\r\n")
        log_step_cost_object.flush()
    # Epoch cost is measured on the last minibatch only, not the full training set.
    epoch_cost = sess.run(cost, feed_dict={X: trX[start:end], Y: trY[start:end]})
    log_epoch_cost_object.write(str(epoch_cost) + "\r\n")
    log_epoch_cost_object.flush()
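
merged_summary_op and summary_writer are created above but never used in this excerpt; presumably the full script runs them periodically. A hypothetical sketch of what that step could look like inside the epoch loop:

# Hypothetical summary logging once per epoch (not present in the excerpt).
summary_str = sess.run(merged_summary_op,
                       feed_dict={X: trX[start:end], Y: trY[start:end]})
summary_writer.add_summary(summary_str, epoch)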