# ---------------------------------------------------------------------------
# Dataset / model configuration for the IsoGD RGB gesture-recognition split.
# ---------------------------------------------------------------------------
num_classes = 249
# BUGFIX: original was 'isogr_ rgb' (stray interior space, a paste artifact).
# This name is interpolated into the training log filename, so the space
# would have produced a log path with an embedded blank.
dataset_name = 'isogr_rgb'
training_datalist = 'trte_splits/IsoGD_Image/train_rgb_list.txt'
testing_datalist = 'trte_splits/IsoGD_Image/valid_rgb_list.txt'

sess = tf.InteractiveSession()

# Input placeholders:
#   x: video clips, shape (batch_size, seq_len, height=112, width=112, channels=3)
#   y: integer class labels, shape (batch_size,)
x = tf.placeholder(tf.float32, [batch_size, seq_len, 112, 112, 3], name='datas')
y = tf.placeholder(tf.int32, shape=[batch_size], name='labels')

# Build the C3D+ConvLSTM network. The two boolean flags presumably select
# reuse/training mode (False, True here) -- TODO confirm against net.c3d_clstm.
networks = net.c3d_clstm(x, num_classes, False, True)
networks_y = networks.outputs

# Predicted class = argmax over the softmaxed logits.
networks_y_op = tf.argmax(tf.nn.softmax(networks_y), 1)
networks_cost = tl.cost.cross_entropy(networks_y, y, name="cost_network")
tf.summary.scalar("network loss", networks_cost)

# Batch accuracy: fraction of predictions matching the integer labels.
correct_pred = tf.equal(tf.cast(networks_y_op, tf.int32), y)
networks_accu = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# BUGFIX: summary tag was misspelled "accary"; corrected to "accuracy".
tf.summary.scalar("accuracy", networks_accu)
# NOTE(review): removed leftover debug `print(networks)` and the dead
# commented-out evaluation snippet (it was syntactically broken anyway:
# the tf.cast/tf.equal parentheses were mis-nested).
# Build a compact timestamp "YYYYMMDD-HHMMSS" from date/time strings.
# Assumes d is "YYYY-MM-DD" and t is "HH:MM:SS" (defined earlier in the
# file, outside this view) -- TODO confirm.
strtime = '%s%s%s-%s%s%s' % (d.split('-')[0], d.split('-')[1], d.split('-')[2], t.split(':')[0], t.split(':')[1], t.split(':')[2])
# Redirect stdout into an in-memory buffer; the original stream is kept so
# it can presumably be restored later (restoration not visible here).
# NOTE(review): cStringIO is Python-2 only -- this file targets Python 2.
saved_stdout = sys.stdout
mem_log = cStringIO.StringIO()
sys.stdout = mem_log
# Per-run training log file, named from the dataset and the timestamp above.
logfile = './log/training_%s_%s.log' % (dataset_name, strtime)
log = open(logfile, 'w')
sess = tf.InteractiveSession()
# Placeholders: x = video clips (batch, seq, 112, 112, 3); y = integer labels.
x = tf.placeholder(tf.float32, [batch_size, seq_len, 112, 112, 3], name='x')
y = tf.placeholder(tf.int32, shape=[ batch_size, ], name='y')
# Training-mode network; the two flags presumably mean reuse=False,
# is_train=True -- TODO confirm against net.c3d_clstm.
networks = net.c3d_clstm(x, num_classes, False, True)
networks_y = networks.outputs
# Predicted class, cross-entropy loss, and batch accuracy for training.
networks_y_op = tf.argmax(tf.nn.softmax(networks_y), 1)
networks_cost = tl.cost.cross_entropy(networks_y, y, 'loss')
correct_pred = tf.equal(tf.cast(networks_y_op, tf.int32), y)
networks_accu = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Second graph instance for evaluation (flags swapped: True, False --
# presumably reuse=True, is_train=False). "predictons" spelling kept as-is
# because these names may be referenced later in the file.
predictons = net.c3d_clstm(x, num_classes, True, False)
predicton_y_op = tf.argmax(tf.nn.softmax(predictons.outputs), 1)
predicton_accu = tf.reduce_mean(
    tf.cast(tf.equal(tf.cast(predicton_y_op, tf.int32), y), tf.float32))
# L2 weight-decay penalty over selected parameter tensors. The hard-coded
# indices (0, 6, 12, 14, ...) presumably pick out the conv/FC kernels of
# c3d_clstm -- TODO confirm against the network definition. NOTE: this
# expression is continued on the following line (outside this view).
l2_cost = tf.contrib.layers.l2_regularizer(weight_decay)(networks.all_params[0]) + \
    tf.contrib.layers.l2_regularizer(weight_decay)(networks.all_params[6]) + \
    tf.contrib.layers.l2_regularizer(weight_decay)(networks.all_params[12]) + \
    tf.contrib.layers.l2_regularizer(weight_decay)(networks.all_params[14]) + \