def evaluate(state, number):
    lpatch, rpatch, patch_targets = dhandler.evaluate()
    labels = np.argmax(patch_targets, axis=1)
    path = FLAGS.model_dir + '/' + str(number)
    with tf.Session() as session:
        limage = tf.placeholder(
            tf.float32, [None, FLAGS.patch_size, FLAGS.patch_size, num_channels],
            name='limage')
        rimage = tf.placeholder(
            tf.float32,
            [None, FLAGS.patch_size, FLAGS.patch_size + FLAGS.disp_range - 1, num_channels],
            name='rimage')
        targets = tf.placeholder(tf.float32, [None, FLAGS.disp_range], name='targets')
        snet = nf.create(limage, rimage, targets, state, FLAGS.net_type)
        prod = snet['inner_product']
        predicted = tf.argmax(prod, axis=1)
        acc_count = 0
        saver = tf.train.Saver()
        saver.restore(session, tf.train.latest_checkpoint(path))
        for i in range(0, lpatch.shape[0], FLAGS.eval_size):
            eval_dict = {limage: lpatch[i:i + FLAGS.eval_size],
                         rimage: rpatch[i:i + FLAGS.eval_size],
                         snet['is_training']: False}
            pred = session.run([predicted], feed_dict=eval_dict)
            acc_count += np.sum(np.abs(pred - labels[i:i + FLAGS.eval_size]) <= 3)
            print('iter. %d finished, with %d correct (3-pixel error)' % (i + 1, acc_count))
        print('accuracy: %.3f' % ((acc_count / lpatch.shape[0]) * 100))
        return (acc_count / lpatch.shape[0]) * 100
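# A minimal, self-contained illustration of the 3-pixel error metric used by
# both evaluate() variants: a prediction counts as correct when its disparity
# class lands within 3 of the ground-truth class. The arrays below are made up
# for demonstration only.
import numpy as np

pred = np.array([10, 42, 7, 99])
labels = np.array([12, 40, 30, 97])
correct = np.sum(np.abs(pred - labels) <= 3)  # 3 of the 4 predictions pass
print('accuracy: %.3f' % (correct / pred.shape[0] * 100))  # 75.000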
def evaluate():
    lPatch, rPatch, patchTargets = dhandler.Evaluate()
    labels = np.argmax(patchTargets, axis=1)
    with tf.Session() as session:
        lImage = tf.compat.v1.placeholder(
            tf.float32, [None, FLAGS.patchSize, FLAGS.patchSize, numChannels],
            name='lImage')
        rImage = tf.compat.v1.placeholder(
            tf.float32,
            [None, FLAGS.patchSize, FLAGS.patchSize + FLAGS.dispRange - 1, numChannels],
            name='rImage')
        targets = tf.compat.v1.placeholder(tf.float32, [None, FLAGS.dispRange], name='targets')
        snet = nf.create(lImage, rImage, targets, FLAGS.netType,
                         FLAGS.patchSize, FLAGS.dispRange, FLAGS.dataVersion)
        prod = snet['innerProduct']
        predicted = tf.argmax(prod, axis=1)
        accCount = 0
        saver = tf.train.Saver()
        saver.restore(session, tf.train.latest_checkpoint(FLAGS.modelDir))
        for i in range(0, lPatch.shape[0], FLAGS.evalSize):
            evalDict = {lImage: lPatch[i:i + FLAGS.evalSize],
                        rImage: rPatch[i:i + FLAGS.evalSize],
                        K.learning_phase(): 0}
            pred = session.run([predicted], feed_dict=evalDict)
            accCount += np.sum(np.abs(pred - labels[i:i + FLAGS.evalSize]) <= 3)
            print('iter. %d finished, with %d correct (3-pixel error)' % (i + 1, accCount))
        print('Accuracy: %.3f' % ((accCount / lPatch.shape[0]) * 100))
def train(state, number):
    path = FLAGS.model_dir + '/' + str(number)
    if not os.path.exists(path):
        os.makedirs(path)
    tf.reset_default_graph()
    run_meta = tf.RunMetadata()
    g = tf.Graph()
    strategy = tf.distribute.MirroredStrategy()
    with strategy.scope():
        with g.as_default():
            # record the devices visible to this run
            with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as dev_session:
                devices = dev_session.list_devices()
            with open(os.path.join(path, 'log.txt'), 'a+') as f:
                f.write(str(devices))
            limage = tf.placeholder(
                tf.float32, [None, FLAGS.patch_size, FLAGS.patch_size, num_channels],
                name='limage')
            rimage = tf.placeholder(
                tf.float32,
                [None, FLAGS.patch_size, FLAGS.patch_size + FLAGS.disp_range - 1, num_channels],
                name='rimage')
            targets = tf.placeholder(tf.float32, [None, FLAGS.disp_range], name='targets')
            snet = nf.create(limage, rimage, targets, state, FLAGS.net_type)
            loss = snet['loss']
            train_step = snet['train_step']
            session = tf.InteractiveSession()
            session.run(tf.global_variables_initializer())
            saver = tf.train.Saver(max_to_keep=1)
            acc_loss = tf.placeholder(tf.float32, shape=())
            loss_summary = tf.summary.scalar('loss', acc_loss)
            train_writer = tf.summary.FileWriter(path + '/training', g)
            losses = []
            summary_index = 1
            lrate = 1e-2
            for it in range(1, FLAGS.num_iter):
                lpatch, rpatch, patch_targets = dhandler.next_batch()
                train_dict = {limage: lpatch, rimage: rpatch, targets: patch_targets,
                              snet['is_training']: True, snet['lrate']: lrate}
                _, mini_loss = session.run([train_step, loss], feed_dict=train_dict)
                losses.append(mini_loss)
                # checkpoint and log the mean loss every 10 iterations
                if it % 10 == 0:
                    print('Loss at step: %d: %.6f' % (it, mini_loss))
                    saver.save(session, os.path.join(path, 'model.ckpt'),
                               global_step=snet['global_step'])
                    train_summary = session.run(loss_summary,
                                                feed_dict={acc_loss: np.mean(losses)})
                    train_writer.add_summary(train_summary, summary_index)
                    summary_index += 1
                    train_writer.flush()
                    losses = []
                # decay the learning rate by 5x at iteration 24000, then every
                # 8000 iterations after that
                if it == 24000:
                    lrate = lrate / 5.
                elif it > 24000 and (it - 24000) % 8000 == 0:
                    lrate = lrate / 5.
    # estimate the total FLOPs of the graph with the TF profiler
    opts = tf.profiler.ProfileOptionBuilder.float_operation()
    flops = tf.profiler.profile(g, run_meta=run_meta, cmd='op', options=opts)
    t_flops = flops.total_float_ops if flops is not None else 0
    print('total float ops:', t_flops)
    return t_flops
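# tf.profiler, as used above, estimates FLOPs statically from the graph
# definition. A minimal standalone sketch of the same call on a toy graph;
# the toy graph and variable names are illustrative only.
g_toy = tf.Graph()
with g_toy.as_default():
    a = tf.ones([64, 64])
    b = tf.ones([64, 64])
    c = tf.matmul(a, b)  # the matmul dominates the operation count
opts = tf.profiler.ProfileOptionBuilder.float_operation()
flops = tf.profiler.profile(g_toy, cmd='op', options=opts)
print(flops.total_float_ops)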
def train():
    if not os.path.exists(FLAGS.model_dir):
        os.makedirs(FLAGS.model_dir)
    g = tf.Graph()
    with g.as_default():
        limage = tf.placeholder(
            tf.float32, [None, FLAGS.patch_size, FLAGS.patch_size, num_channels],
            name='limage')
        rimage = tf.placeholder(
            tf.float32,
            [None, FLAGS.patch_size, FLAGS.patch_size + FLAGS.disp_range - 1, num_channels],
            name='rimage')
        targets = tf.placeholder(tf.float32, [None, FLAGS.disp_range], name='targets')
        snet = nf.create(limage, rimage, targets, FLAGS.net_type)
        loss = snet['loss']
        train_step = snet['train_step']
        session = tf.InteractiveSession()
        session.run(tf.global_variables_initializer())
        saver = tf.train.Saver(max_to_keep=1)
        acc_loss = tf.placeholder(tf.float32, shape=())
        loss_summary = tf.summary.scalar('loss', acc_loss)
        train_writer = tf.summary.FileWriter(FLAGS.model_dir + '/training', g)
        losses = []
        summary_index = 1
        lrate = 1e-2
        for it in range(1, FLAGS.num_iter):
            lpatch, rpatch, patch_targets = dhandler.next_batch()
            train_dict = {limage: lpatch, rimage: rpatch, targets: patch_targets,
                          snet['is_training']: True, snet['lrate']: lrate}
            _, mini_loss = session.run([train_step, loss], feed_dict=train_dict)
            losses.append(mini_loss)
            if it % 100 == 0:
                print('Loss at step: %d: %.6f' % (it, mini_loss))
                saver.save(session, os.path.join(FLAGS.model_dir, 'model.ckpt'),
                           global_step=snet['global_step'])
                train_summary = session.run(loss_summary,
                                            feed_dict={acc_loss: np.mean(losses)})
                train_writer.add_summary(train_summary, summary_index)
                summary_index += 1
                train_writer.flush()
                losses = []
            if it == 24000:
                lrate = lrate / 5.
            elif it > 24000 and (it - 24000) % 8000 == 0:
                lrate = lrate / 5.
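# The functions in this file assume a module-level FLAGS object. A sketch of
# some of the flag definitions they rely on, using TF1's tf.app.flags; the
# list is partial and the default values are illustrative assumptions, not the
# project's actual settings.
flags = tf.app.flags
flags.DEFINE_string('model_dir', 'model', 'directory for checkpoints and summaries')
flags.DEFINE_string('data_version', 'kitti2015', "'kitti2015' or 'kitti2012'")
flags.DEFINE_string('net_type', 'win37_dep9', 'network architecture to build')
flags.DEFINE_integer('patch_size', 37, 'side length of the left input patch')
flags.DEFINE_integer('disp_range', 201, 'number of candidate disparities')
flags.DEFINE_integer('num_iter', 40000, 'number of training iterations')
flags.DEFINE_integer('eval_size', 200, 'mini-batch size during evaluation')
FLAGS = flags.FLAGS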
def main(_):
    print(FLAGS.util_root)
    np.random.seed(123)
    file_ids = np.fromfile(os.path.join(FLAGS.util_root, 'myPerm.bin'), '<f4')
    if FLAGS.data_version == 'kitti2015':
        num_channels = 3
    elif FLAGS.data_version == 'kitti2012':
        num_channels = 1
    scale_factor = 255 / (FLAGS.disp_range - 1)
    if not os.path.exists(FLAGS.out_dir):
        os.makedirs(FLAGS.out_dir)
    with tf.Session() as session:
        limage = tf.placeholder(tf.float32, [None, None, None, num_channels], name='limage')
        rimage = tf.placeholder(tf.float32, [None, None, None, num_channels], name='rimage')
        targets = tf.placeholder(tf.float32, [None, FLAGS.disp_range], name='targets')
        snet = nf.create(limage, rimage, targets, FLAGS.net_type)
        lmap = tf.placeholder(tf.float32, [None, None, None, 64], name='lmap')
        rmap = tf.placeholder(tf.float32, [None, None, None, 64], name='rmap')
        map_prod = nf.map_inner_product(lmap, rmap)
        saver = tf.train.Saver()
        saver.restore(session, tf.train.latest_checkpoint(FLAGS.model_dir))
        for i in range(FLAGS.start_id, FLAGS.start_id + FLAGS.num_imgs):
            file_id = file_ids[i]
            if FLAGS.data_version == 'kitti2015':
                linput = misc.imread('%s/image_2/%06d_10.png' % (FLAGS.data_root, file_id))
                rinput = misc.imread('%s/image_3/%06d_10.png' % (FLAGS.data_root, file_id))
            elif FLAGS.data_version == 'kitti2012':
                linput = misc.imread('%s/image_0/%06d_10.png' % (FLAGS.data_root, file_id))
                rinput = misc.imread('%s/image_1/%06d_10.png' % (FLAGS.data_root, file_id))
            # normalize each image to zero mean and unit variance
            linput = (linput - linput.mean()) / linput.std()
            rinput = (rinput - rinput.mean()) / rinput.std()
            linput = linput.reshape(1, linput.shape[0], linput.shape[1], num_channels)
            rinput = rinput.reshape(1, rinput.shape[0], rinput.shape[1], num_channels)
            test_dict = {limage: linput, rimage: rinput, snet['is_training']: False}
            limage_map, rimage_map = session.run([snet['lbranch'], snet['rbranch']],
                                                 feed_dict=test_dict)
            map_width = limage_map.shape[2]
            unary_vol = np.zeros((limage_map.shape[1], limage_map.shape[2], FLAGS.disp_range))
            # build the cost volume: correlate the left feature map with the
            # right feature map shifted by each candidate disparity
            for loc in range(FLAGS.disp_range):
                x_off = -loc
                l = limage_map[:, :, max(0, -x_off):map_width, :]
                r = rimage_map[:, :, 0:min(map_width, map_width + x_off), :]
                res = session.run(map_prod, feed_dict={lmap: l, rmap: r})
                unary_vol[:, max(0, -x_off):map_width, loc] = res[0, :, :]
            print('Image %s processed.' % (i + 1))
            # winner-take-all disparity, scaled to the 0-255 output range
            pred = np.argmax(unary_vol, axis=2) * scale_factor
            misc.imsave('%s/disp_map_%06d_10.png' % (FLAGS.out_dir, file_id), pred)
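# The disparity loop in main() crops the left map and slides the right map by
# `loc` pixels before correlating. A numpy-only sketch of that indexing, with
# a per-pixel inner product over the feature axis standing in for
# nf.map_inner_product; all shapes and data here are toy values.
import numpy as np

h, w, c, disp_range = 4, 8, 64, 3
lmap_np = np.random.randn(1, h, w, c)
rmap_np = np.random.randn(1, h, w, c)
unary_vol = np.zeros((h, w, disp_range))
for loc in range(disp_range):
    x_off = -loc
    l = lmap_np[:, :, max(0, -x_off):w, :]     # left map, cropped at the left edge
    r = rmap_np[:, :, 0:min(w, w + x_off), :]  # right map, shifted by the disparity
    res = np.sum(l * r, axis=3)                # inner product over the feature axis
    unary_vol[:, max(0, -x_off):w, loc] = res[0]
pred = np.argmax(unary_vol, axis=2)            # winner-take-all disparity map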
def train(state, number, weights, first):
    count1 = 0
    count2 = 0
    path = FLAGS.model_dir + '/' + str(number)
    if not os.path.exists(path):
        os.makedirs(path)
    tf.reset_default_graph()
    run_meta = tf.RunMetadata()
    g = tf.Graph()
    strategy = tf.distribute.MirroredStrategy()
    with strategy.scope():
        with g.as_default():
            limage = tf.placeholder(
                tf.float32, [None, FLAGS.patch_size, FLAGS.patch_size, num_channels],
                name='limage')
            rimage = tf.placeholder(
                tf.float32,
                [None, FLAGS.patch_size, FLAGS.patch_size + FLAGS.disp_range - 1, num_channels],
                name='rimage')
            targets = tf.placeholder(tf.float32, [None, FLAGS.disp_range], name='targets')
            snet = nf.create(limage, rimage, targets, state, FLAGS.net_type)
            loss = snet['loss']
            train_step = snet['train_step']
            session = tf.InteractiveSession()
            session.run(tf.global_variables_initializer())
            saver = tf.train.Saver(max_to_keep=1)
            acc_loss = tf.placeholder(tf.float32, shape=())
            loss_summary = tf.summary.scalar('loss', acc_loss)
            train_writer = tf.summary.FileWriter(path + '/training', g)
            losses = []
            summary_index = 1
            lrate = 1e-2
            # warm start: copy every inherited variable whose name matches a
            # trainable variable in the new graph, counting successes and
            # shape-mismatch failures
            if not first:
                for var in g.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):
                    if var.name in weights:
                        try:
                            session.run(tf.assign(var, weights[var.name]))
                            count1 += 1
                            print('changed', count1)
                        except Exception:
                            count2 += 1
                            print('not changed', count2)
            for it in range(1, FLAGS.num_iter):
                lpatch, rpatch, patch_targets = dhandler.next_batch()
                train_dict = {limage: lpatch, rimage: rpatch, targets: patch_targets,
                              snet['is_training']: True, snet['lrate']: lrate}
                _, mini_loss = session.run([train_step, loss], feed_dict=train_dict)
                losses.append(mini_loss)
                # checkpoint and log the mean loss every 10 iterations
                if it % 10 == 0:
                    print('Loss at step: %d: %.6f' % (it, mini_loss))
                    saver.save(session, os.path.join(path, 'model.ckpt'),
                               global_step=snet['global_step'])
                    train_summary = session.run(loss_summary,
                                                feed_dict={acc_loss: np.mean(losses)})
                    train_writer.add_summary(train_summary, summary_index)
                    summary_index += 1
                    train_writer.flush()
                    losses = []
                # decay the learning rate by 5x at iteration 24000, then every
                # 8000 iterations after that
                if it == 24000:
                    lrate = lrate / 5.
                elif it > 24000 and (it - 24000) % 8000 == 0:
                    lrate = lrate / 5.
            # snapshot the trained weights so the next model can inherit them
            weights = {}
            for var in g.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):
                weights[var.name] = session.run(var)
    # estimate the total FLOPs of the graph with the TF profiler
    opts = tf.profiler.ProfileOptionBuilder.float_operation()
    flops = tf.profiler.profile(g, run_meta=run_meta, cmd='op', options=opts)
    t_flops = flops.total_float_ops if flops is not None else 0
    print('total float ops:', t_flops)
    # dump the graph's node table for later inspection
    with open(os.path.join(path, 'g.txt'), 'a+') as f:
        f.write(str(g._nodes_by_name))
    return t_flops, weights
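# A hypothetical driver for the weight-inheriting train() above: the first
# model trains from scratch, and each later model warm-starts from the weight
# dictionary returned by its predecessor. `states` is an assumed stand-in for
# whatever sequence of architecture descriptions is being explored.
flops, weights = train(states[0], 0, {}, True)
for n, state in enumerate(states[1:], start=1):
    flops, weights = train(state, n, weights, False)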
def train(state, number):
    # save each half-trained model in its own directory
    path = FLAGS.model_dir + '/' + str(number)
    if not os.path.exists(path):
        os.makedirs(path)
    tf.reset_default_graph()
    run_meta = tf.RunMetadata()
    g = tf.Graph()
    with tf.device('/gpu:0'):
        with g.as_default():
            limage = tf.placeholder(
                tf.float32, [None, FLAGS.patch_size, FLAGS.patch_size, num_channels],
                name='limage')
            rimage = tf.placeholder(
                tf.float32,
                [None, FLAGS.patch_size, FLAGS.patch_size + FLAGS.disp_range - 1, num_channels],
                name='rimage')
            targets = tf.placeholder(tf.float32, [None, FLAGS.disp_range], name='targets')
            snet = nf.create(limage, rimage, targets, state, FLAGS.net_type)
            loss = snet['loss']
            train_step = snet['train_step']
            session = tf.InteractiveSession()
            session.run(tf.global_variables_initializer())
            saver = tf.train.Saver(max_to_keep=1)
            acc_loss = tf.placeholder(tf.float32, shape=())
            loss_summary = tf.summary.scalar('loss', acc_loss)
            train_writer = tf.summary.FileWriter(path + '/training', g)
            losses = []
            summary_index = 1
            lrate = 1e-2
            for it in range(1, FLAGS.num_iter):
                lpatch, rpatch, patch_targets = dhandler.next_batch()
                train_dict = {limage: lpatch, rimage: rpatch, targets: patch_targets,
                              snet['is_training']: True, snet['lrate']: lrate}
                _, mini_loss = session.run([train_step, loss], feed_dict=train_dict)
                losses.append(mini_loss)
                # checkpoint the trained weights every 10 iterations
                if it % 10 == 0:
                    print('Loss at step: %d: %.6f' % (it, mini_loss))
                    saver.save(session, os.path.join(path, 'model.ckpt'),
                               global_step=snet['global_step'])
                    train_summary = session.run(loss_summary,
                                                feed_dict={acc_loss: np.mean(losses)})
                    train_writer.add_summary(train_summary, summary_index)
                    summary_index += 1
                    train_writer.flush()
                    losses = []
                if it == 24000:
                    lrate = lrate / 5.
                elif it > 24000 and (it - 24000) % 8000 == 0:
                    lrate = lrate / 5.
    opts = tf.profiler.ProfileOptionBuilder.float_operation()
    flops = tf.profiler.profile(g, run_meta=run_meta, cmd='op', options=opts)
    t_flops = flops.total_float_ops if flops is not None else 0
    return t_flops
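# The learning-rate schedule embedded in the training loops above, factored
# into a pure function for clarity: 1e-2 until iteration 24000, divided by 5
# there, and again every 8000 iterations after that. This refactoring is a
# sketch, not part of the original code.
def learning_rate(it, base=1e-2):
    if it < 24000:
        return base
    return base / 5. ** (1 + (it - 24000) // 8000)

assert learning_rate(23999) == 1e-2
assert learning_rate(24000) == 1e-2 / 5
assert learning_rate(32000) == 1e-2 / 25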