def main(argv=None):
    """Entry point for multi-GPU training with a capacity-1 input queue.

    Loads the configured train FLAGS, marks the global mode as 'train',
    pre-loads the data list, then launches the multi-GPU solver.
    `argv` is unused (kept for the tf.app.run-style signature).
    """
    config_agent.init_FLAGS('train')
    VARS['mode'] = 'train'
    load_data_lst(data_lst)
    # Shrink the input queue to a single slot before the solver spins up.
    queue_cfg = FLAGS['input_queue']
    queue_cfg['capacity'] = 1
    solver = Multi_gpu_solver()
    solver.start()
def main(argv=None):
    """Entry point for evaluation.

    Initializes the eval FLAGS, flips the global mode to 'eval', and runs
    the evaluation solver. `argv` is unused (tf.app.run-style signature).
    """
    config_agent.init_FLAGS('eval')
    VARS['mode'] = 'eval'
    solver = Eval_solver()
    solver.start()
def main(argv=None):
    """Entry point for single-device training.

    Initializes the train FLAGS and starts the training solver.
    `argv` is unused (tf.app.run-style signature).

    NOTE(review): unlike the other train entry points, this one never sets
    VARS['mode'] — confirm whether Train_solver sets it internally.
    """
    config_agent.init_FLAGS('train')
    solver = Train_solver()
    solver.start()
self.reader.close() def read_into_hdf5(self): max_steps = self.all_count // self.input_batch_size print('total steps: %d' % max_steps) records = '' for step in xrange(max_steps): hdf5_path = self.hdf5_path.replace('$', '%d' % step) f = h5py.File(hdf5_path, 'w') f.create_dataset('data', (self.input_batch_size, 3, 224, 224)) f.create_dataset('label', (self.input_batch_size, 1, 1, 1)) inputs = self.sess.run(self.inputs) assert (inputs['X'].shape[0] == self.input_batch_size) f['data'][:] = np.transpose(inputs['X'], [0, 3, 1, 2])[:] f['label'][:] = inputs['Y'].reshape( [self.input_batch_size, 1, 1, 1])[:] f.close() records += '%s\n' % hdf5_path if step % 1 == 0: print(step) with open(self.txt_path, 'w') as f: f.write(records) if __name__ == '__main__': config_agent.init_FLAGS('train') VARS['mode'] = 'train' Fake_reader().start()
def main(argv=None):
    """Entry point for prediction output.

    Initializes the eval FLAGS and starts the output solver.
    `argv` is unused (tf.app.run-style signature).

    NOTE(review): this entry point does not set VARS['mode'] the way the
    eval/test mains do — confirm Output_solver does not depend on it.
    """
    config_agent.init_FLAGS('eval')
    solver = Output_solver()
    solver.start()
def main(argv=None):
    """Entry point for test-set prediction.

    Loads the eval FLAGS, marks the global mode as 'test', and runs the
    test solver. `argv` is unused (tf.app.run-style signature).
    """
    config_agent.init_FLAGS('eval')
    VARS['mode'] = 'test'
    solver = Test_solver()
    solver.start()
def main(argv=None):
    """Entry point for multi-GPU training.

    Loads the train FLAGS, marks the global mode as 'train', and launches
    the multi-GPU solver. `argv` is unused (tf.app.run-style signature).
    """
    config_agent.init_FLAGS('train')
    VARS['mode'] = 'train'
    solver = Multi_gpu_solver()
    solver.start()