############################################################
# Variant 1: single-GPU trainer for the step-2 multilabel
# hash classifier; generates hash_step2_code when done.
############################################################
import os
import time

import h5py
import numpy as np
import scipy.io as sio

# unpack_configs, load_weights_finetune, train_model_wrap,
# get_val_error_loss, get_prediction_labels, adjust_learning_rate,
# save_weights and save_momentums are assumed to be provided by the
# project's training utilities.


def train_net(config):

    # UNPACK CONFIGS
    (train_filenames, img_mean) = unpack_configs(config)

    # bind the GPU before the heavy theano imports
    import theano.sandbox.cuda
    theano.sandbox.cuda.use(config['gpu'])

    import theano
    theano.config.on_unused_input = 'warn'
    import theano.tensor as T

    from multilabel_layers import DropoutLayer
    from multilabel_net import CNN_model, compile_models

    import theano.misc.pycuda_init
    import theano.misc.pycuda_utils

    # load hash_step1_bits
    group_idx = sio.loadmat('./step1/temp/group_idx.mat')
    group_idx = group_idx['group_idx']
    group_idx = group_idx[0][0]

    code_per_group = 8
    bits_idxes = range((group_idx - 1) * code_per_group)
    config['output_num'] = len(bits_idxes)

    model = CNN_model(config)

    batch_size = model.batch_size
    layers = model.layers
    weight_types = model.weight_types
    params = model.params

    # the first 20 training batches double as the validation set
    val_filenames = train_filenames[:20]

    n_train_batches = len(train_filenames)
    minibatch_range = range(n_train_batches)

    ## COMPILE FUNCTIONS ##
    (train_model, validate_model, predict_model, train_error,
     learning_rate, shared_x, shared_y, vels) = compile_models(model, config)

    # stack the step-1 hash codes of the previous groups into a label matrix
    train_labels = None
    for idx in bits_idxes:
        hash_step1_code = h5py.File('./step1/temp/hash_step1_code_' +
                                    str(idx + 1) + '.mat')
        temp = np.transpose(np.asarray(
            hash_step1_code['hash_step1_code'])).astype('int64')
        if train_labels is None:
            train_labels = temp
        else:
            train_labels = np.hstack([train_labels, temp])

    train_labels[train_labels == -1] = 0
    val_labels = train_labels[:20 * batch_size]

    ######################### TRAIN MODEL ################################
    print '... training'

    # initialize_weights(layers, weight_types)
    # learning_rate.set_value(config['learning_rate'])
    # vels = [theano.shared(param_i.get_value() * 0.)
    #         for param_i in params]

    # Start Training Loop
    epoch = 0
    step_idx = 0
    val_record = []
    predicted_labels = None

    while epoch < config['n_epochs']:
        epoch = epoch + 1

        if config['shuffle']:
            np.random.shuffle(minibatch_range)

        if config['finetune'] and epoch == 1 and not config['resume_train']:
            load_weights_finetune(layers, config['finetune_weights_dir'])

        count = 0
        for minibatch_index in minibatch_range:

            num_iter = (epoch - 1) * n_train_batches + count
            count = count + 1
            if count == 1:
                s = time.time()
            if count == 20:
                e = time.time()
                print "time per 20 iter:", (e - s)

            cost_ij = train_model_wrap(train_model, shared_x, shared_y,
                                       minibatch_index, minibatch_range,
                                       batch_size, train_labels,
                                       train_filenames, img_mean)

            if num_iter % config['print_freq'] == 0:
                print 'training @ iter = ', num_iter
                print 'training cost:', cost_ij
                if config['print_train_error']:
                    print 'training error rate:', train_error()

        ############### Test on Validation Set ##################
        DropoutLayer.SetDropoutOff()

        this_validation_error, this_validation_loss = get_val_error_loss(
            shared_x, shared_y, img_mean, val_filenames, val_labels,
            batch_size, validate_model)

        print('epoch %i: validation loss %f ' %
              (epoch, this_validation_loss))
        print('epoch %i: validation error %f %%' %
              (epoch, this_validation_error * 100.))

        val_record.append([this_validation_error, this_validation_loss])

        savepath = config['weights_dir'] + 'classifier_' + \
            str(group_idx - 1) + '/'
        if not os.path.exists(savepath):
            os.mkdir(savepath)
        np.save(savepath + 'val_record.npy', val_record)

        DropoutLayer.SetDropoutOn()

        ############################################
        # Adapt Learning Rate
        step_idx = adjust_learning_rate(config, epoch, step_idx,
                                        val_record, learning_rate)

        # Save weights every 5 epochs
        if epoch % 5 == 0:
            save_weights(layers, savepath, epoch)
            np.save(savepath + 'lr_' + str(epoch) + '.npy',
                    learning_rate.get_value())
            save_momentums(vels, savepath, epoch)

    # generate the step-2 hash labels with dropout disabled
    DropoutLayer.SetDropoutOff()

    for minibatch_index in range(n_train_batches):
        label = get_prediction_labels(predict_model, shared_x,
                                      minibatch_index, train_filenames,
                                      img_mean)
        if predicted_labels is None:
            predicted_labels = label[0]
        else:
            predicted_labels = np.vstack((predicted_labels, label[0]))

    hash_step2_code = {'hash_step2_code': predicted_labels}
    sio.savemat('./temp/hash_step2_code_' + str(group_idx - 1) + '.mat',
                hash_step2_code)

    DropoutLayer.SetDropoutOn()

    print('Optimization complete.')
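############################################################
# A minimal sketch of the `config` dict the classifier
# variant above reads. Only keys that train_net itself
# touches are listed; every value is an illustrative
# assumption, and unpack_configs / CNN_model /
# compile_models will typically require further keys
# (data paths, momentum, batch size, ...).
############################################################
example_config = {
    'gpu': 'gpu0',                  # device for theano.sandbox.cuda.use
    'n_epochs': 60,                 # outer training-loop bound
    'shuffle': True,                # reshuffle minibatch order every epoch
    'finetune': True,               # load pretrained weights at epoch 1 ...
    'resume_train': False,          # ... unless resuming a previous run
    'finetune_weights_dir': './pretrained/',
    'print_freq': 40,               # iterations between cost printouts
    'print_train_error': False,     # also report the training error rate
    'weights_dir': './weights/',    # classifier_<k>/ snapshots land here
}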
############################################################
# Variant 2: single-GPU ImageNet trainer with optional
# parallel data loading over a zmq PAIR socket.
############################################################
def train_net(config):

    # UNPACK CONFIGS
    (flag_para_load, train_filenames, val_filenames,
     train_labels, val_labels, img_mean) = unpack_configs(config)

    # pycuda set up
    drv.init()
    dev = drv.Device(int(config['gpu'][-1]))
    ctx = dev.make_context()

    if flag_para_load:
        # zmq set up
        sock = zmq.Context().socket(zmq.PAIR)
        sock.connect('tcp://*****:*****')  # endpoint redacted in source

    # [elided in source: the redaction above swallowed the model
    # construction, function compilation and the opening of the training
    # loop; the text resumes inside the minibatch loop below, and the
    # print_freq guard is restored from the sibling variants]

            if num_iter % config['print_freq'] == 0:
                print 'training @ iter = ', num_iter
                print 'training cost:', cost_ij
                if config['print_train_error']:
                    print 'training error rate:', train_error()

            if flag_para_load and (count < len(minibatch_range)):
                load_send_queue.put('calc_finished')

        ############### Test on Validation Set ##################
        DropoutLayer.SetDropoutOff()

        this_validation_error, this_validation_loss = get_val_error_loss(
            rand_arr, shared_x, shared_y, val_filenames, val_labels,
            flag_para_load, img_mean, batch_size, validate_model,
            send_queue=load_send_queue, recv_queue=load_recv_queue)

        print('epoch %i: validation loss %f ' %
              (epoch, this_validation_loss))
        print('epoch %i: validation error %f %%' %
              (epoch, this_validation_error * 100.))

        val_record.append([this_validation_error, this_validation_loss])
        np.save(config['weights_dir'] + 'val_record.npy', val_record)

        DropoutLayer.SetDropoutOn()

        ############################################
        # Adapt Learning Rate
        step_idx = adjust_learning_rate(config, epoch, step_idx,
                                        val_record, learning_rate)

        # Save weights
        if epoch % config['snapshot_freq'] == 0:
            save_weights(layers, config['weights_dir'], epoch)
            np.save(config['weights_dir'] + 'lr_' + str(epoch) + '.npy',
                    learning_rate.get_value())
            save_momentums(vels, config['weights_dir'], epoch)

    print('Optimization complete.')
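############################################################
# A minimal sketch of the parallel-loading handshake the
# variants above rely on, assuming plain multiprocessing
# queues. The real loader additionally ships batches over
# the zmq PAIR socket and pycuda device buffers; this only
# shows the 'calc_finished' signalling that lets loading
# of the next batch overlap with the current computation.
# All names here are illustrative, not from the original.
############################################################
from multiprocessing import Process, Queue


def _loader(send_queue, recv_queue, n_batches):
    for i in range(n_batches):
        batch = 'batch_%d' % i      # placeholder for real image loading
        recv_queue.put(batch)       # hand the prepared batch to the trainer
        if i < n_batches - 1:
            send_queue.get()        # wait for 'calc_finished' before loading more


def _trainer(send_queue, recv_queue, n_batches):
    for count in range(1, n_batches + 1):
        batch = recv_queue.get()    # block until the loader delivers
        # ... run train_model on `batch` here ...
        if count < n_batches:
            send_queue.put('calc_finished')


if __name__ == '__main__':
    q_send, q_recv = Queue(), Queue()
    p = Process(target=_loader, args=(q_send, q_recv, 5))
    p.start()
    _trainer(q_send, q_recv, 5)
    p.join()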
############################################################
# Variant 3: single-GPU trainer with logger output and
# optional top-5 validation error.
############################################################
def train_net(config):

    # UNPACK CONFIGS
    (flag_para_load, train_filenames, val_filenames,
     train_labels, val_labels, img_mean) = unpack_configs(config)

    if flag_para_load:
        # zmq set up
        sock = zmq.Context().socket(zmq.PAIR)
        sock.connect('tcp://*****:*****')  # endpoint redacted in source

    # [elided in source: the redaction above swallowed the model
    # construction, function compilation and the opening of the training
    # loop; the text resumes inside the minibatch loop below, and the
    # print_freq guard is restored from the sibling variants]

            if num_iter % config['print_freq'] == 0:
                logger.info("training @ iter = %i" % num_iter)
                logger.info("training cost: %lf" % cost_ij)
                if config['print_train_error']:
                    logger.info('training error rate: %lf' % train_error())

            if flag_para_load and (count < len(minibatch_range)):
                load_send_queue.put('calc_finished')

        ############### Test on Validation Set ##################
        DropoutLayer.SetDropoutOff()

        # result_list is [error, loss] or, with flag_top_5,
        # [top-1 error, top-5 error, loss]
        result_list = get_val_error_loss(
            rand_arr, shared_x, shared_y, val_filenames, val_labels,
            flag_para_load, img_mean, batch_size, validate_model,
            send_queue=load_send_queue, recv_queue=load_recv_queue,
            flag_top_5=flag_top5)

        logger.info('epoch %i: validation loss %f ' %
                    (epoch, result_list[-1]))
        if flag_top5:
            logger.info('epoch %i: validation error (top 1) %f %%, '
                        '(top5) %f %%' %
                        (epoch, result_list[0] * 100., result_list[1] * 100.))
        else:
            logger.info('epoch %i: validation error %f %%' %
                        (epoch, result_list[0] * 100.))

        val_record.append(result_list)
        np.save(config['weights_dir'] + 'val_record.npy', val_record)

        DropoutLayer.SetDropoutOn()

        ############################################
        # Adapt Learning Rate
        step_idx = adjust_learning_rate(config, epoch, step_idx,
                                        val_record, learning_rate)

        # Save weights
        if epoch % config['snapshot_freq'] == 0:
            save_weights(layers, config['weights_dir'], epoch)
            np.save(config['weights_dir'] + 'lr_' + str(epoch) + '.npy',
                    learning_rate.get_value())
            save_momentums(vels, config['weights_dir'], epoch)

    print('Optimization complete.')
############################################################
# Variant 4: two-GPU trainer; the two workers exchange
# errors and losses through a pair of queues and average.
############################################################
def train_net(config, private_config):

    # UNPACK CONFIGS
    (flag_para_load, flag_datalayer, train_filenames, val_filenames,
     train_labels, val_labels, img_mean) = \
        unpack_configs(config, ext_data=private_config['ext_data'],
                       ext_label=private_config['ext_label'])

    gpu_send_queue = private_config['queue_gpu_send']
    gpu_recv_queue = private_config['queue_gpu_recv']

    # pycuda and zmq set up
    drv.init()
    dev = drv.Device(int(private_config['gpu'][-1]))
    ctx = dev.make_context()

    sock_gpu = zmq.Context().socket(zmq.PAIR)
    if private_config['flag_client']:
        sock_gpu.connect('tcp://*****:*****')  # endpoint redacted in source

    # [elided in source: the redaction above swallowed the server-side zmq
    # bind, model construction, function compilation and the opening of
    # the training loop; the text resumes inside the minibatch loop below]

            if num_iter % config['print_freq'] == 0:
                print 'training @ iter = ', num_iter
                print 'training cost:', cost_ij
                if config['print_train_error']:
                    # average the training error across the two workers
                    error_ij = train_error()
                    gpu_send_queue.put(error_ij)
                    that_error = gpu_recv_queue.get()
                    error_ij = (error_ij + that_error) / 2.
                    if private_config['flag_verbose']:
                        print 'training error rate:', error_ij

            if flag_para_load and (count < len(minibatch_range)):
                load_send_queue.put('calc_finished')

        ############### Test on Validation Set ##################
        DropoutLayer.SetDropoutOff()

        this_val_error, this_val_loss = get_val_error_loss(
            rand_arr, shared_x, shared_y, val_filenames, val_labels,
            flag_datalayer, flag_para_load, batch_size, validate_model,
            send_queue=load_send_queue, recv_queue=load_recv_queue)

        # report validation stats averaged over both workers
        gpu_send_queue.put(this_val_error)
        that_val_error = gpu_recv_queue.get()
        this_val_error = (this_val_error + that_val_error) / 2.

        gpu_send_queue.put(this_val_loss)
        that_val_loss = gpu_recv_queue.get()
        this_val_loss = (this_val_loss + that_val_loss) / 2.

        if private_config['flag_verbose']:
            print('epoch %i: validation loss %f ' % (epoch, this_val_loss))
            print('epoch %i: validation error %f %%' %
                  (epoch, this_val_error * 100.))

        val_record.append([this_val_error, this_val_loss])
        if private_config['flag_save']:
            np.save(config['weights_dir'] + 'val_record.npy', val_record)

        DropoutLayer.SetDropoutOn()

        ############################################
        # Adapt Learning Rate
        step_idx = adjust_learning_rate(config, epoch, step_idx,
                                        val_record, learning_rate)

        # Save weights; only one of the two workers does
        if private_config['flag_save']:
            if epoch % config['snapshot_freq'] == 0:
                save_weights(layers, config['weights_dir'], epoch)
                np.save(config['weights_dir'] + 'lr_' + str(epoch) + '.npy',
                        learning_rate.get_value())
                save_momentums(vels, config['weights_dir'], epoch)

    print('Optimization complete.')
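############################################################
# The put/get/average pattern above is a tiny symmetric
# reduction between the two workers. A hedged helper that
# captures it; the function name is illustrative and not
# part of the original code.
############################################################
def exchange_and_average(value, send_queue, recv_queue):
    """Send our scalar to the peer worker, receive theirs, return the mean."""
    send_queue.put(value)
    that_value = recv_queue.get()
    return (value + that_value) / 2.

# e.g. error_ij = exchange_and_average(train_error(),
#                                      gpu_send_queue, gpu_recv_queue)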
############################################################
# Variant 5: single-GPU trainer with logger output, per-20-
# iteration timing and optional top-5 validation error.
############################################################
def train_net(config):

    # UNPACK CONFIGS
    (flag_para_load, train_filenames, val_filenames,
     train_labels, val_labels, img_mean) = unpack_configs(config)

    if flag_para_load:
        # zmq set up
        sock = zmq.Context().socket(zmq.PAIR)
        sock.connect('tcp://*****:*****')  # endpoint redacted in source

    # [elided in source: the redaction above swallowed the model
    # construction, function compilation and the opening of the training
    # loop; the text resumes inside the minibatch loop, at the tail of a
    # per-iteration debug print whose head is lost:
    #     ... "@iter " + str(count)]

            if count == 1:
                s = time.time()
            if count == 20:
                e = time.time()
                print "time per 20 iter:", (e - s)
                logger.info("time per 20 iter: %f" % (e - s))

            cost_ij = train_model_wrap(train_model, shared_x, shared_y,
                                       rand_arr, img_mean, count,
                                       minibatch_index, minibatch_range,
                                       batch_size, train_filenames,
                                       train_labels, flag_para_load,
                                       config['batch_crop_mirror'],
                                       send_queue=load_send_queue,
                                       recv_queue=load_recv_queue)

            if num_iter % config['print_freq'] == 0:
                logger.info("training @ iter = %i" % num_iter)
                logger.info("training cost: %lf" % cost_ij)
                if config['print_train_error']:
                    logger.info('training error rate: %lf' % train_error())

            if flag_para_load and (count < len(minibatch_range)):
                load_send_queue.put('calc_finished')

        ############### Test on Validation Set ##################
        DropoutLayer.SetDropoutOff()

        # result_list = [this_validation_error, this_validation_loss] or,
        # with flag_top_5,
        # [this_validation_error, this_validation_error_top5,
        #  this_validation_loss]
        result_list = get_val_error_loss(
            rand_arr, shared_x, shared_y, val_filenames, val_labels,
            flag_para_load, img_mean, batch_size, validate_model,
            send_queue=load_send_queue, recv_queue=load_recv_queue,
            flag_top_5=flag_top5)

        logger.info('epoch %i: validation loss %f ' %
                    (epoch, result_list[-1]))
        if flag_top5:
            logger.info('epoch %i: validation error (top 1) %f %%, '
                        '(top5) %f %%' %
                        (epoch, result_list[0] * 100., result_list[1] * 100.))
        else:
            logger.info('epoch %i: validation error %f %%' %
                        (epoch, result_list[0] * 100.))

        val_record.append(result_list)
        np.save(config['weights_dir'] + 'val_record.npy', val_record)

        DropoutLayer.SetDropoutOn()

        ############################################
        # Adapt Learning Rate
        step_idx = adjust_learning_rate(config, epoch, step_idx,
                                        val_record, learning_rate)

        # Save weights
        if epoch % config['snapshot_freq'] == 0:
            save_weights(layers, config['weights_dir'], epoch)
            np.save(config['weights_dir'] + 'lr_' + str(epoch) + '.npy',
                    learning_rate.get_value())
            save_momentums(vels, config['weights_dir'], epoch)

    print('Optimization complete.')
############################################################
# Variant 6: two-GPU trainer that additionally writes
# plain-text iteration, cost and error-rate logs.
############################################################
def train_net(config, private_config):

    # UNPACK CONFIGS
    (flag_para_load, train_filenames, val_filenames,
     train_labels, val_labels, img_mean) = \
        unpack_configs(config, ext_data=private_config['ext_data'],
                       ext_label=private_config['ext_label'])

    gpu_send_queue = private_config['queue_gpu_send']
    gpu_recv_queue = private_config['queue_gpu_recv']

    # pycuda and zmq set up
    drv.init()
    dev = drv.Device(int(private_config['gpu'][-1]))
    ctx = dev.make_context()

    sock_gpu = zmq.Context().socket(zmq.PAIR)
    if private_config['flag_client']:
        sock_gpu.connect('tcp://*****:*****')  # endpoint redacted in source

    # [elided in source: the redaction above swallowed the server-side zmq
    # bind, model construction, function compilation, the opening of the
    # training loop and the creation of the log_iter / log_err_cost /
    # log_err_rate files; the text resumes inside the minibatch loop below]

            print 'training @ iter = ', num_iter
            log_iter.write("%d\n" % num_iter)
            log_iter.flush()
            print 'training cost:', cost_ij
            log_err_cost.write("%f\n" % cost_ij)
            log_err_cost.flush()

            if config['print_train_error']:
                # average the training error across the two workers
                error_ij = train_error()
                gpu_send_queue.put(error_ij)
                that_error = gpu_recv_queue.get()
                error_ij = (error_ij + that_error) / 2.
                if private_config['flag_verbose']:
                    print 'training error rate:', error_ij
                log_err_rate.write("%f\n" % error_ij)
                log_err_rate.flush()

            if flag_para_load and (count < len(minibatch_range)):
                load_send_queue.put('calc_finished')

            if count % 20 == 0:
                e = time.time()
                print "time per 20 iter:", (e - s)

        ############### Test on Validation Set ##################
        DropoutLayer.SetDropoutOff()

        this_val_error, this_val_loss = get_val_error_loss(
            rand_arr, shared_x, shared_y, val_filenames, val_labels,
            flag_para_load, img_mean, batch_size, validate_model,
            send_queue=load_send_queue, recv_queue=load_recv_queue)

        # report validation stats averaged over both workers
        gpu_send_queue.put(this_val_error)
        that_val_error = gpu_recv_queue.get()
        this_val_error = (this_val_error + that_val_error) / 2.

        gpu_send_queue.put(this_val_loss)
        that_val_loss = gpu_recv_queue.get()
        this_val_loss = (this_val_loss + that_val_loss) / 2.

        if private_config['flag_verbose']:
            print('epoch %i: validation loss %f ' % (epoch, this_val_loss))
            print('epoch %i: validation error %f %%' %
                  (epoch, this_val_error * 100.))

        val_record.append([this_val_error, this_val_loss])
        if private_config['flag_save']:
            np.save(config['weights_dir'] + 'val_record.npy', val_record)
            np.savetxt(config['weights_dir'] + 'val_record_txt.txt',
                       val_record)

        DropoutLayer.SetDropoutOn()

        ############################################
        # Adapt Learning Rate
        step_idx = adjust_learning_rate(config, epoch, step_idx,
                                        val_record, learning_rate)

        # Save weights; only one of the two workers does
        if private_config['flag_save']:
            if epoch % config['snapshot_freq'] == 0:
                save_weights(layers, config['weights_dir'], epoch)
                np.save(config['weights_dir'] + 'lr_' + str(epoch) + '.npy',
                        learning_rate.get_value())
                save_momentums(vels, config['weights_dir'], epoch)

    print('Optimization complete.')
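############################################################
# A hedged sketch of how the two-GPU variants above could
# be launched: two processes with cross-wired queues, so
# each worker's send queue is the other's receive queue.
# The private_config keys match what train_net reads; the
# '_0'/'_1' extension suffixes and the flag values are
# illustrative assumptions, not taken from the original.
############################################################
from multiprocessing import Process, Queue


def launch_two_gpu(config):
    queue_01, queue_10 = Queue(), Queue()   # gpu0 -> gpu1 and gpu1 -> gpu0

    private_config_0 = {'queue_gpu_send': queue_01,
                        'queue_gpu_recv': queue_10,
                        'gpu': 'gpu0', 'ext_data': '_0', 'ext_label': '_0',
                        'flag_client': True, 'flag_verbose': True,
                        'flag_save': True}

    private_config_1 = {'queue_gpu_send': queue_10,
                        'queue_gpu_recv': queue_01,
                        'gpu': 'gpu1', 'ext_data': '_1', 'ext_label': '_1',
                        'flag_client': False, 'flag_verbose': False,
                        'flag_save': False}

    workers = [Process(target=train_net, args=(config, private_config_0)),
               Process(target=train_net, args=(config, private_config_1))]
    for w in workers:
        w.start()
    for w in workers:
        w.join()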