def rank_transfer(train_generator, val_generator, source_model_path,
                  target_model_path, batch_size=48):
    """Fine-tune a pretrained source model with a two-head ranking loss.

    Builds the ranking network from ``source_model_path``, trains it on the
    pair generators, and checkpoints the best-val-loss model to
    ``target_model_path``.

    Args:
        train_generator: generator yielding training pair batches.
        val_generator: generator yielding validation pair batches.
        source_model_path: path to the pretrained source model (.h5).
        target_model_path: path where the best checkpoint is saved.
        batch_size: batch size used to derive steps per epoch.
    """
    model = rank_transfer_model(source_model_path)
    model.compile(
        optimizer=SGD(lr=0.001, momentum=0.9),
        loss={
            'score1': 'binary_crossentropy',
            'score2': 'binary_crossentropy',
        },
        # Equal weighting of the two ranking heads.
        loss_weights={
            'score1': 0.5,
            'score2': 0.5,
        },
        metrics=['accuracy'])
    auto_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5,
                                verbose=0, mode='auto', epsilon=0.0001,
                                cooldown=0, min_lr=0)
    # Market-1501 targets supply far more pairs than the small splits.
    if 'market-' in target_model_path:
        train_data_cnt = 3000
        val_data_cnt = 300
    else:
        train_data_cnt = 160
        val_data_cnt = 18
    safe_remove(target_model_path)
    model.fit_generator(
        train_generator,
        # BUG FIX: floor division — under Python 3, `/` produces a float,
        # which Keras rejects for steps_per_epoch/validation_steps.
        steps_per_epoch=train_data_cnt // batch_size + 1,
        epochs=50,
        validation_data=val_generator,
        validation_steps=val_data_cnt // batch_size + 1,
        callbacks=[
            auto_lr,
            ModelCheckpoint(target_model_path, monitor='val_loss', verbose=0,
                            save_best_only=True, save_weights_only=False,
                            mode='auto', period=1)
        ])
def test_predict(net, probe_path, gallery_path, pid_path, score_path):
    """Score every probe against the gallery and write pid/score tables.

    Extracts features from the ``'avg_pool'`` layer of ``net``, ranks the
    gallery for each probe, and saves the ranked ids and scores as text.

    NOTE(review): a later definition of ``test_predict`` in this module
    shadows this one at import time.
    """
    feature_net = Model(inputs=[net.input],
                        outputs=[net.get_layer('avg_pool').output])
    gallery_f, gallery_info = extract_feature(gallery_path, feature_net)
    probe_f, probe_info = extract_feature(probe_path, feature_net)
    scores, order = sort_similarity(probe_f, gallery_f)
    # Re-arrange each probe's score row into its ranked order.
    ranked = np.array([row[idx] for row, idx in zip(scores, order)])
    safe_remove(pid_path)
    safe_remove(score_path)
    np.savetxt(pid_path, order, fmt='%d')
    np.savetxt(score_path, ranked, fmt='%.4f')
def test_predict(net, probe_path, gallery_path, pid_path, score_path):
    """Rank the gallery for every probe image and dump the results.

    Uses ``net`` directly as the feature extractor, sorts gallery entries
    by similarity per probe, and writes the ranked ids (``pid_path``) and
    scores (``score_path``) as text files.
    """
    gallery_f, gallery_info = extract_feature(gallery_path, net)
    probe_f, probe_info = extract_feature(probe_path, net)
    scores, order = sort_similarity(probe_f, gallery_f)
    # Permute each score row into ranked order before saving.
    ranked = np.array([row[idx] for row, idx in zip(scores, order)])
    safe_remove(pid_path)
    safe_remove(score_path)
    np.savetxt(pid_path, order, fmt='%d')
    np.savetxt(score_path, ranked, fmt='%.4f')
def test_sepbn_predict(net_path, probe_path, gallery_path, pid_path, score_path):
    """Evaluate a saved separate-BN model on a probe/gallery split.

    Loads the model from ``net_path`` (with the custom ranking loss
    registered), carves out the second input/output of its embedded
    ``'resnet50'`` sub-model as the feature extractor, then ranks the
    gallery per probe and writes pid/score text files.
    """
    full_model = load_model(net_path,
                            custom_objects={'cross_entropy_loss': cross_entropy_loss})
    backbone = full_model.get_layer('resnet50')
    # Index [1] selects the second branch of the shared sub-model.
    feature_net = Model(inputs=[backbone.get_input_at(0)[1]],
                        outputs=[backbone.get_output_at(0)[1]])
    gallery_f, gallery_info = extract_feature(gallery_path, feature_net)
    probe_f, probe_info = extract_feature(probe_path, feature_net)
    scores, order = sort_similarity(probe_f, gallery_f)
    ranked = np.array([row[idx] for row, idx in zip(scores, order)])
    safe_remove(pid_path)
    safe_remove(score_path)
    np.savetxt(pid_path, order, fmt='%d')
    np.savetxt(score_path, ranked, fmt='%.4f')
def rank_transfer(train_generator, val_generator, source_model_path,
                  target_model_path, batch_size=48):
    """Fine-tune a pretrained model with the single-head ranking loss.

    NOTE(review): this redefinition shadows the earlier ``rank_transfer``
    in this module; only this version is callable after import.

    Args:
        train_generator: generator yielding training pair batches.
        val_generator: generator yielding validation pair batches.
        source_model_path: path to the pretrained source model (.h5).
        target_model_path: path where the trained model is saved.
        batch_size: batch size used to derive steps per epoch.
    """
    model = rank_transfer_model(source_model_path)
    plot_model(model, 'rank_model.png')
    model.compile(
        optimizer=SGD(lr=0.001, momentum=0.9),
        loss={
            'sub_score': cross_entropy_loss,
        },
        loss_weights={
            'sub_score': 1,
        },
    )
    early_stopping = EarlyStopping(monitor='val_loss', patience=3)
    auto_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=2,
                                verbose=0, mode='auto', epsilon=0.0001,
                                cooldown=0, min_lr=0)
    # Market-1501 targets supply far more pairs than the small splits.
    if 'market-' in target_model_path:
        train_data_cnt = 16500
        val_data_cnt = 1800
    else:
        train_data_cnt = 1600
        val_data_cnt = 180
    model.fit_generator(
        train_generator,
        # BUG FIX: floor division — under Python 3, `/` produces a float,
        # which Keras rejects for steps_per_epoch/validation_steps.
        steps_per_epoch=train_data_cnt // batch_size + 1,
        epochs=5,
        validation_data=val_generator,
        validation_steps=val_data_cnt // batch_size + 1,
        callbacks=[early_stopping, auto_lr])
    safe_remove(target_model_path)
    model.save(target_model_path)
def lmp_test_predict(net, probe_path, gallery_path, pid_path, score_path):
    """Evaluate with local-max-pooled (LMP) features and dump rankings.

    Takes the output of layer 173 of ``net``, applies the LMP
    max-pool / average-pool head, then ranks the gallery per probe and
    writes the pid/score text files.
    """
    feat = net.layers[173].output
    feat = MaxPooling2D(pool_size=(7, 2), data_format='channels_last',
                        padding='same', name='lmp')(feat)
    feat = AveragePooling2D(pool_size=(1, 4))(feat)
    feature_net = Model(inputs=[net.input], outputs=[feat])
    gallery_f, gallery_info = extract_feature(gallery_path, feature_net)
    probe_f, probe_info = extract_feature(probe_path, feature_net)
    scores, order = sort_similarity(probe_f, gallery_f)
    ranked = np.array([row[idx] for row, idx in zip(scores, order)])
    safe_remove(pid_path)
    safe_remove(score_path)
    np.savetxt(pid_path, order, fmt='%d')
    np.savetxt(score_path, ranked, fmt='%.4f')
pair_generator_by_rank_list(train_images, batch_size, similar_persons, similar_matrix, train=False), target, batch_size=batch_size, num_classes=class_count ) if __name__ == '__main__': # sources = ['cuhk_grid_viper_mix'] sources = ['cuhk'] target = 'market' pair_model('../pretrain/cuhk_pair_pretrain.h5', 751) for source in sources: pair_pretrain_on_dataset(source, target) transform_dir = '/home/cwh/coding/Market-1501' safe_remove('pair_transfer_pid.log') test_pair_predict('market_pair_pretrain.h5', transform_dir + '/probe', transform_dir + '/test', 'pair_transfer_pid.log', 'pair_transfer_score.log') market_result_eval('pair_transfer_pid.log', TEST='/home/cwh/coding/Market-1501/test', QUERY='/home/cwh/coding/Market-1501/probe') # sources = ['grid-cv-%d' % i for i in range(10)] # for source in sources: # softmax_pretrain_on_dataset(source, # project_path='/home/cwh/coding/rank-reid', # dataset_parent='/home/cwh/coding') # pair_pretrain_on_dataset(source, # project_path='/home/cwh/coding/rank-reid', # dataset_parent='/home/cwh/coding')