def main(model, x, y, cuda, optimizer_name):
    optimizer_dic = {
        'sgd': torch.optim.SGD(model.modules_list.parameters(),
                               lr=model.config["learning_rate"],
                               momentum=model.config["momentum"],
                               weight_decay=model.config["decay"]),
        'adam': torch.optim.Adam(model.modules_list.parameters(),
                                 lr=model.config["learning_rate"],
                                 weight_decay=model.config["decay"])
        }
    optimizer = optimizer_dic[optimizer_name.lower()]
    one_off_set = Within_n_rank(1)
    two_off_set = Within_n_rank(2)
    scorer_list = {'quadratic_weighted_kappa': quadratic_weighted_kappa,
                   'f1_score': f1_score,
                   'one_off_set': one_off_set,
                   'two_off_set': two_off_set,
                   'accuracy_score': accuracy_score}
    # hold out 20% of the data as a stratified validation split
    s = StratifiedShuffleSplit(n_splits=1, test_size=0.2)
    train_index, val_index = next(iter(s.split(x, y)))
    train_x, train_y = (x.iloc[train_index, :].reset_index(drop=True),
                        y[train_index].reset_index(drop=True))
    valid_x, valid_y = (x.iloc[val_index, :].reset_index(drop=True),
                        y[val_index].reset_index(drop=True))
    train_data = CustData(train_x, train_y, model.config['label_num'],
                          train=True)
    train_loader = DataLoader(train_data, shuffle=True,
                              batch_size=int(len(train_data) * 0.9),
                              num_workers=0, worker_init_fn=worker_init_fn)
    test_data = CustData(valid_x, valid_y, model.config['label_num'],
                         train=True)
    test_loader = DataLoader(test_data, shuffle=False,
                             batch_size=len(test_data),
                             num_workers=0, worker_init_fn=worker_init_fn)
    # Start training
    train(model, optimizer, train_loader, test_loader, scorer_list, cuda,
          optimizer_name.lower())
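# Hedged sketch, not from the original source: Within_n_rank is assumed to be
# a small scorer object that counts a prediction as correct when it lands
# within n ranks of the true ordinal label (so Within_n_rank(1) is "one-off"
# accuracy). A minimal stand-in could look like this:
import numpy as np


class Within_n_rank:
    def __init__(self, n):
        self.n = n
        # readable name for logging, mirroring sklearn metric functions
        self.__name__ = "within_%d_rank" % n

    def __call__(self, y_true, y_pred):
        y_true = np.asarray(y_true, dtype=float)
        y_pred = np.asarray(y_pred, dtype=float)
        # fraction of predictions no more than n ranks away from the truth
        return float(np.mean(np.abs(y_true - y_pred) <= self.n))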
def conf_map_frame(iou_conf, conf_list):
    cuda = True
    specific_conf = 0.9
    cfg_path = "../4Others/color_ball.cfg"
    test_root_dir = "../1TestData"
    test_label_csv_mame = '../1TestData/label.csv'
    classes = load_classes('../4Others/color_ball.names')
    blocks = parse_cfg(cfg_path)
    model = yolo_v3(blocks)
    checkpoint_path = "../4TrainingWeights/experiment/2018-11-30_05_19_48.404150/2018-11-30_05_59_33.719706_model.pth"
    checkpoint = torch.load(checkpoint_path)
    model.load_state_dict(checkpoint)
    model = model.cuda()
    test_transform = transforms.Compose([
        transforms.Resize(model.net["height"]),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    test_data = CustData(test_label_csv_mame, test_root_dir,
                         transform=test_transform)
    test_loader = DataLoader(test_data, shuffle=False,
                             batch_size=model.net["batch"],
                             collate_fn=my_collate, num_workers=6)
    best_map, best_ap, best_conf, specific_conf_map, specific_conf_ap, \
        map_frame = get_map(model, test_loader, cuda, conf_list, iou_conf,
                            classes, False, specific_conf, True)
    return best_map, best_ap, best_conf, specific_conf_map, \
        specific_conf_ap, map_frame
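# Hedged usage sketch (assumed driver code, not from the source): sweep a
# confidence grid through conf_map_frame and report the confidence threshold
# that maximises mAP at IoU 0.5.
if __name__ == "__main__":
    conf_list = np.arange(start=0.2, stop=0.95, step=0.025)
    best_map, best_ap, best_conf, specific_conf_map, specific_conf_ap, \
        map_frame = conf_map_frame(iou_conf=0.5, conf_list=conf_list)
    print("best mAP %.4f at confidence %.3f" % (best_map, best_conf))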
def main(param_dir):
    cfg_path = "../4Others/yolo.cfg"
    test_root_dir = "../2CvTrain"
    test_label_csv_mame = '../2CvTrain/label.csv'
    blocks = parse_cfg(cfg_path)
    label_csv_mame = '../2CvTrain/label.csv'
    params = prep_params(param_dir, label_csv_mame)
    from yolo_v3 import yolo_v3
    model = yolo_v3(params, blocks)
    test_transform = transforms.Compose([
        transforms.Resize(model.params["height"]),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    test_data = CustData(test_label_csv_mame, transform=test_transform)
    test_loader = DataLoader(test_data, shuffle=False,
                             batch_size=model.params["batch_size"],
                             collate_fn=my_collate, num_workers=0)
    start = time.time()
    best_map, best_ap, best_conf, specific_conf_map, specific_conf_ap, \
        map_frame = get_map(model, test_loader, train=False, loop_conf=False)
    print(time.time() - start)
    return best_map, best_ap, best_conf, specific_conf_map, \
        specific_conf_ap, map_frame
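# Hedged sketch, not from the source: my_collate is assumed to be a custom
# collate_fn that stacks the image tensors but keeps the per-image label
# tensors in a plain list, because each image can contain a different number
# of boxes and the default collate cannot stack ragged labels.
import torch


def my_collate(batch):
    images = torch.stack([sample["image"] for sample in batch], dim=0)
    labels = [sample["label"] for sample in batch]
    return {"image": images, "label": labels}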
def main():
    seed = 1
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    cuda = True
    train, test = prep_data(pca=True, pca_scale=True, inputation=True,
                            strategy='median', remove_low_variance=False)
    columns_to_drop = ['Response']
    test = test.drop(columns_to_drop, axis=1)
    test_data = CustData(test)
    test_loader = DataLoader(test_data, batch_size=len(test_data),
                             num_workers=6, worker_init_fn=worker_init_fn)
    # with open('../5Others/config.txt', 'rb') as fp:
    # with open('../4TrainingWeights/2019-01-06_09_45_38.867660/2019-01-06_11_28_41.798519.txt', 'rb') as fp:
    with open('../4TrainingWeights/tuning_2/num_node_layer_num/num_nodes_256_layer_number_2/2019-01-15_16_49_59.215415\\2019-01-15_18_54_05.071129.txt', 'rb') as fp:
        param = json.load(fp)
    input_dim = len(test.columns)
    model = Ordinal_regression(create_module, config=param,
                               input_dim=input_dim)
    # state_dic = torch.load('../4TrainingWeights/2019-01-06_20_43_56.362198/2019-01-06_21_04_05.995207.pth')
    # model.load_state_dict(state_dic)
    if cuda:
        model.cuda()
    model.eval()
    final_prediction = prediction(model, test_loader, cuda)
    # test_loader = DataLoader(test_data, batch_size=len(test_data),
    #                          num_workers=6, worker_init_fn=worker_init_fn)
    # y, final_prediction = prediction(model, test_loader, cuda, label=True)
    # len(y[abs(y - final_prediction) <= 2]) / len(y)
    # y = list(map(int, y))
    # accuracy_score(y, final_prediction)
    submission = pd.read_csv('../1TestData/sample_submission.csv',
                             index_col=0)
    submission['Response'] = final_prediction.astype('int32')
    submission.to_csv('submit.csv')
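# Hedged sketch, not from the source: prediction() is assumed to run the model
# over the loader in eval mode and decode the ordinal outputs by thresholding
# each cumulative probability at 0.5 and counting the positives. The real
# decode used by the repo may differ, and the loader is assumed to yield bare
# feature tensors when no labels are present.
import numpy as np
import torch


def prediction(model, loader, cuda):
    outputs = []
    with torch.no_grad():
        for batch in loader:
            x = batch.float()
            if cuda:
                x = x.cuda()
            probs = torch.sigmoid(model(x))
            # the number of thresholds passed gives the predicted rank
            outputs.append((probs > 0.5).sum(dim=1).cpu().numpy())
    return np.concatenate(outputs)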
def test_loaders():
    label_csv_mame = '../1TestData/label.csv'
    name_list = ["img_name", "c", "gx", "gy", "gw", "gh"]
    test_img_txt_path = "../1TestData/*.txt"
    prep_labels(test_img_txt_path, name_list, label_csv_mame)
    logging.basicConfig(level=logging.DEBUG,
                        format="[%(asctime)s %(filename)s] %(message)s")
    # training transformation
    pre_trans = RandomCrop(jitter=0.25, inp_dim=480)
    train_transform = transforms.Compose([
        transforms.ColorJitter(brightness=0.25, saturation=0.25),
        transforms.ToTensor()
        ])
    test_pre_trans = OfssetLabels(resize=True, input_dim=480)
    test_transform = transforms.Compose([ImgToTensorCv()])
    train_data = CustData('../1TrainData/label.csv',
                          pre_trans=pre_trans,
                          transform=train_transform)
    # test_data = CustData(test_label_csv_mame,
    #                      transform=test_transform)
    test_data = CustDataCV('../1TestData/label.csv',
                           pre_trans=test_pre_trans,
                           transform=test_transform)
    train_loader = DataLoader(train_data, shuffle=False, batch_size=2,
                              collate_fn=my_collate, num_workers=0,
                              worker_init_fn=worker_init_fn)
    test_loader = DataLoader(test_data, shuffle=False, batch_size=4,
                             collate_fn=my_collate, num_workers=0,
                             worker_init_fn=worker_init_fn)
    # print("running train loader")
    # for step, samples in enumerate(train_loader):
    #     if step < 200:
    #         images, labels = samples["image"], samples["label"]
    #         for img, label in zip(images, labels):
    #             img = img.permute(1, 2, 0).contiguous().numpy()
    #             draw_boxes(img, label)
    #     else:
    #         break
    print("running test loader")
    for step, samples in enumerate(test_loader):
        if step < 2:
            images, labels = samples["image"], samples["label"]
            for img, label in zip(images, labels):
                img = img.permute(1, 2, 0).contiguous().numpy()
                draw_boxes(img, label)
        else:
            break
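# Hedged sketch, not from the source: draw_boxes is assumed to overlay the
# ground-truth boxes on an HWC numpy image and display the result. The label
# layout [class, x_center, y_center, width, height] in pixels is a guess
# based on the name_list columns above.
import cv2


def draw_boxes(img, label, window_name="boxes"):
    canvas = img.copy()
    for box in label:
        _, cx, cy, w, h = [float(v) for v in box[:5]]
        x1, y1 = int(cx - w / 2), int(cy - h / 2)
        x2, y2 = int(cx + w / 2), int(cy + h / 2)
        cv2.rectangle(canvas, (x1, y1), (x2, y2), (0, 255, 0), 2)
    cv2.imshow(window_name, canvas)
    cv2.waitKey(0)
    cv2.destroyWindow(window_name)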
def main():
    cuda = True
    cfg_path = "../4Others/color_ball.cfg"
    test_root_dir = "../1TestData"
    test_label_csv_mame = '../1TestData/label.csv'
    classes = load_classes('../4Others/color_ball.names')
    blocks = parse_cfg(cfg_path)
    model = yolo_v3(blocks)
    conf_list = np.arange(start=0.2, stop=0.95, step=0.025)
    checkpoint_path = "../4TrainingWeights/2018-11-07_23_13_38.391465/2018-11-08_02_45_20.195250_model.pth"
    checkpoint = torch.load(checkpoint_path)
    model.load_state_dict(checkpoint)
    model = model.cuda()
    test_transform = transforms.Compose([
        transforms.Resize(model.net["height"]),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    test_data = CustData(test_label_csv_mame, test_root_dir,
                         transform=test_transform)
    test_loader = DataLoader(test_data, shuffle=True, batch_size=1,
                             collate_fn=my_collate, num_workers=4)
    start = time.time()
    sum_ap, sum_map, ap, mAP, map_frame = get_map(model, test_loader, cuda,
                                                  conf_list, 0.5, classes,
                                                  train=False)
    print(time.time() - start)
    return sum_ap, sum_map, ap, mAP, map_frame
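# Hedged sketch, not from the source: load_classes is assumed to read one
# class name per line from the *.names file and return them as a list.
def load_classes(names_file):
    with open(names_file, "r") as fp:
        # drop blank lines and surrounding whitespace
        return [line.strip() for line in fp if line.strip()]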
# if param['pretrain_snapshot']:
#     state_dic = torch.load(param['pretrain_snapshot'])
#     model.load_state_dict(state_dic)
# model.apply(weights_init)
print(model.state_dict())
print(model.modules_list)
main(model, x_dropped, y, cuda, optimizer_name='adam')

# =============================================================================
# prediction
# =============================================================================
test_x = test_set.drop(columns_to_drop, axis=1)
test_x_scaled = test_x.copy()
test_x_scaled[:] = scaler.transform(test_x_scaled)
test_x_dropped = test_x_scaled.drop(zero_coef, axis=1)
test_data = CustData(test_x_dropped)
test_loader = DataLoader(test_data, batch_size=len(test_data),
                         num_workers=6, worker_init_fn=worker_init_fn)
# with open('../5Others/config.txt', 'rb') as fp:
# with open('../4TrainingWeights/2019-01-06_09_45_38.867660/2019-01-06_11_28_41.798519.txt', 'rb') as fp:
with open('../4TrainingWeights/with_lasso_fs/2019-01-09_11_55_22.761526/2019-01-09_12_54_54.655013.txt', 'rb') as fp:
    param = json.load(fp)
model = Ordinal_regression(create_module, config=param)
# state_dic = torch.load('../4TrainingWeights/2019-01-06_20_43_56.362198/2019-01-06_21_04_05.995207.pth')
# model.load_state_dict(state_dic)
if cuda:
    model.cuda()
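# Hedged sketch, not from the source: worker_init_fn is assumed to reseed each
# DataLoader worker so that numpy/random augmentation stays reproducible; a
# common minimal version derives a per-worker seed from torch's base seed.
import random

import numpy as np
import torch


def worker_init_fn(worker_id):
    seed = (torch.initial_seed() + worker_id) % 2 ** 32
    np.random.seed(seed)
    random.seed(seed)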
param = json.load(fp)
model = Ordinal_regression(create_module, config=param)
# if param['pretrain_snapshot']:
#     state_dic = torch.load(param['pretrain_snapshot'])
#     model.load_state_dict(state_dic)
# model.apply(weights_init)
print(model.state_dict())
print(model.modules_list)
main(model, x_scaled, y, cuda, optimizer_name='adam')

# =============================================================================
# prediction
# =============================================================================
test_x = test_set.drop(columns_to_drop, axis=1)
test_x_scaled = test_x.copy()
test_x_scaled[:] = scaler.transform(test_x_scaled)
test_data = CustData(test_x_scaled)
test_loader = DataLoader(test_data, batch_size=len(test_data),
                         num_workers=6, worker_init_fn=worker_init_fn)
# with open('../5Others/config.txt', 'rb') as fp:
# with open('../4TrainingWeights/2019-01-06_09_45_38.867660/2019-01-06_11_28_41.798519.txt', 'rb') as fp:
with open('../4TrainingWeights/with_drop_out_leaky/2019-01-09_11_55_25.667333/2019-01-09_13_34_11.010203.txt', 'rb') as fp:
    param = json.load(fp)
model = Ordinal_regression(create_module, config=param)
# state_dic = torch.load('../4TrainingWeights/2019-01-06_20_43_56.362198/2019-01-06_21_04_05.995207.pth')
# model.load_state_dict(state_dic)
if cuda:
    model.cuda()
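# Hedged sketch, not from the source: create_module is assumed to build the
# fully connected stack that Ordinal_regression stores as modules_list. The
# config key names used here ("layer_number", "num_nodes", "drop_out",
# "label_num") are guesses based on the tuning-directory names and may not
# match the real config file.
import torch.nn as nn


def create_module(config, input_dim):
    layers = nn.ModuleList()
    in_dim = input_dim
    for _ in range(config["layer_number"]):
        layers.append(nn.Sequential(nn.Linear(in_dim, config["num_nodes"]),
                                    nn.LeakyReLU(),
                                    nn.Dropout(config["drop_out"])))
        in_dim = config["num_nodes"]
    # one output per ordinal threshold (label_num - 1 cumulative logits)
    layers.append(nn.Linear(in_dim, config["label_num"] - 1))
    return layers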
def run_training(params, label_csv_mame, name_list, test_label_csv_mame,
                 test_img_txt_path, cfg_path, params_dir,
                 valid_label_csv_mame=False, valid_img_txt_path=False):
    random.seed(params['seed'])
    np.random.seed(params['seed'])
    torch.manual_seed(params['seed'])
    torch.cuda.manual_seed(params['seed'])
    logging.basicConfig(level=logging.DEBUG,
                        format="[%(asctime)s %(filename)s] %(message)s")
    # prepare test labels
    prep_labels(test_img_txt_path, name_list, test_label_csv_mame)
    # parse the model architecture from the architecture config file
    blocks = parse_cfg(cfg_path)
    # training transformation
    pre_trans = RandomCrop(jitter=params['rand_crop'],
                           inp_dim=params["height"])
    train_transform = transforms.Compose([
        transforms.ColorJitter(brightness=params["exposure"],
                               saturation=params["saturation"]),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    test_pre_trans = OfssetLabels(resize=True, input_dim=params['height'])
    test_transform = transforms.Compose([
        ImgToTensorCv(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    train_data = CustData(label_csv_mame,
                          pre_trans=pre_trans,
                          transform=train_transform)
    test_data = CustDataCV(test_label_csv_mame,
                           pre_trans=test_pre_trans,
                           transform=test_transform)
    train_loader = DataLoader(train_data, shuffle=True,
                              batch_size=params["batch_size"],
                              collate_fn=my_collate,
                              num_workers=params['num_workers'],
                              worker_init_fn=worker_init_fn)
    test_loader = DataLoader(test_data, shuffle=False,
                             batch_size=params["batch_size"],
                             collate_fn=my_collate,
                             num_workers=params['num_workers'],
                             worker_init_fn=worker_init_fn)
    # initiate model
    model = yolo_v3(params, blocks)
    # Initiate validation data loader if there is any
    if valid_label_csv_mame:
        valid_data = CustDataCV(valid_label_csv_mame,
                                transform=test_transform)
        valid_loader = DataLoader(valid_data, shuffle=False,
                                  batch_size=params["batch_size"],
                                  collate_fn=my_collate,
                                  num_workers=params['num_workers'],
                                  worker_init_fn=worker_init_fn)
        # Start training
        best_map, best_ap, best_conf, specific_conf_map, specific_conf_ap, \
            map_frame = model.fit(train_loader, valid_loader, test_loader)
    else:
        # Start training
        best_map, best_ap, best_conf, specific_conf_map, specific_conf_ap, \
            map_frame = model.fit(train_loader, test_loader)
    return best_map, best_ap, best_conf, specific_conf_map, \
        specific_conf_ap, map_frame
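# Hedged usage sketch (the parameter file path is an assumption; the label,
# txt, and config paths are reused from the scripts above): prepare the params
# once and kick off a full training run.
if __name__ == "__main__":
    name_list = ["img_name", "c", "gx", "gy", "gw", "gh"]
    params = prep_params("../4Others/params.txt", "../1TrainData/label.csv")
    results = run_training(params,
                           label_csv_mame='../1TrainData/label.csv',
                           name_list=name_list,
                           test_label_csv_mame='../1TestData/label.csv',
                           test_img_txt_path='../1TestData/*.txt',
                           cfg_path='../4Others/color_ball.cfg',
                           params_dir='../4TrainingWeights/')
    best_map, best_conf = results[0], results[2]
    print("best mAP %.4f at confidence %.3f" % (best_map, best_conf))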