    ts.MEAN, ts.STD, ts.IMAGE_HEIGHT, ts.IMAGE_WIDTH,
    random_hor_flipping=ts.HORIZONTAL_FLIPPING,
    random_ver_flipping=ts.VERTICAL_FLIPPING,
    random_90x_rotation=ts.ROTATION_90X,
    random_rotation=ts.ROTATION,
    random_color_jittering=ts.COLOR_JITTERING,
    use_coordinates=ts.USE_COORDINATES)

test_loader = torch.utils.data.DataLoader(
    test_dataset, batch_size=opt.batchsize, shuffle=False,
    num_workers=opt.nworkers, pin_memory=pin_memory,
    collate_fn=test_align_collate)

# Define Model
model = Model(opt.dataset, ts.N_CLASSES, ts.MAX_N_OBJECTS,
              use_instance_segmentation=ts.USE_INSTANCE_SEGMENTATION,
              use_coords=ts.USE_COORDINATES,
              load_model_path=opt.model, usegpu=opt.usegpu)

# Train Model
model.fit(ts.CRITERION, ts.DELTA_VAR, ts.DELTA_DIST, ts.NORM,
          ts.LEARNING_RATE, ts.WEIGHT_DECAY, ts.CLIP_GRAD_NORM,
          ts.LR_DROP_FACTOR, ts.LR_DROP_PATIENCE, ts.OPTIMIZE_BG,
          ts.OPTIMIZER, ts.TRAIN_CNN, opt.nepochs, ts.CLASS_WEIGHTS,
          train_loader, test_loader, model_save_path, opt.debug)
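# --- Hedged sketch (not part of the repository) --------------------------------
# `test_align_collate`, passed as `collate_fn` above, is defined elsewhere in the
# project and is not reproduced here. Purely as an assumed illustration of what a
# DataLoader collate_fn does, the toy function below merges a list of dataset
# samples into batch tensors; the project's own collate callable is presumably
# more involved (e.g. handling resizing and augmentation during batching).
import torch

def example_collate(batch):
    # batch: list of (image, target) pairs, assumed to already share one size
    images, targets = zip(*batch)
    return torch.stack(images, 0), torch.stack(targets, 0)
# --------------------------------------------------------------------------------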
    random_hor_flipping=ts.HORIZONTAL_FLIPPING,
    random_ver_flipping=ts.VERTICAL_FLIPPING,
    random_transposing=ts.TRANSPOSING,
    random_90x_rotation=ts.ROTATION_90X,
    random_rotation=ts.ROTATION,
    random_color_jittering=ts.COLOR_JITTERING,
    random_grayscaling=ts.GRAYSCALING,
    random_channel_swapping=ts.CHANNEL_SWAPPING,
    random_gamma=ts.GAMMA_ADJUSTMENT,
    random_resolution=ts.RESOLUTION_DEGRADING)

test_loader = torch.utils.data.DataLoader(
    test_dataset, batch_size=opt.batchsize, shuffle=False,
    num_workers=opt.nworkers, pin_memory=pin_memory,
    collate_fn=test_align_collate)

# Define Model
model = Model(opt.dataset, ts.MODEL_NAME, ts.N_CLASSES, ts.MAX_N_OBJECTS,
              use_instance_segmentation=ts.USE_INSTANCE_SEGMENTATION,
              use_coords=ts.USE_COORDINATES,
              load_model_path=opt.model, usegpu=opt.usegpu)

# Train Model
model.fit(ts.CRITERION, ts.DELTA_VAR, ts.DELTA_DIST, ts.NORM,
          ts.LEARNING_RATE, ts.WEIGHT_DECAY, ts.CLIP_GRAD_NORM,
          ts.LR_DROP_FACTOR, ts.LR_DROP_PATIENCE, ts.OPTIMIZE_BG,
          ts.OPTIMIZER, ts.TRAIN_CNN, opt.nepochs, ts.CLASS_WEIGHTS,
          train_loader, test_loader, model_save_path, opt.debug)
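# --- Hedged sketch (not part of the repository) --------------------------------
# `ts` above is a per-dataset settings namespace. The attribute names below are
# taken directly from the calls in this script; the values and comments are
# illustrative assumptions only, not the project's actual defaults. The image
# statistics (MEAN, STD, IMAGE_HEIGHT, IMAGE_WIDTH) and the augmentation flags
# referenced above would live in the same namespace.
class ExampleSettings(object):
    MODEL_NAME = 'example_model'              # placeholder architecture name
    N_CLASSES = 2                             # e.g. background + foreground
    MAX_N_OBJECTS = 20                        # upper bound on instances per image
    USE_INSTANCE_SEGMENTATION = True          # enable the instance-embedding branch
    USE_COORDINATES = False                   # append coordinate channels to the input
    CRITERION = 'CE'                          # placeholder semantic-loss choice
    DELTA_VAR, DELTA_DIST, NORM = 0.5, 1.5, 2 # assumed discriminative-loss margins / norm
    LEARNING_RATE, WEIGHT_DECAY = 1e-3, 1e-4
    CLIP_GRAD_NORM = 10.0
    LR_DROP_FACTOR, LR_DROP_PATIENCE = 0.1, 10  # suggests a ReduceLROnPlateau-style schedule
    OPTIMIZE_BG = False                       # presumably: include background in the instance loss
    OPTIMIZER = 'Adam'
    TRAIN_CNN = True                          # presumably: fine-tune the CNN backbone
    CLASS_WEIGHTS = None
# --------------------------------------------------------------------------------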
model = Model(args.n_stations, s.MOVING_HORIZON, s.ACTIVATION, s.CRITERION,
              usegpu=args.usegpu)

# Train First RNN
[X_train, y_train], [X_val, y_val], [X_test, y_test] = data.load_data_lstm_1()

rnn_model_num = 1
print('#' * 10 + ' RNN 1 ' + '#' * 10)

train_loader = torch.utils.data.DataLoader(
    Loader((X_train, y_train)), batch_size=args.batch_size, shuffle=True,
    num_workers=args.n_workers, pin_memory=pin_memory)
val_loader = torch.utils.data.DataLoader(
    Loader((X_val, y_val)), batch_size=args.batch_size, shuffle=False,
    num_workers=args.n_workers, pin_memory=pin_memory)

model.fit(rnn_model_num, s.LEARNING_RATE, s.WEIGHT_DECAY, s.CLIP_GRAD_NORM,
          s.LR_DROP_FACTOR, s.LR_DROP_PATIENCE, s.PATIENCE, s.OPTIMIZER,
          s.N_EPOCHS[rnn_model_num - 1], train_loader, val_loader,
          model_save_path.format(rnn_model_num))

# Train Other RNNs
for rnn_model_num in range(2, s.MOVING_HORIZON + 1):
    X_train, y_train = data.load_data(X_train, y_train, model, rnn_model_num - 1)
    X_val, y_val = data.load_data(X_val, y_val, model, rnn_model_num - 1)

    print('#' * 10 + ' RNN {} '.format(rnn_model_num) + '#' * 10)

    train_loader = torch.utils.data.DataLoader(
        Loader((X_train, y_train)), batch_size=args.batch_size, shuffle=True,
        num_workers=args.n_workers, pin_memory=pin_memory)
    val_loader = torch.utils.data.DataLoader(
        Loader((X_val, y_val)), batch_size=args.batch_size, shuffle=False,
        num_workers=args.n_workers, pin_memory=pin_memory)

    model.fit(rnn_model_num, s.LEARNING_RATE, s.WEIGHT_DECAY, s.CLIP_GRAD_NORM,
              s.LR_DROP_FACTOR, s.LR_DROP_PATIENCE, s.PATIENCE, s.OPTIMIZER,
              s.N_EPOCHS[rnn_model_num - 1],