1.0, 0.4, # cat feature 1 -1.0, 3.0, -1.0, -0.5, # cat feature 2 0.00001, # n1 0.7 ]), bias=0.5) train_data = SyntheticDataGenerator(config).data test_data = SyntheticDataGenerator(config).data l2 = 3 model = RobustLogisticModel(1, l2_epsilon=l2, config=config) inputs, y = next(iter(train_data())) y_ = model(inputs) # just to build the model and get input shapes # run an exp run(config, train_data, test_data, l2, robust_frac=0.5, epochs=4, lr=0.01) # test various perturbations inputs_pert_num = model.perturb_numeric(inputs, y) inputs_pert_cat = model.perturb_categorical(inputs, y) a = 1
import argparse  # was used below but never imported in this fragment

from src.train import run
from src.envs import get_envs

# NOTE(review): `send` is called below but not imported here — presumably a
# notification helper imported elsewhere in the original file; confirm.

if __name__ == '__main__':
    # os.putenv('SDL_VIDEODRIVER', 'fbcon')
    # os.environ["SDL_VIDEODRIVER"] = "dummy"
    envs = get_envs()

    parser = argparse.ArgumentParser()
    parser.add_argument('--update_on', default=False, action="store_true")
    # Numeric options are parsed as int once here, instead of the original
    # pattern of storing strings and re-casting with int() at every use site.
    parser.add_argument('--env_num', default=0, type=int)
    parser.add_argument('--seed_num', default=0, type=int)
    parser.add_argument('--sequence_length', default=8, type=int)
    parser.add_argument('--replay_memory', default=100, type=int)
    args = parser.parse_args()

    env = envs[args.env_num]  # hoist the repeated envs[...] lookup
    print(env.name, env.max_episode)
    run(env, args.seed_num, args.update_on,
        args.sequence_length, args.replay_memory)
    send('Complete {}, {}, {}, {}, {}'.format(
        env.name, args.seed_num, str(args.update_on),
        args.sequence_length, args.replay_memory))
    # send('Complete : ' + envs[int(args.env_num)].name + '\n seed: ' + args.seed_num + '\n update-on' + str(args.update_on))
# python test.py --update_on --env_num=0 --seed_num=100 --sequence_length=8 --replay_memory=100
# NOTE(review): this fragment begins mid-way through a config constructor call
# and is truncated inside the final run() call; surrounding lines are outside
# this view.
l2_epsilon=0.0, num_perturbed_categoricals=1,
col_spec=col_spec, label_name=target)

# Widen pandas display limits so result frames print without truncation.
pd.set_option('display.max_columns', 500)
pd.set_option('max_colwidth', 1000)  # NOTE(review): deprecated alias of 'display.max_colwidth' in newer pandas — verify the installed version accepts it
pd.set_option('display.width', 1000)

# Shared hyperparameters for the robust-fraction sweep below.
epochs = 400
batch = 20
lr = 0.01

# Identical data and config; only the fraction of robust training varies.
res_10 = run(config, X, y, robust_frac=0.1, epochs=epochs, batch_size=batch, lr=lr, verbose=False)
res_20 = run(config, X, y, robust_frac=0.2, epochs=epochs, batch_size=batch, lr=lr, verbose=False)
res_50 = run(config, X,
def main():
    """Kick off training with the module-level configuration values.

    Thin wrapper: forwards the globals (agent count, load/save paths, the
    index range, batch size, and the load-model flag) straight to
    ``train.run`` — it holds no logic of its own.
    """
    train.run(
        n_agents,
        load_path,
        model_path,
        starting_index,
        final_index,
        batch,
        load_model,
    )
import sys  # was used below but never imported in this fragment

from env import OhmniInSpace
from src import train

# NOTE(review): `cv` (OpenCV, presumably `import cv2 as cv`) is used below but
# not imported in this fragment — confirm it is imported elsewhere in the file.

if len(sys.argv) < 3:
    # Guard: every branch reads sys.argv[1] and sys.argv[2]; without this the
    # original crashed with IndexError instead of reporting a usage error.
    print("Error: Invalid option!")
elif sys.argv[1] == '--test':
    if sys.argv[2] == 'py-env':
        # Interactive smoke test of the PyBullet env: drive forward with a
        # constant action and show the segmentation observation until the
        # episode ends or 'q' is pressed.
        ois = OhmniInSpace.PyEnv(gui=True)
        timestep = ois.reset()
        while not timestep.is_last():
            timestep = ois.step(action=(0.4, 0.4))
            (_, reward, discount, observation) = timestep
            print('Reward:', reward)
            ois.render()
            cv.imshow('Segmentation', observation)
            if cv.waitKey(10) & 0xFF == ord('q'):
                break
    elif sys.argv[2] == 'tf-env':  # mutually exclusive with 'py-env' — elif makes that explicit
        # Print the time-step/action specs of the TF-wrapped environment.
        ois = OhmniInSpace.TfEnv()
        tf_env = ois.gen_env()
        print("TimeStep Specs:", tf_env.time_step_spec())
        print("Action Specs:", tf_env.action_spec())
elif sys.argv[1] == '--ohmni':
    if sys.argv[2] == 'train':
        train.train()
    elif sys.argv[2] == 'run':  # was a second independent `if`, which left the trailing `else` ambiguously bound
        train.run()
else:
    print("Error: Invalid option!")
# Build the datasets and DataLoaders from the dog images plus the human
# train/valid splits, normalized with `batch_stat` and resized to args.img_size.
train.display_message('+++++++++++++Creating DataLoaders+++++++++++++')
train_ds, valid_ds = train.get_datasets(path_dogs, human_train, human_valid,
                                        stats=batch_stat, size=args.img_size)
bs = args.batch_size
dls = train.get_dls(train_ds, valid_ds, bs=bs)

# Model, device, optimizer, loss, and metric recorder for the training run.
train.display_message(
    '+++++++++++++Getting Model ready for training+++++++++++++')
model = models.ModelScratch()
device = train.get_device()
model.to(device)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
# Loss needs the dog/human labeller from the training set to map targets.
criterion = loss_func.CustomLoss(train_ds.dog_human_labeller)
recorder = metrics.Recorder()

n_epochs = args.n_epochs
train.run(n_epochs, model, optimizer, criterion, dls, device, recorder,
          max_lr=args.max_lr, env='shell')

# Persist the trained model; the filename embeds the epoch count and the
# final validation breed accuracy, and the labellers/stats ride along so the
# model can be reloaded for inference.
utils.save_model(
    model,
    f'model_scratch_{n_epochs}_{recorder.valid_acc_breed[-1].item():.2f}',
    train_ds.breed_labeller, train_ds.dog_human_labeller, batch_stat)
# NOTE(review): this fragment starts mid if/elif chain (the opening `if` is
# outside this view) and is cut off before the remaining test-set branches.
# Pick the model architecture matching the exact combination of sensors.
elif len(sensor) == 2 and 's1' in sensor and 's2' in sensor:
    model = Model_S1S2(drop, n_classes)
elif len(sensor) == 2 and 's2' in sensor and 'spot' in sensor:
    model = Model_S2SPOT(drop, n_classes)
elif len(sensor) == 1 and 's1' in sensor:
    model = Model_S1(drop, n_classes)
elif len(sensor) == 1 and 's2' in sensor:
    model = Model_S2(drop, n_classes)
elif len(sensor) == 1 and 'spot' in sensor:
    model = Model_SPOT(drop, n_classes)

# Learning stage
checkpoint_path = os.path.join(out_path, 'model')
# run() receives every modality; the sensors absent from `sensor` are passed
# as None by the loading code, and `sensor`/`weight` steer what is used.
run(model, train_S1, train_S2, train_MS, train_Pan, train_y,
    valid_S1, valid_S2, valid_MS, valid_Pan, valid_y,
    checkpoint_path, batch_size, lr, n_epochs, sensor, weight)

# Load Test set
test_y = format_y(gt_path + '/test_gt.npy', encode=False)
print('Test GT:', test_y.shape)
# Only load the modalities the chosen sensor combination actually needs.
if 's1' in sensor:
    test_S1 = format_cnn2d(s1_path + '/test_S1.npy')
    print('Test S1:', test_S1.shape)
else:
    test_S1 = None
if 's2' in sensor:
    test_S2 = format_cnn1d(s2_path + '/test_S2.npy')
    print('Test S2:', test_S2.shape)