def __init__(self, action_size, device, writer, is_dueling=False):
    """Set up a DQN-style agent: replay buffer, three networks, and optimizer.

    Args:
        action_size: number of discrete actions.
        device: torch device the networks live on.
        writer: summary writer used for logging.
        is_dueling: if True, build Dueling_network variants instead of Network.
    """
    self.action_size = action_size
    self.device = device
    # Prioritized experience replay; capacity/batch come from global params.
    self.memoryBuffer = PrioritizedMemory(params.buffer_size, params.batch_size, device)
    self.current_step = 0
    self.writer = writer
    if is_dueling:
        print("Creating dueling network")
        self.local_network = Dueling_network(action_size).to(device)
        self.target_network = Dueling_network(action_size).to(device)
        self.predict_network = Dueling_network(action_size).to(device)
    else:
        print("Creating network without dueling")
        self.local_network = Network(action_size).to(device)
        self.target_network = Network(action_size).to(device)
        self.predict_network = Network(action_size).to(device)
    # Hard-copy local weights into the predict network so they start in sync.
    for predict_param, local_param in zip( self.predict_network.parameters(), self.local_network.parameters()):
        predict_param.data.copy_(local_param.data)
    # Only the local network is optimized directly.
    self.optimizer = optim.Adam(self.local_network.parameters(), lr=params.learning_rate)
def __init__(self, in_actor, hidden_in_actor, hidden_out_actor, out_actor, in_critic, hidden_in_critic, hidden_out_critic, lr_actor=5e-4, lr_critic=5e-4):
    """Build a DDPG agent: actor/critic, their target copies, OU noise, optimizers.

    Args:
        in_actor, hidden_in_actor, hidden_out_actor, out_actor: actor layer sizes.
        in_critic, hidden_in_critic, hidden_out_critic: critic layer sizes
            (the critic outputs a single scalar Q-value).
        lr_actor, lr_critic: Adam learning rates for the two optimizers.
    """
    super(DDPGAgent, self).__init__()
    self.actor = Network(in_actor, hidden_in_actor, hidden_out_actor, out_actor, actor=True).to(device)
    self.critic = Network(in_critic, hidden_in_critic, hidden_out_critic, 1).to(device)
    # Target networks: independent copies used for soft/target updates.
    self.target_actor = Network(in_actor, hidden_in_actor, hidden_out_actor, out_actor, actor=True).to(device)
    self.target_critic = Network(in_critic, hidden_in_critic, hidden_out_critic, 1).to(device)
    # Ornstein-Uhlenbeck exploration noise on the action output.
    self.noise = OUNoise(out_actor, scale=1.0)
    self.actor_optimizer = Adam(self.actor.parameters(), lr=lr_actor)
    self.critic_optimizer = Adam(self.critic.parameters(), lr=lr_critic, weight_decay=0)
def main():
    """Profile two architectures with `stat` on a 3x224x224 input.

    Improvement: the original duplicated the build-and-profile sequence for
    args.arch and for "MY_DARTS"; the two runs are now a single loop.
    """
    # NOTE(review): eval() on "genotypes.<name>" assumes args.arch is trusted input.
    for arch_name in (args.arch, "MY_DARTS"):
        genotype = eval("genotypes.%s" % arch_name)
        model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
        # Disable drop-path so profiling reflects the full network.
        model.drop_path_prob = 0
        # Print parameter / FLOP statistics.
        stat(model, (3, 224, 224))
def __init__(self, env):
    """Set up a DQN trainer around `env`: replay memory, networks, optimizer."""
    self.env = env
    # Fixed-capacity replay memory.
    self.memory = deque(maxlen=10000)
    self.g_step = 0  # global step counter
    self.GAMMA = 0.999  # discount factor
    self.batch_size = 64
    self.online_model = Network().cuda()
    self.target_model = Network().cuda()
    # Pretrained network restored from disk — TODO confirm how it is consumed.
    self.pretrain = Network().cuda()
    self.pretrain.load_state_dict(torch.load('pre-train.pt'))
    # Epsilon schedule: anneal from 1.0 down to 0.1 over 10M steps.
    self.exploration = LinearSchedule(10000000,0.1,1.0)
    self.optimizer = torch.optim.RMSprop(self.online_model.parameters(), lr=1e-4)
    self.lastValid = 0
    self.lastAction = None
def main():
    """Run a trained enhancement model over the test set and save result images.

    Fixes:
    - `Variable(input, volatile=True)` was removed in PyTorch >= 0.4 and is
      redundant under `torch.no_grad()`; the tensor is moved to GPU directly.
    - `print("args = %s", args)` never interpolated; use explicit %-formatting.
    """
    if not torch.cuda.is_available():
        print('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    print('gpu device = %d' % args.gpu)
    print("args = %s" % args)
    model = Network()
    model = model.cuda()
    model_dict = torch.load('./ckpt/' + args.model + '.pt')
    model.load_state_dict(model_dict)
    # Freeze all parameters — inference only.
    for p in model.parameters():
        p.requires_grad = False
    with torch.no_grad():
        for _, (input, image_name) in enumerate(test_queue):
            input = input.cuda()
            image_name = image_name[0].split('.')[0]
            u_list, r_list = model(input)
            u_name = '%s.png' % (image_name)
            u_path = save_path + '/' + u_name
            print('processing {}'.format(u_name))
            # Different checkpoints expose the enhanced image at different
            # positions in u_list.
            if args.model == 'lol':
                save_images(u_list[-1], u_path)
            elif args.model == 'upe' or args.model == 'dark':
                save_images(u_list[-2], u_path)
def main():
    """Evaluate a binarized DARTS-style model on the CIFAR-10 test set."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    cudnn.benchmark = True
    cudnn.enabled=True
    logging.info("args = %s", args)
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    # Look up the cell genotype by name in genotypes.
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers, genotype)
    if args.parallel:
        model = MyDataParallel(model).cuda()
    else:
        model = model.cuda()
    # Weight-binarization helper wrapped around the model.
    bin_op = bin_utils.BinOp(model, args)
    _, valid_transform = utils._data_transforms_cifar10(args)
    valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)
    valid_queue = torch.utils.data.DataLoader( valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)
    utils.load(model, args.path_to_weights)
    # Evaluate with the drop-path probability of the final training epoch.
    if args.parallel:
        model.module.drop_path_prob = args.drop_path_prob * (args.epochs-1) / args.epochs
    else:
        model.drop_path_prob = args.drop_path_prob * (args.epochs-1) / args.epochs
    valid_acc, valid_obj = infer(valid_queue, model, criterion, bin_op)
    logging.info('valid_acc %f', valid_acc)
def main():
    """Train a fixed DARTS genotype on CIFAR-10, saving weights every epoch."""
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    cudnn.enabled = True
    torch.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_ch, 10, args.layers, args.auxiliary, genotype).cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.wd)
    train_transform, valid_transform = utils._data_transforms_cifar10(args)
    train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
    valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)
    train_queue = torch.utils.data.DataLoader(train_data, batch_size=args.batchsz, shuffle=True, pin_memory=True, num_workers=2)
    valid_queue = torch.utils.data.DataLoader(valid_data, batch_size=args.batchsz, shuffle=False, pin_memory=True, num_workers=2)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR( optimizer, float(args.epochs))
    for epoch in range(args.epochs):
        # NOTE(review): stepping the scheduler before training is the pre-1.1
        # PyTorch ordering; scheduler.get_lr() is also deprecated in favour of
        # get_last_lr() on newer versions.
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        # Linearly ramp the drop-path probability over training.
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        # NOTE(review): validation runs *before* training each epoch, so the
        # first reported valid_acc is for the untrained model.
        valid_acc, valid_obj = infer(valid_queue, model, criterion)
        logging.info('valid_acc: %f', valid_acc)
        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('train_acc: %f', train_acc)
        # Overwrites the same file every epoch.
        utils.save(model, os.path.join(args.save, 'trained.pt'))
        print('saved to: trained.pt')
def main():
    """Load a pretrained selected architecture and report its test accuracy."""
    if not torch.cuda.is_available():
        sys.exit(1)
    ## step 1 construct the selected network
    genotype = eval("genotypes.%s" % args.selected_arch)
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype)
    ## step 2 load pretrained model parameter
    if args.cifar100:
        # CIFAR-100 checkpoints were saved from a DataParallel model, under 'net'.
        model = torch.nn.DataParallel(model)
        model = model.cuda()
        model.load_state_dict(torch.load(args.model_path)['net'])
    else:
        utils.load(model, args.model_path)
        model = torch.nn.DataParallel(model)
        model = model.cuda()
    # Disable drop-path for evaluation; set on both wrapper and inner module.
    model.module.drop_path_prob = 0
    model.drop_path_prob = 0
    print("param size = %fMB" % utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    ## step 3 load test data
    valid_queue = load_data_cifar(args)
    ## step 4. inference on test data
    valid_acc, valid_obj = infer(valid_queue, model, criterion)
    print('-----------------------------------------------')
    print('Average Valid_acc: %f ' % valid_acc)
    print('-----------------------------------------------')
def __init__(self, out_dim, device, mode=Mode.PER_STUDY, model_path=None):
    """Wrap a Network with per-class thresholds; optionally restore weights.

    Args:
        out_dim: number of output classes (also the threshold vector length).
        device: torch device the model lives on.
        mode: evaluation mode, defaults to Mode.PER_STUDY.
        model_path: optional checkpoint path restored via self.load_model.
    """
    self.device = device
    self.mode = mode
    self.model = Network(out_dim, mode=mode).to(self.device)
    # One decision threshold per class, initialised to zero.
    self.thresholds = np.zeros(out_dim)
    if model_path is not None:
        self.load_model(model_path)
def load_network():
    """Construct and return an empty Network.

    Route construction (build_route_nodes for routes 1, 2 and 7) is currently
    disabled, so the returned network contains no nodes.
    """
    return Network()
def build_model(data):
    """
    Builds the neural model architecture for use in training or testing.

    Sizes are derived from the vocabulary's pretrained fastText vectors; the
    embedding layer is initialised from fastText and the <unk>/<pad> rows are
    zeroed afterwards.
    """
    # Pretrained fastText vectors aligned with the vocabulary.
    fasttext = data.TEXT.vocab.vectors
    vocab_size = len(data.TEXT.vocab)
    embed_d = fasttext.shape[1]
    hidden_d = embed_d // 2
    context_d = 150
    # imdb uses a single output; other datasets use 3.
    if s.args.dataset == 'imdb':
        output_d = 1
    else:
        output_d = 3
    pad_idx = data.TEXT.vocab.stoi[data.TEXT.pad_token]
    unk_idx = data.TEXT.vocab.stoi[data.TEXT.unk_token]
    model = Network(vocab_size, embed_d, hidden_d, output_d, context_d, s.args.dropout, pad_idx)
    model.to(s.device)
    # Copy pretrained vectors in, then zero the special-token rows so they
    # carry no pretrained signal.
    model.embedding.weight.data.copy_(fasttext)
    model.embedding.weight.data[unk_idx] = torch.zeros(embed_d)
    model.embedding.weight.data[pad_idx] = torch.zeros(embed_d)
    return model
def main(image_dir, checkpoint_path, coloured_images_dir):
    """Colourize every test image with a trained model and save the results.

    Fix: `iter(loader).next()` is Python 2 iterator syntax (removed in
    Python 3); replaced with the builtin `next(iter(loader))`.
    """
    test_data = ImageDataset(image_dir)
    num_images = len(os.listdir(f"{image_dir}/test"))
    # A single batch containing every test image.
    test_dataloader = torch.utils.data.DataLoader(test_data, batch_size=num_images)
    model = Network()
    model = model.to(device)
    if device == torch.device("cpu"):
        checkpoint = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    elif device == torch.device("cuda"):
        checkpoint = torch.load(checkpoint_path)
    model.load_state_dict(checkpoint["model_state_dict"])
    model.eval()
    img_gray, img_ab, img_inception = next(iter(test_dataloader))
    img_gray, img_ab, img_inception = img_gray.to(device), img_ab.to(
        device), img_inception.to(device)
    with torch.no_grad():
        output = model(img_gray, img_inception)
    for idx in range(num_images):
        try:
            _, predicted_image, _ = convert_to_rgb(img_gray[idx].cpu(),
                                                   output[idx].cpu(),
                                                   img_ab[idx].cpu())
            plt.imsave(arr=predicted_image,
                       fname=f"{coloured_images_dir}/colourized_{idx}.jpg")
        except IndexError:
            # Fewer predictions than directory entries; stop cleanly.
            break
def main(encoded):
    """Parse CLI hyper-parameters, train the char-RNN, and save a checkpoint.

    Fix: the `-dropout` flag was parsed but ignored — `drop_prob` was
    hard-coded to 0.5. It now honours `args.dropout` (same default, so
    behavior is unchanged unless the flag is passed).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-bs', type=int, help='batch size', default=64)
    parser.add_argument('-e', type=int, help=' number of epochs', default=10)
    parser.add_argument('-lr', type=float, help='learning rate', default=0.001)
    parser.add_argument('-sl', type=int, help="sequence length", default=100)
    parser.add_argument('-nh', type=int, help="number of hidden", default=128)
    parser.add_argument('-nl', type=int, help="number of layers", default=2)
    parser.add_argument('-dropout', type=float, help="dropout", default=0.5)
    args = parser.parse_args()
    net = Network(chars, n_hidden=args.nh, n_layers=args.nl, drop_prob=args.dropout, lr=args.lr)
    train(net, encoded, epochs=args.e, batch_size=args.bs, seq_length=args.sl)
    model_name = "rnn_{}_epochs.net".format(args.e)
    # Everything needed to rebuild the model at load time.
    checkpoint = {
        "n_hidden": net.n_hidden,
        "n_layers": net.n_layers,
        "state_dict": net.state_dict(),
        "tokens": net.chars,
    }
    with open("weights/" + model_name, mode="wb") as f:
        torch.save(checkpoint, f)
def main():
    """Evaluate the best checkpoint (top1.pt) on the CIFAR-10 test set."""
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    cudnn.enabled = True
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    # `genotype` comes from module scope; auxiliary head enabled (True).
    model = Network(args.init_ch, 10, args.layers, True, genotype).cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    checkpoint = torch.load(args.checkpoint + '/top1.pt')
    model.load_state_dict(checkpoint['model_state_dict'])
    criterion = nn.CrossEntropyLoss().cuda()
    CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]
    CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]
    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
    ])
    # NOTE(review): shuffle=True on an evaluation loader is unusual but does
    # not change the aggregate accuracy.
    valid_queue = torch.utils.data.DataLoader(dset.CIFAR10( root=args.data, train=False, transform=valid_transform), batch_size=args.batch_size, shuffle=True, num_workers=2, pin_memory=True)
    valid_acc, valid_obj = infer(valid_queue, model, criterion)
    logging.info('valid_acc: %f', valid_acc)
def show_data_pairs():
    """Visually inspect (image, label) pairs from the ADE20K training pipeline.

    Shows one matplotlib figure per batch (image left, label map right) and
    prints the set of distinct label values found in each label map.
    """
    from model import Network
    net = Network(
        input_shape=[384, 384],
        data_dir='E:\\datasets\\ADEChallengeData2016\\images\\training\\',
        label_dir='E:\\datasets\\ADEChallengeData2016\\annotations\\training\\',
        batch_size=1,
        learning_rate=8e-4,
        epoch=1,
        pre_train=True)
    print('--Initialized Network')
    while True:
        # Pull one batch; stop when the TF input pipeline is exhausted.
        try:
            xs, ys = net.sess.run([net.batch_xs, net.batch_ys])
        except tf.errors.OutOfRangeError:
            break
        plt.figure()
        plt.subplot(121)
        plt.imshow(xs.reshape((384, 384, 3)))
        plt.subplot(122)
        plt.imshow(ys.reshape((384, 384)), cmap='gray')
        plt.show()
        plt.close()
        # Collect the distinct label ids present in this label map.
        ss = set()
        for i in range(384):
            for j in range(384):
                ss.add(ys[0, i, j, 0])
        print(ss)
def main():
    """Evaluate a saved DARTS-genotype model on the CIFAR-10 test set."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True
    cudnn.enabled = True
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    # equal to: genotype = genotypes.DARTS_v2
    genotype = eval("genotypes.%s" % args.arch)
    print('Load genotype:', genotype)
    model = Network(args.init_ch, 10, args.layers, args.auxiliary, genotype).cuda()
    utils.load(model, args.exp_path)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss().cuda()
    _, test_transform = utils._data_transforms_cifar10(args)
    test_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=test_transform)
    test_queue = torch.utils.data.DataLoader( test_data, batch_size=args.batchsz, shuffle=False, pin_memory=True, num_workers=2)
    # NOTE(review): drop-path is left at args.drop_path_prob for evaluation;
    # many eval scripts set it to 0 instead — confirm this is intentional.
    model.drop_path_prob = args.drop_path_prob
    test_acc, test_obj = infer(test_queue, model, criterion)
    logging.info('test_acc %f', test_acc)
def main():
    """Evaluate the saved network on the test set and report loss and accuracy.

    Fix: the original divided by len(test_loader) * batch_size, which
    over-counts when the final batch is partial; both the loss weighting and
    the accuracy denominator now use the true number of samples seen.
    """
    ## test set loading
    test_loader, classes = get_test_loader(root_dir='./datasets', batch_size=128, augmented=True)
    ## load network
    PATH_test = 'networks/network.pt'
    nett = Network().cuda()
    nett.load_state_dict(torch.load(PATH_test))
    nett.eval()
    device_t = torch.device('cuda')
    total_loss = 0
    total_correct = 0
    total_samples = 0
    for batch_t in test_loader:
        images = batch_t[0].to(device_t)
        labels = batch_t[1].to(device_t)
        preds_t = nett(images)  ## pass batch
        loss_t = F.cross_entropy(preds_t, labels)
        # Weight the mean batch loss by the actual batch size.
        total_loss += loss_t.item() * labels.size(0)
        total_correct += preds_t.argmax(dim=1).eq(labels).sum().item()
        total_samples += labels.size(0)
    print(total_loss)
    print('accuracy:', total_correct / total_samples)
def main(args):
    """Build ImageFolder loaders, construct the model, and run validation.

    NOTE(review): the train(...) call is commented out, so this currently
    validates a freshly-initialised (untrained) model.
    """
    print("Loading data")
    train_dataset = datasets.ImageFolder(args.data + "/train", transform=train_transforms)
    val_dataset = datasets.ImageFolder(args.data + "/val", transform=val_transforms)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=1)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=1)
    print("Data loaded")
    model = Network(args)
    if args.cuda:
        model = model.cuda()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    print("Starting training")
    # train(args, model, optimizer, train_loader, val_loader)
    validation(model, val_loader)
def main():
    """Evaluate a pretrained genotype model on CIFAR-10 (CPU- or GPU-backed)."""
    # logging.info('no gpu device available')
    # sys.exit(1)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    # GPU-specific setup only when CUDA is present.
    if torch.cuda.is_available():
        torch.cuda.set_device(args.gpu)
        cudnn.benchmark = True
        cudnn.enabled=True
        torch.cuda.manual_seed(args.seed)
        logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype)
    utils.load(model, args.model_path, strict=False)
    # NOTE(review): to_device() is not a stock nn.Module method — presumably a
    # project-level patch that maps to .to(torch._default_device); confirm it exists.
    model = model.to_device()  # to(torch._default_device)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.to_device()  # to(torch._default_device)
    _, test_transform = utils._data_transforms_cifar10(args)
    test_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=test_transform)
    test_queue = torch.utils.data.DataLoader( test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)
    model.drop_path_prob = args.drop_path_prob
    test_acc, test_obj = infer(test_queue, model, criterion)
    logging.info('test_acc %f', test_acc)
def main():
    """Classify a single image file with the saved FashionMNIST network."""
    ## loading the network
    PATH = 'networks/network.pt'
    net = Network().cuda()
    net.load_state_dict(torch.load(PATH))
    net.eval()
    ## getting the classes
    test_set = torchvision.datasets.FashionMNIST(root='./datasets', train=False, download=True, transform=transforms.Compose( [transforms.ToTensor()]))
    classes = test_set.classes
    imsize = 28
    loader_ex = transforms.Compose([
        transforms.Resize(imsize),  # scale imported image to 28px
        transforms.ToTensor()
    ])
    image = Image.open('images/image1.jpg')  ## image directory goes here __
    # Convert to single-channel grayscale to match the training data.
    x = TF.to_grayscale(image)
    x = loader_ex(x)
    # Add a batch dimension, run the network, take the argmax class.
    idx = net(x.unsqueeze(0).cuda()).argmax(dim=1)[0]
    print(classes[idx])
def main(_):
    """Restore the latest checkpoint and run the network on one test image."""
    net = Network(FLAGS.input_height, FLAGS.input_width)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
        if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
            saver.restore(sess, ckpt.model_checkpoint_path)
            print('Load success: %s' % ckpt.model_checkpoint_path)
        else:
            print('Load failed. Check your checkpoint directory!!!')
        # read image, resize to the network's input size, convert BGR->RGB.
        img = cv2.resize(cv2.imread(FLAGS.test_image), (FLAGS.input_height, FLAGS.input_width))
        _input = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        _input = np.expand_dims(_input, axis=0)
        # NOTE: _input is still uint8 at this point (translated from Korean).
        out = sess.run(net.preds, feed_dict={net.inputs: _input})
        # Presumably net.preds is in [0, 1]; rescale to 8-bit — TODO confirm.
        out = out[0] * 255
        if not os.path.exists(FLAGS.save_dir):
            os.makedirs(FLAGS.save_dir)
        cv2.imshow('test input', img)
        cv2.imshow('test output', out)
        cv2.imwrite(os.path.join(FLAGS.save_dir, FLAGS.test_image), out)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
def main():
    """Evaluate a genotype model on an ImageFolder test set and print accuracy.

    Fix: `print('test_acc %f', test_acc)` used logging-style lazy arguments,
    so the literal string '%f' was printed followed by the value as a second
    argument; it now interpolates explicitly.
    """
    if not torch.cuda.is_available():
        print('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, US_CLASSES, args.layers, args.auxiliary, genotype, args.drop_path_prob)
    model = model.cuda()
    utils.load(model, args.model_path)
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    _, test_transform = utils._data_transforms_us(args)
    test_data = datasets.ImageFolder(root=args.data, transform=test_transform)
    test_queue = torch.utils.data.DataLoader(
        test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)
    test_acc = infer(test_queue, model, criterion)
    print('test_acc %f' % test_acc)
def __init__(self):
    """Build the network, create a TF session, and restore saved weights."""
    self.net = Network()
    self.sess = tf.Session()
    self.sess.run(tf.global_variables_initializer())
    # Load the saved model into the session.
    self.restore()
def register():
    """Register (or refresh) a device in the per-remote-IP network map.

    Expects a JSON body with 'ip_list' and 'hostname'; 'unique_id' is
    optional and, when present, is preferred as the device key.

    Fixes:
    - A missing key raises KeyError, which the original `except TypeError`
      (covering only a None/non-dict body) did not catch — such requests
      crashed with a 500 instead of returning 400. Both are caught now.
    - `networks.get(remote_ip, Network(remote_ip))` eagerly constructed a
      throwaway Network on every call; it is now built only when needed.
    """
    request_data = request.get_json()
    try:
        ip_list = tuple(request_data['ip_list'])
        hostname = request_data['hostname']
    except (TypeError, KeyError):
        return '"ip_list" or "hostname" key is missing or wrong datatype', 400  # Bad Request
    unique_id = request_data.get('unique_id', None)
    remote_ip = get_remote_ip(request)
    network = networks.get(remote_ip)
    if network is None:
        network = Network(remote_ip)
    key = unique_id if unique_id else hostname
    if key in network:
        # Known device: refresh its record in place.
        network[key].update_timestamp()
        network[key].ip_list = ip_list
        network[key].hostname = hostname
    else:
        network[key] = Device(ip_list, hostname, unique_id)
    networks[remote_ip] = network
    return ''
def main():
    """Evaluate a pretrained genotype model on CIFAR-10 (drop-path disabled)."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    torch.cuda.set_device(args.gpu)
    cudnn.enabled = True
    logging.info("args = %s", args)
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype)
    model = model.cuda()
    utils.load(model, args.model_path)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    _, test_transform = utils._data_transforms_cifar10(args)
    test_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=test_transform)
    test_queue = torch.utils.data.DataLoader(test_data, batch_size=args.batch_size, shuffle=False, pin_memory=False, num_workers=2)
    # No drop-path at evaluation time.
    model.drop_path_prob = 0.0
    test_acc, test_obj = infer(test_queue, model, criterion)
    logging.info('Test_acc %f', test_acc)
def main():
    """Run bathymetry inference on the 29TNE and 29SMD tiles; write CSV results."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    # Resolve the genotype: prefer a serialized genotype.txt next to the model,
    # then a genoname.txt naming an entry in genotypes, else fall back to BATH.
    genotype_path = os.path.join(utils.get_dir(), os.path.split(args.model_path)[0], 'genotype.txt')
    if os.path.isfile(genotype_path):
        with open(genotype_path, "r") as f:
            geno_raw = f.read()
        genotype = eval(geno_raw)
    else:
        genoname = os.path.join(utils.get_dir(), os.path.split(args.model_path)[0], 'genoname.txt')
        if os.path.isfile(genoname):
            with open(genoname, "r") as f:
                args.arch = f.read()
            genotype = eval("genotypes.%s" % args.arch)
        else:
            genotype = eval("genotypes.BATH")
    # Regression network: 1 output, 4 input channels.
    model = Network(args.init_channels, 1, args.layers, args.auxiliary, genotype, input_channels=4)
    model = model.cuda()
    print(os.path.join(utils.get_dir(), args.model_path))
    utils.load(model, os.path.join(utils.get_dir(), args.model_path))
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.MSELoss()
    criterion = criterion.cuda()
    # Tile 1: 29TNE.
    test_data_tne = utils.BathymetryDataset(args, "../29TNE.csv", root_dir="dataset/bathymetry/29TNE/dataset_29TNE", to_trim="/tmp/pbs.6233542.admin01/tmp_portugal/", to_filter=False)
    test_queue_tne = torch.utils.data.DataLoader( test_data_tne, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)
    model.drop_path_prob = args.drop_path_prob
    test_obj, targets, preds = infer(test_queue_tne, model, criterion, args.depth_normalization)
    logging.info('test_obj tne %f', test_obj)
    test_data_tne.write_results(targets, preds, os.path.join(args.save, 'tne_results.csv'))
    # Tile 2: 29SMD.
    test_data_smd = utils.BathymetryDataset(args, "../29SMD.csv", root_dir="dataset/bathymetry/29SMD/dataset_29SMD", to_trim="/tmp/pbs.6233565.admin01/tmp_portugal/", to_filter=False)
    test_queue_smd = torch.utils.data.DataLoader( test_data_smd, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)
    test_obj, targets, preds = infer(test_queue_smd, model, criterion, args.depth_normalization)
    logging.info('test_obj smd %f', test_obj)
    test_data_smd.write_results(targets, preds, os.path.join(args.save, 'smd_results.csv'))
def main():
    """Train a fixed genotype on CIFAR-10 for args.epochs, saving after each epoch."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled=True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype)
    model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )
    train_transform, valid_transform = utils._data_transforms_cifar10(args)
    train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
    valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=2)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))
    for epoch in range(args.epochs):
        # NOTE(review): scheduler stepped before training is the pre-1.1
        # PyTorch ordering; get_lr() is deprecated on newer versions.
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        # Ramp drop-path probability linearly over training.
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        start_time = time.time()
        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('train_acc %f', train_acc)
        valid_acc, valid_obj = infer(valid_queue, model, criterion)
        logging.info('valid_acc %f', valid_acc)
        end_time = time.time()
        duration = end_time - start_time
        print('Epoch time: %ds.' %duration)
        # Overwrites the same weights file each epoch.
        utils.save(model, os.path.join(args.save, 'weights.pt'))
def main():
    """Train a fixed genotype on a dataset selected via utils.dataset_fields."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    # Dataset-specific dimensions: input channels, class count, aux-head stride.
    in_channels, num_classes, dataset_in_torch, stride_for_aux = utils.dataset_fields( args, train=False)  # new
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, in_channels, stride_for_aux, num_classes, args.layers, args.auxiliary, genotype)
    model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
    train_data, valid_data = utils.dataset_split_and_transform( dataset_in_torch, args, train=False)  # new
    train_queue = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=2)
    valid_queue = torch.utils.data.DataLoader(valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR( optimizer, float(args.epochs))
    for epoch in range(args.epochs):
        # NOTE(review): pre-1.1 PyTorch ordering (scheduler before training).
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        # Ramp drop-path probability linearly over training.
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('train_acc %f', train_acc)
        valid_acc, valid_obj = infer(valid_queue, model, criterion)
        logging.info('valid_acc %f', valid_acc)
        utils.save(model, os.path.join(args.save, 'weights.pt'))
def make_animation():
    """Replay a saved multi-agent pathfinding test case and save it as an MP4."""
    # RGB palette indexed by cell state code (0..4).
    color_map = np.array([[255, 255, 255],  # white: free cell
                          [190, 190, 190],  # gray: obstacle
                          [0, 191, 255],    # blue: agent
                          [255, 165, 0],    # orange: goal
                          [0, 250, 154]])   # green: agent on its goal
    test_name = 'test4.pkl'
    with open(test_name, 'rb') as f:
        tests = pickle.load(f)
    test_case = 1
    # Checkpoint id encoded as save_interval * 40.
    model_name = config.save_interval * 40
    steps = 30
    network = Network()
    network.eval()
    network.to(device)
    state_dict = torch.load('./models/{}.pth'.format(model_name), map_location=device)
    network.load_state_dict(state_dict)
    env = Environment()
    env.load(tests['maps'][test_case], tests['agents'][test_case], tests['goals'][test_case])
    fig = plt.figure()
    done = False
    obs_pos = env.observe()
    imgs = []
    while not done and env.steps < steps:
        imgs.append([])
        # NOTE(review): `map` shadows the builtin; harmless in this scope.
        map = np.copy(env.map)
        for agent_id in range(env.num_agents):
            if np.array_equal(env.agents_pos[agent_id], env.goals_pos[agent_id]):
                # Agent sitting on its goal gets the combined color.
                map[tuple(env.agents_pos[agent_id])] = 4
            else:
                map[tuple(env.agents_pos[agent_id])] = 2
                map[tuple(env.goals_pos[agent_id])] = 3
        map = map.astype(np.uint8)
        # One animation frame = the grid image plus all agent/goal labels.
        img = plt.imshow(color_map[map], animated=True)
        imgs[-1].append(img)
        for i, ((agent_x, agent_y), (goal_x, goal_y)) in enumerate(zip(env.agents_pos, env.goals_pos)):
            text = plt.text(agent_y, agent_x, i, color='black', ha='center', va='center')
            imgs[-1].append(text)
            text = plt.text(goal_y, goal_x, i, color='black', ha='center', va='center')
            imgs[-1].append(text)
        # Let the policy choose the next joint action and advance the env.
        actions, _, _ = network.step(torch.from_numpy(obs_pos[0].astype(np.float32)).to(device), torch.from_numpy(obs_pos[1].astype(np.float32)).to(device))
        obs_pos, _, done, _ = env.step(actions)
        # print(done)
    ani = animation.ArtistAnimation(fig, imgs, interval=500, blit=True, repeat_delay=1000)
    ani.save('dynamic_images.mp4')
def __init__(self):
    """Set up an actor-critic trainer: shared network, worker pool, optimizer."""
    self.network = Network(action_space=4, step_repeat_times=frame_siz, alpha=alpha, beta=beta)
    self.workers = Workers(num_workers=Worker_num)
    self.optimizer = torch.optim.Adam(self.network.parameters(), lr=1e-4)
    # Rolling window of the most recent episode scores.
    self.global_score = collections.deque(maxlen=50)
    self.w_states = [self.workers.get_state(i) for i in range(self.workers.num_workers)]  # states of each workers.
    self.w_stop = []  # if true then do not worker.step()
    self.train_time = 0