def __init__(self):
    self.interfaces = self.get_available_interfaces()
    self.default_iface = self.interfaces[0]
    self.localIP = self.get_local_ip(self.default_iface)
    self.network = network.init_network(self)
    self.cur = self.network.cur
    self.categories = None
    self.loaded_capabilities = {}
    self.cnc = self.localIP  # TODO

def define_Gen(input_nc, output_nc, ngf, norm='batch', use_dropout=False, gpu_ids=[0]):
    norm_layer = get_norm_layer(norm_type=norm)
    # InstanceNorm2d has no learnable affine parameters by default, so the
    # conv layers need their own bias; BatchNorm already provides one.
    if type(norm_layer) == functools.partial:
        use_bias = norm_layer.func == nn.InstanceNorm2d
    else:
        use_bias = norm_layer == nn.InstanceNorm2d
    gen_net = Generator(input_nc, output_nc, ngf, norm_layer=norm_layer,
                        use_dropout=use_dropout, use_bias=use_bias)
    return init_network(gen_net, gpu_ids)

def define_Dis(input_nc, ndf, norm='batch', gpu_ids=[0]):
    norm_layer = get_norm_layer(norm_type=norm)
    if type(norm_layer) == functools.partial:
        use_bias = norm_layer.func == nn.InstanceNorm2d
    else:
        use_bias = norm_layer == nn.InstanceNorm2d
    dis_net = Discriminator(input_nc, ndf, norm_layer=norm_layer, use_bias=use_bias)
    return init_network(dis_net, gpu_ids)

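# A minimal usage sketch for the two factory functions above, building a
# generator/discriminator pair. The channel counts and ngf/ndf values are
# illustrative assumptions, not values taken from the original code.
G = define_Gen(input_nc=3, output_nc=3, ngf=64, norm='instance',
               use_dropout=False, gpu_ids=[0])
D = define_Dis(input_nc=3, ndf=64, norm='instance', gpu_ids=[0])
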
def test_init_network(self):
    network = nw.init_network()
    np.testing.assert_array_equal(
        network['W1'], np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]]))
    np.testing.assert_array_equal(network['b1'], np.array([0.1, 0.2, 0.3]))
    np.testing.assert_array_equal(
        network['W2'], np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]]))
    np.testing.assert_array_equal(network['b2'], np.array([0.1, 0.2]))
    np.testing.assert_array_equal(network['W3'], np.array([[0.1, 0.3], [0.2, 0.4]]))
    np.testing.assert_array_equal(network['b3'], np.array([0.1, 0.2]))

def main():
    print(">> Creating directory if it does not exist:\n>> '{}'".format(args.directory))
    if not os.path.exists(args.directory):
        os.makedirs(args.directory)
    log_dir = os.path.join(args.directory, 'log')
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)

    params = {'architecture': args.arch, 'pooling': args.pool}
    n_classes = args.n_classes
    n_samples = args.n_samples
    cuda = args.cuda
    input_size = args.image_size

    transform, transform_te, transform_label = init_transform(input_size)
    kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
    online_train_loader, online_test_loader = init_data_loader(
        args.root, n_classes, n_samples, transform, transform_te,
        transform_label, kwargs)

    # Set up the network and training parameters
    model = init_network(params)
    parameters = []
    # add feature parameters
    parameters.append({'params': model.features.parameters()})
    if cuda:
        model.cuda()

    pos_margin = 1.0
    neg_margin = 0.3

    # define optimizer
    if args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(parameters, args.lr, momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    elif args.optimizer == 'adam':
        optimizer = torch.optim.Adam(parameters, args.lr,
                                     weight_decay=args.weight_decay)

    metrics = [AverageNonzeroTripletsMetric()]
    if args.loss.startswith('OnlineContrastiveEucLoss'):
        loss_fn = OnlineContrastiveEucLoss(pos_margin, neg_margin,
                                           HardNegativePairSelector())
    elif args.loss.startswith('OnlineContrastiveCosLoss'):
        loss_fn = OnlineContrastiveCosLoss(args.loss_margin)
    elif args.loss.startswith('OnlineTriplet'):
        loss_fn = OnlineTripletLoss(
            args.loss_margin, HardestNegativeTripletSelector(args.loss_margin))

    exp_decay = math.exp(-0.01)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=exp_decay)

    writer = SummaryWriter(log_dir=log_dir)
    # add_graph assumes the model is already on GPU (see the cuda branch above)
    writer.add_graph(model.features, torch.ones([1, 3, 224, 224]).cuda())

    fit(online_train_loader, online_test_loader, model, loss_fn, optimizer,
        scheduler, writer, metrics=metrics, args=args)

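# A minimal sketch of what `init_transform` might return, based only on how
# main() unpacks it (train transform, test transform, label transform). The
# augmentation and normalization choices here are illustrative assumptions,
# not the original implementation.
import torchvision.transforms as transforms

def init_transform(input_size):
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    # training-time augmentation
    transform = transforms.Compose([
        transforms.RandomResizedCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    # deterministic test-time preprocessing
    transform_te = transforms.Compose([
        transforms.Resize(input_size),
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        normalize,
    ])
    # labels only need tensor conversion in this sketch
    transform_label = transforms.Compose([transforms.ToTensor()])
    return transform, transform_te, transform_label
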
def test_forward(self):
    network = nw.init_network()
    x = np.array([1.0, 0.5])
    y = nw.forward(network, x)
    self.assertEqual(y[0], 0.3168270764110298)
    self.assertEqual(y[1], 0.6962790898619668)

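# A sketch of the `nw` module the two tests above assume: a fixed 3-layer
# network with sigmoid hidden activations and an identity output layer. With
# the weights checked by test_init_network, forward(network, [1.0, 0.5])
# evaluates to [0.3168270764110298, 0.6962790898619668], matching
# test_forward. The module structure is an assumption; only the weights and
# expected outputs come from the tests.
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def init_network():
    return {
        'W1': np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]]),
        'b1': np.array([0.1, 0.2, 0.3]),
        'W2': np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]]),
        'b2': np.array([0.1, 0.2]),
        'W3': np.array([[0.1, 0.3], [0.2, 0.4]]),
        'b3': np.array([0.1, 0.2]),
    }

def forward(network, x):
    z1 = sigmoid(np.dot(x, network['W1']) + network['b1'])
    z2 = sigmoid(np.dot(z1, network['W2']) + network['b2'])
    # identity activation on the output layer
    return np.dot(z2, network['W3']) + network['b3']
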
# -*- coding: utf-8 -*-
import network
import devices
import sys

if __name__ == '__main__':
    server = network.init_network()
    device_type = 'default'
    if len(sys.argv) == 2:
        device_type = sys.argv[1]
    device = devices.create_device(device_type)
    print('press enter to quit...')
    try:
        sys.stdin.readline()
    except KeyboardInterrupt:
        pass
    finally:
        device = None
        network.deinit_network(server)

def inference_test(args):
    tar_dir = time.strftime("%m %d %H:%M:%S %Y", time.localtime())
    os.mkdir(tar_dir)
    model_path = os.path.join(args.directory, args.resume)
    params = {'architecture': args.arch, 'pooling': args.pool}
    model = init_network(params)
    if not os.path.exists(model_path):
        print(">> No checkpoint found at '{}'".format(model_path))
        return
    else:
        # load checkpoint weights and update model and optimizer
        print(">> Loading checkpoint:\n>> '{}'".format(args.resume))
        checkpoint = torch.load(model_path)
        start_epoch = checkpoint['epoch']
        print('ul epoch:', start_epoch)
        min_loss = checkpoint['min_loss']
        print(min_loss)
        model.load_state_dict(checkpoint['state_dict'])
        print(">>>> loaded checkpoint:\n>>>> '{}' (epoch {})".format(
            args.resume, checkpoint['epoch']))

    # Encapsulate the model for multi-GPU inference
    model = torch.nn.DataParallel(model, device_ids=device_ids, dim=0).cuda()
    model.eval()

    cuda = args.cuda
    batch_size = 16
    index_data_loader, query_data_loader = TEST_DATA_LOADER(
        kwargs={
            'num_workers': 16,
            'pin_memory': False,
            'batch_size': batch_size
        } if cuda else {})
    print('steps num:', len(index_data_loader))

    # Embed the index set batch by batch
    count = 0
    index_array_embedding = np.empty([0, 2048])
    index_array_fn = np.empty(0)
    for index_tensor, index_fn in index_data_loader:
        print('count:', count)
        count += 1
        print(index_tensor.shape, len(index_fn))
        index_tensor = index_tensor.cuda()
        t_start = time.time()
        out_tensor = model.forward(index_tensor)
        print("forward time:", time.time() - t_start)
        t_start = time.time()
        out_tensor = out_tensor.cpu().detach().numpy()
        print('gpu2cpu time:', time.time() - t_start)
        index_array_embedding = np.concatenate(
            (index_array_embedding, out_tensor), axis=0)
        print(index_fn[0:10])
        index_array_fn = np.concatenate((index_array_fn, index_fn))
        print('index_array_fn shape:', index_array_fn.shape)

    print('index_array_embedding shape:', index_array_embedding.shape)
    print('index_array_fn shape:', index_array_fn.shape)
    np.save(os.path.join(tar_dir, 'index_array_embedding.npy'),
            index_array_embedding)
    np.save(os.path.join(tar_dir, 'index_array_fn.npy'), index_array_fn)
    print('save index embedding and fn done')

    # Embed the query set
    count = 0
    all_sum = len(query_data_loader)
    print('allsum', all_sum)
    # was np.empty(0, 2048), which raises a TypeError
    query_array_embedding = np.empty([0, 2048])
    query_array_fn = np.empty(0)
    for query_tensor, query_fn in query_data_loader:
        print('allsum', all_sum, ' count:', count)
        count += 1
        query_tensor = query_tensor.cuda()
        out_tensor = model.forward(query_tensor)
        out_tensor = out_tensor.cpu().detach().numpy()
        query_array_embedding = np.concatenate(
            (query_array_embedding, out_tensor))
        query_array_fn = np.concatenate((query_array_fn, query_fn))

    print('query_array_embedding shape:', query_array_embedding.shape)
    print('query_array_fn shape:', query_array_fn.shape)
    np.save(os.path.join(tar_dir, 'query_array_embedding.npy'),
            query_array_embedding)
    # was the undefined name query_fn_array
    np.save(os.path.join(tar_dir, 'query_array_fn.npy'), query_array_fn)
    print('save query embedding and fn done')

    calculate_rank(index_array_embedding, query_array_embedding, tar_dir, topk=100)

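# A minimal sketch of what `calculate_rank` might do, inferred only from its
# call site above. The cosine-similarity measure and the saved output format
# are assumptions, not the original implementation.
import os
import numpy as np

def calculate_rank(index_embeddings, query_embeddings, tar_dir, topk=100):
    # L2-normalize so a dot product equals cosine similarity
    index_norm = index_embeddings / np.linalg.norm(index_embeddings, axis=1, keepdims=True)
    query_norm = query_embeddings / np.linalg.norm(query_embeddings, axis=1, keepdims=True)
    # similarity matrix: one row per query, one column per index item
    sims = np.dot(query_norm, index_norm.T)
    # indices of the topk most similar index items per query, best first
    ranks = np.argsort(-sims, axis=1)[:, :topk]
    np.save(os.path.join(tar_dir, 'ranks.npy'), ranks)
    return ranks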