Пример #1
0
def main():
    """Evaluate a saved DARTS-style network checkpoint on the CIFAR-10 test split.

    Relies on module-level globals: ``args``, ``genotype``, ``Network``,
    ``utils``, ``infer``, plus the usual torch/torchvision imports.
    """
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    cudnn.enabled = True

    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    # 10 output classes (CIFAR-10); fourth arg True presumably enables the
    # auxiliary head -- TODO confirm against Network's signature.
    model = Network(args.init_ch, 10, args.layers, True, genotype).cuda()

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    # Restore the best-top1 checkpoint produced by training.
    checkpoint = torch.load(args.checkpoint + '/top1.pt')
    model.load_state_dict(checkpoint['model_state_dict'])
    criterion = nn.CrossEntropyLoss().cuda()

    CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]
    CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]

    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
    ])

    # FIX: evaluation loader now uses shuffle=False. Shuffling a validation
    # set only randomizes batch order (accuracy is unaffected) and every
    # other evaluation script in this file uses shuffle=False.
    valid_queue = torch.utils.data.DataLoader(dset.CIFAR10(
        root=args.data, train=False, transform=valid_transform),
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=2,
                                              pin_memory=True)

    valid_acc, valid_obj = infer(valid_queue, model, criterion)
    logging.info('valid_acc: %f', valid_acc)
Пример #2
0
def main():
  """Evaluate a (possibly binarized) DARTS network on the CIFAR-10 test set.

  Uses module-level globals: ``args``, ``Network``, ``MyDataParallel``,
  ``bin_utils``, ``utils``, ``infer``, ``genotypes``, ``CIFAR_CLASSES``.
  """
  if not torch.cuda.is_available():
    logging.info('no gpu device available')
    sys.exit(1)

  cudnn.benchmark = True
  cudnn.enabled=True
  logging.info("args = %s", args)
  criterion = nn.CrossEntropyLoss()
  criterion = criterion.cuda()
  # NOTE(review): eval() on a CLI string -- assumes args.arch is trusted.
  genotype = eval("genotypes.%s" % args.arch)
  model = Network(args.init_channels, CIFAR_CLASSES, args.layers, genotype)
  if args.parallel:
    model = MyDataParallel(model).cuda() 
  else:
    model = model.cuda()
  # Weight-binarization helper wrapped around the model -- presumably an
  # XNOR/BinaryNet-style BinOp; verify against bin_utils.
  bin_op = bin_utils.BinOp(model, args)

  _, valid_transform = utils._data_transforms_cifar10(args)
  valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)

  valid_queue = torch.utils.data.DataLoader(
      valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)

  utils.load(model, args.path_to_weights)
  # Final-epoch drop-path probability; the attribute lives on .module when
  # the model is wrapped by the parallel container.
  if args.parallel:
    model.module.drop_path_prob = args.drop_path_prob * (args.epochs-1) / args.epochs
  else:
    model.drop_path_prob = args.drop_path_prob * (args.epochs-1) / args.epochs
  valid_acc, valid_obj = infer(valid_queue, model, criterion, bin_op)
  logging.info('valid_acc %f', valid_acc)
def get_cifar_tuned_model(load_weights=True):
    """Build the tuned CIFAR network; optionally restore its saved weights.

    Loads 'weights/cifar_tuned.pt' onto the CPU when load_weights is True.
    """
    net = NetworkCIFAR(40, CIFAR_CLASSES, 20, True, 0.4, CIFAR_TUNED)
    if not load_weights:
        return net
    # Map the checkpoint onto the CPU so no GPU is required at load time.
    state = torch.load('weights/cifar_tuned.pt',
                       map_location=torch.device('cpu'))
    net.load_state_dict(state)
    return net
Пример #4
0
def main():
    """Load a trained architecture from args.model_path and report CIFAR-10 test accuracy.

    Uses module-level globals: ``args``, ``Network``, ``utils``, ``infer``,
    ``genotypes``, ``CIFAR_CLASSES``.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    torch.cuda.set_device(args.gpu)
    cudnn.enabled = True
    logging.info("args = %s", args)

    # NOTE(review): eval() on a CLI string -- assumes args.arch is trusted.
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers,
                    args.auxiliary, genotype)
    model = model.cuda()
    utils.load(model, args.model_path)

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()

    _, test_transform = utils._data_transforms_cifar10(args)
    test_data = dset.CIFAR10(root=args.data,
                             train=False,
                             download=True,
                             transform=test_transform)

    test_queue = torch.utils.data.DataLoader(test_data,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             pin_memory=False,
                                             num_workers=2)

    # Disable drop-path entirely for deterministic evaluation.
    model.drop_path_prob = 0.0
    test_acc, test_obj = infer(test_queue, model, criterion)
    logging.info('Test_acc %f', test_acc)
Пример #5
0
def main():
    # logging.info('no gpu device available')
    # sys.exit(1)

  np.random.seed(args.seed)
  torch.manual_seed(args.seed)
  if torch.cuda.is_available():
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    cudnn.enabled=True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
  logging.info("args = %s", args)

  genotype = eval("genotypes.%s" % args.arch)
  model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype)
  utils.load(model, args.model_path, strict=False)
  model = model.to_device() # to(torch._default_device)

  logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

  criterion = nn.CrossEntropyLoss()
  criterion = criterion.to_device() # to(torch._default_device)

  _, test_transform = utils._data_transforms_cifar10(args)
  test_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=test_transform)

  test_queue = torch.utils.data.DataLoader(
      test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)

  model.drop_path_prob = args.drop_path_prob
  test_acc, test_obj = infer(test_queue, model, criterion)
  logging.info('test_acc %f', test_acc)
Пример #6
0
def main():
    """Train a DARTS-style network on CIFAR-10 and save weights each epoch.

    Uses module-level globals: ``args``, ``Network``, ``utils``, ``train``,
    ``infer``, ``genotypes``.
    """
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    cudnn.enabled = True
    torch.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    # NOTE(review): eval() on a CLI string -- assumes args.arch is trusted.
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_ch, 10, args.layers, args.auxiliary,
                    genotype).cuda()

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.wd)

    train_transform, valid_transform = utils._data_transforms_cifar10(args)
    train_data = dset.CIFAR10(root=args.data,
                              train=True,
                              download=True,
                              transform=train_transform)
    valid_data = dset.CIFAR10(root=args.data,
                              train=False,
                              download=True,
                              transform=valid_transform)

    train_queue = torch.utils.data.DataLoader(train_data,
                                              batch_size=args.batchsz,
                                              shuffle=True,
                                              pin_memory=True,
                                              num_workers=2)

    valid_queue = torch.utils.data.DataLoader(valid_data,
                                              batch_size=args.batchsz,
                                              shuffle=False,
                                              pin_memory=True,
                                              num_workers=2)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs))

    for epoch in range(args.epochs):
        # NOTE(review): scheduler.step() before optimizer.step() is the
        # pre-1.1 PyTorch ordering; on newer versions it shifts the LR
        # schedule by one epoch. Left as-is to preserve behavior.
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        # Drop-path probability ramps linearly over the run.
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

        # NOTE(review): validation runs *before* training each epoch here
        # (the reverse of the other scripts in this file) -- confirm intended.
        valid_acc, valid_obj = infer(valid_queue, model, criterion)
        logging.info('valid_acc: %f', valid_acc)

        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('train_acc: %f', train_acc)

        utils.save(model, os.path.join(args.save, 'trained.pt'))
        print('saved to: trained.pt')
Пример #7
0
def main():
    """Load a pretrained DARTS model from args.exp_path and report test accuracy."""

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True
    cudnn.enabled = True
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    # equal to: genotype = genotypes.DARTS_v2
    genotype = eval("genotypes.%s" % args.arch)
    print('Load genotype:', genotype)
    model = Network(args.init_ch, 10, args.layers, args.auxiliary, genotype).cuda()
    utils.load(model, args.exp_path)

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss().cuda()

    _, test_transform = utils._data_transforms_cifar10(args)
    test_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=test_transform)
    test_queue = torch.utils.data.DataLoader(
        test_data, batch_size=args.batchsz, shuffle=False, pin_memory=True, num_workers=2)

    # Drop-path kept at the training value during inference -- TODO confirm
    # whether infer() actually applies it in eval mode.
    model.drop_path_prob = args.drop_path_prob
    test_acc, test_obj = infer(test_queue, model, criterion)
    logging.info('test_acc %f', test_acc)
Пример #8
0
def main():
    """Train a DARTS network on a dataset chosen via utils.dataset_fields.

    Unlike the CIFAR-only variants in this file, dataset shape (channels,
    classes, auxiliary stride) comes from utils helpers.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    # Dataset-dependent dimensions resolved by the project helper.
    in_channels, num_classes, dataset_in_torch, stride_for_aux = utils.dataset_fields(
        args, train=False)  # new
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, in_channels, stride_for_aux,
                    num_classes, args.layers, args.auxiliary, genotype)
    model = model.cuda()

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    train_data, valid_data = utils.dataset_split_and_transform(
        dataset_in_torch, args, train=False)  # new
    train_queue = torch.utils.data.DataLoader(train_data,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              pin_memory=True,
                                              num_workers=2)

    valid_queue = torch.utils.data.DataLoader(valid_data,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              pin_memory=True,
                                              num_workers=2)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs))

    for epoch in range(args.epochs):
        # NOTE(review): pre-1.1 PyTorch step ordering (scheduler before
        # optimizer); preserved as-is.
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        # Drop-path probability ramps linearly over the run.
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('train_acc %f', train_acc)

        valid_acc, valid_obj = infer(valid_queue, model, criterion)
        logging.info('valid_acc %f', valid_acc)

        utils.save(model, os.path.join(args.save, 'weights.pt'))
Пример #9
0
def main():
  """Train a DARTS network on CIFAR-10, logging per-epoch wall-clock time."""
  if not torch.cuda.is_available():
    logging.info('no gpu device available')
    sys.exit(1)

  np.random.seed(args.seed)
  torch.cuda.set_device(args.gpu)
  cudnn.benchmark = True
  torch.manual_seed(args.seed)
  cudnn.enabled=True
  torch.cuda.manual_seed(args.seed)
  logging.info('gpu device = %d' % args.gpu)
  logging.info("args = %s", args)

  # NOTE(review): eval() on a CLI string -- assumes args.arch is trusted.
  genotype = eval("genotypes.%s" % args.arch)
  model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype)
  model = model.cuda()

  logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

  criterion = nn.CrossEntropyLoss()
  criterion = criterion.cuda()
  optimizer = torch.optim.SGD(
      model.parameters(),
      args.learning_rate,
      momentum=args.momentum,
      weight_decay=args.weight_decay
      )

  train_transform, valid_transform = utils._data_transforms_cifar10(args)
  train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
  valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)

  train_queue = torch.utils.data.DataLoader(
      train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=2)

  valid_queue = torch.utils.data.DataLoader(
      valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)

  scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))

  for epoch in range(args.epochs):
    # NOTE(review): pre-1.1 PyTorch step ordering (scheduler before
    # optimizer); preserved as-is.
    scheduler.step()
    logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
    # Drop-path probability ramps linearly over the run.
    model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

    start_time = time.time()

    train_acc, train_obj = train(train_queue, model, criterion, optimizer)
    logging.info('train_acc %f', train_acc)

    valid_acc, valid_obj = infer(valid_queue, model, criterion)
    logging.info('valid_acc %f', valid_acc)

    end_time = time.time()
    duration = end_time - start_time
    print('Epoch time: %ds.' %duration)

    utils.save(model, os.path.join(args.save, 'weights.pt'))
Пример #10
0
def main():
  """Train a DARTS-style network for image denoising (MSE loss, custom dataset).

  The single DENOISE_DATASET is split into train/validation subsets with
  SubsetRandomSampler according to args.train_portion.
  """
  if not torch.cuda.is_available():
    logging.info('no gpu device available')
    sys.exit(1)

  np.random.seed(args.seed)
  torch.cuda.set_device(args.gpu)
  cudnn.benchmark = True
  torch.manual_seed(args.seed)
  cudnn.enabled=True
  torch.cuda.manual_seed(args.seed)
  logging.info('gpu device = %d' % args.gpu)
  logging.info("args = %s", args)

  genotype = eval("genotypes.%s" % args.arch)
  # Network variant that produces an image of the given output size.
  model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype,output_height=args.img_cropped_height,output_width=args.img_cropped_width)
  model = model.cuda()

  logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

  # Regression objective: pixel-wise MSE instead of cross-entropy.
  criterion = nn.MSELoss()
  criterion = criterion.cuda()
  optimizer = torch.optim.SGD(
      model.parameters(),
      args.learning_rate,
      momentum=args.momentum,
      weight_decay=args.weight_decay
      )

  train_transform, valid_transform = utils._data_trainsforms_denosining_dataset(args)
  train_data = DENOISE_DATASET(root=args.data,train_folder=args.train_data,label_folder=args.label_data,train=True, transform=train_transform,target_transform=train_transform )
 #valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)
  num_train = len(train_data)
  indices = list(range(num_train))
  # First `split` indices train; the remainder validate.
  split = int(np.floor(args.train_portion * num_train))

  train_queue = torch.utils.data.DataLoader(
      train_data, batch_size=args.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]), pin_memory=True, num_workers=2)

  valid_queue = torch.utils.data.DataLoader(
      train_data, batch_size=args.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]), pin_memory=True, num_workers=2)

  scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))

  for epoch in range(args.epochs):
    # NOTE(review): pre-1.1 PyTorch step ordering (scheduler before
    # optimizer); preserved as-is.
    scheduler.step()
    logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
    model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

    # train()/infer() return only the loss objective here (no accuracy).
    train_obj = train(train_queue, model, criterion, optimizer)
    logging.info('train_obj %f', train_obj)

    valid_obj = infer(valid_queue, model, criterion)
    logging.info('valid_obj %f', valid_obj)

    utils.save(model, os.path.join(args.save, './weights.pt'))
Пример #11
0
def main():
    """Evaluate a trained network on the CIFAR-10.1 v4 distribution-shift test set.

    Loads the CIFAR-10.1 arrays from hard-coded .npy paths and wraps them in
    a TensorDataset instead of using torchvision's CIFAR-10.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    in_channels = 3
    num_classes = 10
    stride_for_aux = 3

    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, in_channels, stride_for_aux,
                    num_classes, args.layers, args.auxiliary, genotype)
    model = model.cuda()
    utils.load(model, args.model_path)

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()

    # test_transform is computed but unused below -- the raw arrays are fed
    # without normalization. NOTE(review): confirm that is intended.
    _, test_transform = utils._data_transforms_cifar10(args)
    #test_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=test_transform)

    #version = 'v6'
    #images, labels = utils_test.load_new_test_data(version)
    #num_images = images.shape[0]

    # NOTE(review): machine-specific absolute paths -- not portable.
    data_10 = np.load(
        "/home/sivan/darts/cnn/data_cifar_10_1/cifar10.1_v4_data.npy")
    labels_10 = np.load(
        '/home/sivan/darts/cnn/data_cifar_10_1/cifar10.1_v4_labels.npy')
    #
    tensor_x = torch.stack([torch.Tensor(i)
                            for i in data_10])  # transform to torch tensors
    # Labels become float tensors here; infer() presumably casts them back
    # to long for CrossEntropyLoss -- TODO confirm.
    tensor_y = torch.stack([torch.Tensor(i) for i in labels_10])

    test_data = utils_d.TensorDataset(tensor_x, tensor_y)  # create your datset

    test_queue = torch.utils.data.DataLoader(test_data,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             pin_memory=True,
                                             num_workers=2)

    model.drop_path_prob = args.drop_path_prob
    test_acc, test_obj = infer(test_queue, model, criterion)
    logging.info('test_acc %f', test_acc)
Пример #12
0
    def _init_model(self):
        """Build the network, optimizer, criterion and LR scheduler; optionally resume.

        Side effects: sets self.model, self.criterion, self.optimizer,
        self.scheduler, self.best_acc_top1, and (on resume) self.dur_time,
        self.args.start_epoch and self.args.drop_path_prob.
        """
        genotype = eval('genotypes.%s' % self.args.arch)
        model = Network(self.args.init_channels, self.num_classes,
                        self.args.layers, self.args.auxiliary, genotype,
                        self.args.parse_method)
        # Profile FLOPs/params on a dummy CIFAR-sized input before moving
        # the model to the device.
        flops, params = profile(model,
                                inputs=(torch.randn(1, 3, 32, 32), ),
                                verbose=False)
        self.logger.info('flops = %fM', flops / 1e6)
        self.logger.info('param size = %fM', params / 1e6)

        # Try move model to multi gpus
        if torch.cuda.device_count() > 1 and self.args.multi_gpus:
            self.logger.info('use: %d gpus', torch.cuda.device_count())
            model = nn.DataParallel(model)
        else:
            self.logger.info('gpu device = %d' % self.device_id)
            torch.cuda.set_device(self.device_id)
        self.model = model.to(self.device)

        criterion = nn.CrossEntropyLoss()
        self.criterion = criterion.to(self.device)
        self.optimizer = torch.optim.SGD(model.parameters(),
                                         self.args.learning_rate,
                                         momentum=self.args.momentum,
                                         weight_decay=self.args.weight_decay)

        self.best_acc_top1 = 0
        # optionally resume from a checkpoint
        if self.args.resume:
            if os.path.isfile(self.args.resume):
                print("=> loading checkpoint {}".format(self.args.resume))
                checkpoint = torch.load(self.args.resume,
                                        map_location=self.device)
                self.dur_time = checkpoint['dur_time']
                self.args.start_epoch = checkpoint['epoch']
                self.best_acc_top1 = checkpoint['best_acc_top1']
                self.args.drop_path_prob = checkpoint['drop_path_prob']
                self.model.load_state_dict(checkpoint['state_dict'])
                self.optimizer.load_state_dict(checkpoint['optimizer'])
                print("=> loaded checkpoint '{}' (epoch {})".format(
                    self.args.resume, checkpoint['epoch']))
            else:
                print("=> no checkpoint found at '{}'".format(
                    self.args.resume))

        # last_epoch aligns the cosine schedule with the resumed epoch
        # (-1 means a fresh run).
        self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            self.optimizer,
            float(self.args.epochs),
            eta_min=0,
            last_epoch=-1
            if self.args.start_epoch == 0 else self.args.start_epoch)
        # reload the scheduler if possible
        # NOTE(review): the checkpoint is re-read from disk here; reusing the
        # `checkpoint` dict loaded above would avoid the second torch.load.
        if self.args.resume and os.path.isfile(self.args.resume):
            checkpoint = torch.load(self.args.resume)
            self.scheduler.load_state_dict(checkpoint['scheduler'])
Пример #13
0
def main():
  """Train the evaluation-phase DARTS network (NetworkCIFAR) on CIFAR-10."""
  if not torch.cuda.is_available():
    logging.info('no gpu device available')
    sys.exit(1)

  np.random.seed(args.seed)
  torch.cuda.set_device(args.gpu)
  cudnn.benchmark = True
  torch.manual_seed(args.seed)
  cudnn.enabled=True
  torch.cuda.manual_seed(args.seed)
  logging.info('gpu device = %d' % args.gpu)
  logging.info("args = %s", args)

  # Fetch the normal/reduction cells learned by train_search; genotypes.DARTS
  # is the learned DARTS_V2 architecture.
  genotype = eval("genotypes.%s" % args.arch)#DARTS_V2 = Genotype(normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('skip_connect', 0), ('skip_connect', 0), ('dil_conv_3x3', 2)], normal_concat=[2, 3, 4, 5], reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('max_pool_3x3', 1)], reduce_concat=[2, 3, 4, 5])
  # Network here is NetworkCIFAR from model.py.
  model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype)
  model = model.cuda()

  logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

  criterion = nn.CrossEntropyLoss()
  criterion = criterion.cuda()
  optimizer = torch.optim.SGD(
      model.parameters(),
      args.learning_rate,
      momentum=args.momentum,
      weight_decay=args.weight_decay
      )

  train_transform, valid_transform = utils._data_transforms_cifar10(args)
  train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
  valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)

  train_queue = torch.utils.data.DataLoader(
      train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=2)

  valid_queue = torch.utils.data.DataLoader(
      valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)

  scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))

  for epoch in range(args.epochs):
    # NOTE(review): pre-1.1 PyTorch step ordering (scheduler before
    # optimizer); preserved as-is.
    scheduler.step()
    logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
    # Drop-path probability ramps linearly over the run.
    model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

    train_acc, train_obj = train(train_queue, model, criterion, optimizer)
    logging.info('train_acc %f', train_acc)

    valid_acc, valid_obj = infer(valid_queue, model, criterion)
    logging.info('valid_acc %f', valid_acc)

    utils.save(model, os.path.join(args.save, 'weights.pt'))
Пример #14
0
def main():
    """Evaluate a trained network on a configurable dataset; supports regression.

    Writes the final test metric to test.txt next to the model weights.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    # args.arch is either an attribute of genotypes or a path to a genotype
    # file; the except branch handles the file case.
    try:
        genotype = eval("genotypes.%s" % args.arch)
    except (AttributeError, SyntaxError):
        genotype = genotypes.load_genotype_from_file(args.arch)

    test_data, OUTPUT_DIM, IN_CHANNELS, is_regression = load_dataset(
        args, train=False)

    model = Network(args.init_channels,
                    OUTPUT_DIM,
                    args.layers,
                    args.auxiliary,
                    genotype,
                    num_channels=IN_CHANNELS)
    model = model.cuda()

    utils.load(model, args.model_path)

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    # MSE for regression datasets, cross-entropy otherwise.
    criterion = nn.CrossEntropyLoss() if not is_regression else nn.MSELoss()
    criterion = criterion.cuda()

    test_queue = torch.utils.data.DataLoader(test_data,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             pin_memory=True,
                                             num_workers=2)

    model.drop_path_prob = args.drop_path_prob
    test_acc, test_obj = infer(test_queue,
                               model,
                               criterion,
                               is_regression=is_regression)
    logging.info('test_acc (R^2 for regression) %f', test_acc)

    # Persist the metric next to the weights for later aggregation.
    weights_foldername = os.path.dirname(args.model_path)
    with open(os.path.join(weights_foldername, "test.txt"), "w") as f:
        f.write(str(test_acc))
Пример #15
0
def main():
    """Evaluate a trained denoising network (MSE + PSNR) on the test split."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    genotype = eval("genotypes.%s" % args.arch)
    # Image-to-image variant of Network: output size matches the crop size.
    model = Network(args.init_channels,
                    CIFAR_CLASSES,
                    args.layers,
                    args.auxiliary,
                    genotype,
                    output_height=args.img_cropped_height,
                    output_width=args.img_cropped_width)
    model = model.cuda()
    utils.load(model, args.model_path)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.MSELoss()
    criterion = criterion.cuda()

    test_transform, test_valid_transform = utils._data_trainsforms_denosining_dataset(
        args)
    # NOTE(review): train=True on a *_TEST dataset class -- presumably the
    # flag selects a folder layout rather than the split; verify.
    test_data = DENOISE_DATASET_TEST(root=args.data,
                                     train_folder=args.train_data,
                                     label_folder=args.label_data,
                                     train=True,
                                     transform=test_transform,
                                     target_transform=test_transform)

    # _, test_transform = utils._data_transforms_cifar10(args)
    # test_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=test_transform)

    test_queue = torch.utils.data.DataLoader(test_data,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             pin_memory=True,
                                             num_workers=2)

    model.drop_path_prob = args.drop_path_prob
    # infer() returns the loss objective and the PSNR metric here.
    test_acc, psnr = infer(test_queue, model, criterion)
    logging.info('test_acc %f', test_acc)
    logging.info('Final psnr_acc %f', psnr)
Пример #16
0
def main():
    """Evaluate a trained network on CIFAR-10, supporting multi-GPU via DataParallel.

    args.gpu is a comma-separated device list (e.g. "0,1").
    """
    np.random.seed(args.seed)

    gpus = [int(i) for i in args.gpu.split(',')]
    if len(gpus) == 1:
        torch.cuda.set_device(int(args.gpu))

    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %s' % args.gpu)
    logging.info("args = %s", args)

    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers,
                    args.auxiliary, genotype)
    model.cuda()

    if len(gpus) > 1:
        print("True")
        model = nn.parallel.DataParallel(model,
                                         device_ids=gpus,
                                         output_device=gpus[0])
        # NOTE(review): unwrapping to .module right after wrapping discards
        # the DataParallel container for all subsequent calls, so multi-GPU
        # scatter/gather never actually runs -- confirm intended.
        model = model.module

    utils.load(model, args.model_path)
    print("If the model is running on GPU:", next(model.parameters()).is_cuda)

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()

    _, test_transform = utils._data_transforms_cifar10(args)
    test_data = dset.CIFAR10(root=args.data,
                             train=False,
                             download=True,
                             transform=test_transform)

    test_queue = torch.utils.data.DataLoader(test_data,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             pin_memory=True,
                                             num_workers=2)

    model.drop_path_prob = args.drop_path_prob
    test_acc, test_obj = infer(test_queue, model, criterion)
    logging.info('test_acc %f', test_acc)
Пример #17
0
def main():
    """Evaluate a checkpointed network on the CIFAR-10 test set.

    Loads weights from a checkpoint dict (key 'model_state_dict') rather
    than via utils.load. Uses module-level globals: ``args``, ``Network``,
    ``utils``, ``infer``, ``genotypes``, ``CIFAR_CLASSES``.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    # NOTE(review): eval() on a CLI string -- assumes args.arch is trusted.
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers,
                    args.auxiliary, genotype)
    model = model.cuda()
    checkpoint = torch.load(args.model_path)
    model.load_state_dict(checkpoint['model_state_dict'])

    # BUG FIX: original code called floor(x, 1), but math.floor() accepts a
    # single argument, so this line raised TypeError at runtime. The {:.1f}
    # format spec already limits the printed size to one decimal place.
    print("param size = {:.1f}MB".format(utils.count_parameters_in_MB(model)))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()

    _, test_transform = utils._data_transforms_cifar10(args)
    test_data = dset.CIFAR10(root=args.data,
                             train=False,
                             download=True,
                             transform=test_transform)

    test_queue = torch.utils.data.DataLoader(test_data,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             pin_memory=True,
                                             num_workers=2)

    model.drop_path_prob = args.drop_path_prob
    test_acc, test_obj = infer(test_queue, model, criterion)
    logging.info('test_acc %f', test_acc)
Пример #18
0
    def build_model(self) -> nn.Module:
        """Build the DARTS network from the current hyperparameters.

        Constructs the model from the genotype encoded in the HPs, prints
        its parameter size and raw parameter count, resets its parameters,
        and returns it.
        """
        genotype = self.get_genotype_from_hps()

        model = Network(
            self.hparams["init_channels"],
            10,  # num_classes
            self.hparams["layers"],
            self.hparams["auxiliary"],
            genotype,
        )
        print("param size = {} MB".format(utils.count_parameters_in_MB(model)))
        total = sum(p.nelement() for p in model.parameters())
        print("param count: {}".format(total))

        # If loading backbone weights, do not call reset_parameters() or
        # call before loading the backbone weights.
        reset_parameters(model)
        return model
Пример #19
0
def main():
    """Evaluate a trained network on the K49 (Kuzushiji-49) test dataset."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    genotype = eval("genotypes.%s" % args.arch)
    # NOTE(review): CIFAR_CLASSES as the class count for a K49 run -- K49
    # has 49 classes; confirm the constant is redefined for this script.
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers,
                    args.auxiliary, genotype)
    model = model.cuda()
    utils.load(model, args.model_path)

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()

    data_dir = '../data/kmnist/'
    # No normalization here -- raw tensors only.
    data_augmentations = transforms.ToTensor()

    # Load the Data here
    # K49(root, train, transform): train=False selects the test split.
    test_dataset = K49(data_dir, False, data_augmentations)

    test_queue = torch.utils.data.DataLoader(test_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             pin_memory=True,
                                             num_workers=2)

    model.drop_path_prob = args.drop_path_prob
    test_acc, test_obj = infer(test_queue, model, criterion)
    logging.info('test_acc %f', test_acc)
Пример #20
0
def main():
    """Load a pretrained network and report its accuracy on the test data.

    Handles both checkpoint layouts: CIFAR-100 checkpoints were saved from
    a DataParallel-wrapped model, CIFAR-10 ones from the bare network.
    """
    if not torch.cuda.is_available():
        sys.exit(1)

    # Step 1: construct the selected architecture.
    selected = eval("genotypes.%s" % args.selected_arch)
    net = Network(args.init_channels, CIFAR_CLASSES, args.layers,
                  args.auxiliary, selected)

    # Step 2: restore pretrained parameters. For CIFAR-100 wrap in
    # DataParallel before loading (state-dict keys carry the "module."
    # prefix); otherwise load raw weights first and wrap afterwards.
    if args.cifar100:
        net = torch.nn.DataParallel(net).cuda()
        net.load_state_dict(torch.load(args.model_path)['net'])
    else:
        utils.load(net, args.model_path)
        net = torch.nn.DataParallel(net).cuda()

    # Disable drop-path for inference (set on both wrapper and module).
    net.module.drop_path_prob = 0
    net.drop_path_prob = 0

    print("param size = %fMB" % utils.count_parameters_in_MB(net))
    criterion = nn.CrossEntropyLoss().cuda()

    # Step 3: load the test data.
    valid_queue = load_data_cifar(args)

    # Step 4: run inference on the test data.
    valid_acc, valid_obj = infer(valid_queue, net, criterion)
    print('-----------------------------------------------')
    print('Average Valid_acc: %f ' % valid_acc)
    print('-----------------------------------------------')
Пример #21
0
def main():
  """Evaluate a trained network on a custom test set from utils2.get_data.

  Bug fixes relative to the original:
  - the test split was unpacked into ``test_dat`` but the DataLoader used
    the undefined name ``test_data`` (NameError);
  - a leftover debugging crash (``a = 2/0``) and 'genotype'/'network'/...
    trace prints are removed.
  """
  if not torch.cuda.is_available():
    logging.info('no gpu device available')
    sys.exit(1)

  # Reproducibility: seed numpy and both the CPU and GPU torch RNGs.
  np.random.seed(args.seed)
  torch.cuda.set_device(args.gpu)
  cudnn.benchmark = True
  torch.manual_seed(args.seed)
  cudnn.enabled=True
  torch.cuda.manual_seed(args.seed)
  logging.info('gpu device = %d' % args.gpu)
  logging.info("args = %s", args)

  # Build the selected architecture and restore its trained weights.
  genotype = eval("genotypes.%s" % args.arch)
  model = Network(args.init_channels, args.n_class, args.layers, args.auxiliary, genotype)
  model = model.cuda()
  utils.load(model, args.model_path)

  logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

  criterion = nn.CrossEntropyLoss()
  criterion = criterion.cuda()

  # Only the last split returned by get_data (the held-out test set) is used.
  _, _, _, _, _, test_data = utils2.get_data(
      "custom", args.data, args.data, args.data, cutout_length=0,
      validation=True, validation2=True, n_class=args.n_class,
      image_size=args.image_size)

  test_queue = torch.utils.data.DataLoader(
      test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)

  model.drop_path_prob = args.drop_path_prob
  test_acc, test_obj = infer(test_queue, model, criterion)
  logging.info('test_acc %f', test_acc)
Пример #22
0
def main():
    """Run single-image inference with a trained DARTS network.

    Falls back to CPU when CUDA is unavailable.

    Bug fix: the original built ``torch.device(dev, idx)`` and discarded
    the result, so the device selection was a no-op and the model/input
    always stayed on CPU. The device object is now kept and both the model
    and the input tensor are moved onto it.
    """
    dev, idx = "cuda", args.gpu
    if not torch.cuda.is_available():
        logging.info('No gpu device available. Will map cpu device to gpu.')
        dev, idx = "cpu", 0

    device = torch.device(dev, idx)
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype)
    utils.load(model, args.model_path)
    model = model.to(device)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    # Resize any input image to the 32x32 resolution the network expects.
    transform = transforms.Compose([
        transforms.Resize((32, 32), 2),
        transforms.ToTensor(),
    ])
    image_path = args.image_path
    image = Image.open(image_path)
    image_tensor = transform(image)
    image_tensor.unsqueeze_(0)  # add batch dim: (C, H, W) -> (1, C, H, W)
    image_tensor = image_tensor.to(device)
    model.drop_path_prob = args.drop_path_prob

    infer(image_tensor, model)
Пример #23
0
def main():
  """Evaluate a trained network on the kinetics-derived video test list."""
  if not torch.cuda.is_available():
    logging.info('no gpu device available')
    sys.exit(1)

  # Reproducibility: seed numpy and both the CPU and GPU torch RNGs.
  np.random.seed(args.seed)
  torch.cuda.set_device(args.gpu)
  cudnn.benchmark = True
  torch.manual_seed(args.seed)
  cudnn.enabled = True
  torch.cuda.manual_seed(args.seed)
  logging.info('gpu device = %d' % args.gpu)
  logging.info("args = %s", args)

  # Build the chosen architecture and restore its trained weights.
  net = Network(args.init_channels, ntu_CLASSES, args.layers, args.auxiliary,
                eval("genotypes.%s" % args.arch))
  net = net.cuda()
  utils.load(net, args.model_path)

  logging.info("param size = %fMB", utils.count_parameters_in_MB(net))

  loss_fn = nn.CrossEntropyLoss().cuda()

  # NOTE(review): the test list path is hard-coded to one machine's disk.
  test_set = dataset.MyDataset('/media/lab540/79eff75a-f78c-42f2-8902-9358e88bf654/lab540/Neura_auto_search/datasets/kinetics_convert/test.txt',
                               transform = transform.ToTensor())
  valid_queue = torch.utils.data.DataLoader(test_set, batch_size=args.batch_size, shuffle=False, num_workers=1)

  net.drop_path_prob = args.drop_path_prob
  test_acc, test_obj = infer(valid_queue, net, loss_fn)
  logging.info('test_acc %f', test_acc)
Пример #24
0
def main():
    """Evaluate a trained DARTS network on the CIFAR-10 test split."""
    np.random.seed(args.seed)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    # Bug fix: the conditional expression previously applied to the whole
    # "%"-formatted string ("A if cond else B" binds looser than "%"), so
    # CPU-only hosts logged just 'cpu' with no "device = " prefix.
    # Parenthesize the ternary so it only selects the device name.
    logging.info(
        "device = %s" %
        ('cuda:{}'.format(args.gpu) if torch.cuda.is_available() else 'cpu'))
    logging.info("args = %s", args)

    # Build the selected architecture and restore its trained weights.
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers,
                    args.auxiliary, genotype).to(device)
    utils.load(model, args.model_path)

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss().to(device)

    _, test_transform = utils._data_transforms_cifar10(args)
    test_data = dset.CIFAR10(root=args.data,
                             train=False,
                             download=True,
                             transform=test_transform)

    test_queue = torch.utils.data.DataLoader(
        test_data,
        batch_size=args.batch_size,
        shuffle=False,
        pin_memory=True,
        num_workers=2,
    )

    model.drop_path_prob = args.drop_path_prob
    test_acc, test_obj = infer(test_queue, model, criterion)
    logging.info("test_acc %f", test_acc)
Пример #25
0
  def random_generate(self):
    """Randomly sample architectures for random search.

    Draws random alpha weights until the decoded normal cell contains
    exactly ``num_identity`` skip connections and the resulting network's
    FLOPs are at or above the configured threshold; repeats until
    ``num_arch`` architectures have been collected.
    """
    rs_conf = SearchControllerConf['random_search']
    num_skip_connect = rs_conf['num_identity']
    num_arch = rs_conf['num_arch']
    flops_threshold = rs_conf['flops_threshold']

    # Number of mixed edges across all steps: k = 2 + 3 + 4 + 5 = 14.
    k = sum(1 for i in range(self._steps) for n in range(2 + i))
    num_ops = len(PRIMITIVES)

    self.random_arch_list = []
    for arch_idx in range(num_arch):
      torch.manual_seed(random.randint(0, 1000))
      while True:
        # Sample fresh architecture weights and decode them to a genotype.
        self.alphas_normal = Variable(1e-3*torch.randn(k, num_ops).cuda(), requires_grad=False)
        self.alphas_reduce = Variable(1e-3*torch.randn(k, num_ops).cuda(), requires_grad=False)
        arch = self.genotype()
        # Keep only genotypes whose normal cell has the exact number of
        # skip connections required.
        op_names, indices = zip(*arch.normal)
        skip_count = sum(1 for name in op_names if name == 'skip_connect')
        if skip_count == num_skip_connect:
          # FLOPs gate: accept only sufficiently large candidate models.
          candidate = NetworkCIFAR(36, 10, 20, True, arch, False)
          flops, params = profile(candidate, inputs=(torch.randn(1, 3, 32, 32),), verbose=False)
          if flops / 1e6 >= flops_threshold:
            self.random_arch_list += [('arch_' + str(arch_idx), arch)]
            break

    return self.random_arch_list
Пример #26
0
def main():
  #判断是否有GPU可用
  if not torch.cuda.is_available():
    logging.info('no gpu device available')
    sys.exit(1)

  np.random.seed(args.seed)
  torch.cuda.set_device(args.gpu) #设置当前设备
  cudnn.benchmark = True #加速计算
  torch.manual_seed(args.seed) #为cpu设置随机数种子
  cudnn.enabled=True #cuDNN是一个GPU加速深层神经网络原语库,开启cudnn
  torch.cuda.manual_seed(args.seed)#为当前GPU设置随机种子
  #打印日志信息
  logging.info('gpu device = %d' % args.gpu)
  #gpu device = 0
  logging.info("args = %s", args)
  '''
  args = Namespace(arch='DARTS', auxiliary=False, auxiliary_weight=0.4, batch_size=96, 
  cutout=False, cutout_length=16, data='../data', drop_path_prob=0.2, epochs=600, gpu=0,
  grad_clip=5, init_channels=36, layers=20, learning_rate=0.025, model_path='saved_models',
  momentum=0.9, report_freq=50, save='eval-EXP-20190618-170816', seed=0, weight_decay=0.0003)
  '''

  genotype = eval("genotypes.%s" % args.arch) #应该是输出一个框架类型。eval() 函数用来执行一个字符串表达式,并返回表达式的值
  
  #from model import NetworkCIFAR as Network #文件模块
  model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype)
  #model = Network(通道个数=36, CIFAR_CLASSES=10, 总体layers=20, args.auxiliary使用辅助塔, genotype=框架类型)
  model = model.cuda()

  #打印模型参数的大小,即所占空间
  logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
  #param size = 3.349342MB
    
  criterion = nn.CrossEntropyLoss() #定义损失函数
  criterion = criterion.cuda()
  
  #定义优化器
  optimizer = torch.optim.SGD(
      model.parameters(),
      args.learning_rate,
      momentum=args.momentum,
      weight_decay=args.weight_decay
      )
  #获得预处理之后的训练集和验证集
  train_transform, valid_transform = utils._data_transforms_cifar10(args)
  #获取数据集
  train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
  valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)
  '''
  Files already downloaded and verified
  Files already downloaded and verified
  '''  
  
  
  #对数据进行封装为Tensor,主要用来读取数据集
  '''
  pin_memory:If True, the data loader will copy tensors into CUDA pinned memory before returning them,在数据返回前,是否将数据复制到CUDA内存中
  num_workers:加快数据导入速度,工作者数量,默认是0。使用多少个子进程来导入数据。设置为0,就是使用主进程来导入数据。注意:这个数字必须是大于等于0的,不能太大,2的时候报错
  '''
  train_queue = torch.utils.data.DataLoader(
      train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=1)

  valid_queue = torch.utils.data.DataLoader(
      valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=1)

  #优化器的学习率调整策略:采用CosineAnnealingLR,余弦退火调整学习率
  scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))
  
  #默认epochs=600
  for epoch in range(args.epochs):
        
    scheduler.step() #更新权重
    logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
    #epoch 0 lr 2.500000e-02
    
    #进行dropout:大小与模型的深度相关,模型深度越深,dropout的概率越大,最大0.2
    model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
    #调用下面定义的函数train()
    train_acc, train_obj = train(train_queue, model, criterion, optimizer)
    '''
    train_queue:要训练的队列
    model:采用的model;
    criterion:定义的损失函数
    optimizer:所采用的优化器
    '''
    logging.info('train_acc %f', train_acc) #打印当前epoch在训练集上的精度
    
    #计算在验证集上的精度
    valid_acc, valid_obj = infer(valid_queue, model, criterion)
    logging.info('valid_acc %f', valid_acc)
    #保存模型参数
    utils.save(model, os.path.join(args.save, 'weights.pt'))
Пример #27
0
def process_logs(args) -> DataFrame:
    """Scrape training, search and profiling metrics out of DARTS logs.

    For each train log in ``args.train`` one row is assembled from three
    regex-based passes:
      1. evaluation-stage metrics from the train log itself,
      2. search-stage metrics located in the ``args.search`` logs by
         matching the architecture's genotype string,
      3. model profiling (params/FLOPs/latency) of the rebuilt network.
    A failure in any pass pads the row with ``None`` so the DataFrame stays
    rectangular. The result is printed, written to ``args.output`` as CSV,
    and returned.
    """
    data = []
    for log in args.train:
        row = []
        try:
            # Evaluation stage metrics. str(readlines()) flattens the file
            # into a single string where newlines appear literally as "\n",
            # which the regexes below rely on.
            lines = str(log.readlines())
            match = re.search(r"arch='(?P<name>.*?)'", lines)
            name = match.group("name")
            row.append(name)
            # l2_loss_2e01 -> 2e-01
            weight_value = float(name.split("_")[2].replace("e", "e-"))
            row.append(weight_value)
            match = re.search(r"param size.*?(?P<value>\d*\.\d+)MB", lines)
            param_size = float(match.group("value"))
            row.append(param_size)
            for metric in [
                    TRAIN_LOSS, TRAIN_ACC, VALID_LOSS, VALID_ACC, TEST_LOSS,
                    TEST_ACC
            ]:
                # Take the last occurrence of each metric (final epoch).
                value = float(
                    re.findall(rf'{metric}(?:uracy)? (?P<value>\d*\.\d+)',
                               lines)[-1])
                row.append(value)
        except Exception as e:
            print(f"Error '{e}' while processing file {log.name}")
            while len(row) < 9:
                row.append(None)

        try:
            # Search stage metrics: find the search log whose stats dict
            # contains this architecture's genotype.
            genotype = genotypes.__dict__[name]
            genotype_str = str(genotype)
            match = False
            for s_log in args.search:
                s_lines = str(s_log.readlines())
                s_log.seek(0, 0)  # rewind so later rows can re-read this log
                # ((?!\\n).)* = anything except new line escaped
                match = re.search(
                    r"stats = (?P<stats>{((?!\\n).)*" +
                    re.escape(genotype_str) + r".*?})\\n\",", s_lines)
                if match:
                    # NOTE: eval() of log content — only safe on trusted logs.
                    stats = eval(match.group("stats"))
                    # L2 loss case
                    if list(stats.get(L1_LOSS).keys())[0][0] == -1:
                        LOSS = L2_LOSS
                    # L1 loss case
                    elif list(stats.get(L2_LOSS).keys())[0][0] == -1:
                        LOSS = L1_LOSS
                    else:
                        raise Exception("L1 and L2 loss have w = -1")
                    values = list(stats.get(LOSS).values())[0]
                    search_criterion_loss = values[CRITERION_LOSS]
                    search_reg_loss = values[REG_LOSS]
                    row.append(search_criterion_loss)
                    row.append(search_reg_loss)
                    search_acc = values[VALID_ACC]
                    row.append(search_acc)
                    break
            if not match:
                raise Exception(f"Didn't find {name} on eval logs")
        except Exception as e:
            print(f"Error '{e}' while processing file {log.name}")
            while len(row) < 12:
                row.append(None)

        try:
            # Model profiling: rebuild the network with the hyperparameters
            # recorded in the log, then measure params/FLOPs/latency.
            genotype = genotypes.__dict__[name]
            match = re.search(r"init_channels=(?P<value>\d+)", lines)
            init_channels = int(match.group("value"))
            match = re.search(r"layers=(?P<value>\d+)", lines)
            layers = int(match.group("value"))
            match = re.search(r"drop_path_prob=(?P<value>\d+\.\d+)", lines)
            drop_path_prob = float(match.group("value"))
            match = re.search(r"auxiliary=(?P<value>\w+)", lines)
            # Bug fix: bool("False") is True, so any match used to enable
            # the auxiliary tower. Compare against the literal instead.
            auxiliary = match.group("value") == "True"
            model = NetworkCIFAR(init_channels, 10, layers, auxiliary,
                                 genotype)
            model.cuda()
            model.drop_path_prob = drop_path_prob
            parameters, net_flops, total_time_gpu, total_time_cpu = model_profiling(
                model, name)
            row.append(parameters)
            row.append(net_flops)
            row.append(total_time_gpu)
            row.append(total_time_cpu)
        except Exception as e:
            print(f"Error '{e}' while processing file {log.name}")

        if len(row) > 0:
            data.append(row)
    df = pd.DataFrame(data,
                      columns=[
                          MODEL_NAME, WEIGHT, PARAMETERS_DARTS, TRAIN_LOSS,
                          TRAIN_ACC, VALID_LOSS, VALID_ACC, TEST_LOSS,
                          TEST_ACC, SEARCH_CRIT_LOSS, SEARCH_REG_LOSS,
                          SEARCH_ACC, PARAMETERS_OFA, FLOPS, LATENCY_GPU,
                          LATENCY_CPU
                      ])
    df.set_index(keys=MODEL_NAME, inplace=True)
    df.sort_values(by=WEIGHT, inplace=True, ascending=False)
    pd.set_option("display.max_rows", None, "display.max_columns", None,
                  "display.width", None)
    print(df)
    df.to_csv(args.output)
    return df
Пример #28
0
def main():
	"""Train a fixed SGAS/DARTS genotype on CIFAR-10 or CIFAR-100.

	Supports resuming from a hard-coded checkpoint (``args.resume``),
	tracks the best validation accuracy, and checkpoints every epoch.
	"""
	if not torch.cuda.is_available():
		logging.info('no gpu device available')
		sys.exit(1)
	
	# Reproducibility: seed numpy and both CPU/GPU torch RNGs.
	np.random.seed(args.seed)
	# torch.cuda.set_device(args.gpu)
	device = torch.device("cuda")
	cudnn.benchmark = True
	torch.manual_seed(args.seed)
	cudnn.enabled=True
	torch.cuda.manual_seed(args.seed)
	logging.info('gpu device = %d' % args.gpu)
	logging.info("args = %s", args)
	
	# read data
	# NOTE(review): dataset paths are hard-coded to one machine; if
	# args.dataset is neither 'cifar10' nor 'cifar100', train_data is
	# never defined and the code below raises NameError.
	train_transform, valid_transform = utils._data_transforms_cifar10(args)
	if args.dataset == 'cifar10':
		args.data = '/home/work/dataset/cifar'
		train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
		valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)
		classes = 10
	if args.dataset == 'cifar100':
		args.data = '/home/work/dataset/cifar100'
		train_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=train_transform)
		valid_data = dset.CIFAR100(root=args.data, train=False, download=True, transform=valid_transform)
		classes = 100
	train_queue = torch.utils.data.DataLoader(
		train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=2)
	valid_queue = torch.utils.data.DataLoader(
		valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)
	
	# model
	genotype = eval("genotypes.%s" % args.arch)
	model = Network(args.init_channels, classes, args.layers, args.auxiliary, genotype)
	model = model.cuda()
	model.drop_path_prob = args.drop_path_prob
	
	# Report model cost (FLOPs via thop.profile) and parameter size.
	flops, params = profile(model, inputs=(torch.randn(1, 3, 32, 32).cuda(),), verbose=False)
	logging.info('flops = %fM', flops / 1e6)
	logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
	
	criterion = nn.CrossEntropyLoss()
	criterion = criterion.cuda()
	optimizer = torch.optim.SGD(
		model.parameters(),
		args.learning_rate,
		momentum=args.momentum,
		weight_decay=args.weight_decay
	)
	
	scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))
	best_val_acc = 0.
	
	if args.resume:
		# NOTE(review): checkpoint path and best_val_acc are hard-coded for
		# a specific prior run — update both when resuming a different run.
		# state = torch.load('/home/work/lixudong/code_work/sgas/cnn/full_train_s3_1-20200608/weights.pt')
		# state = torch.load('/home/work/lixudong/code_work/sgas/cnn/full_train_s2_factor1-20200609/weights.pt', map_location='cpu')
		# state = torch.load('/home/work/lixudong/code_work/sgas/cnn/full_train_s3_factor1-20200609/weights.pt', map_location='cpu')
		# state = torch.load('/home/work/lixudong/code_work/sgas/cnn/full_train_s3_0-20200608/weights.pt', map_location='cpu')
		# state = torch.load('/home/work/lixudong/code_work/sgas/cnn/full_train_s2_0-20200608/weights.pt', map_location='cpu')
		state = torch.load('/home/work/lixudong/code_work/sgas/cnn/full_train_s3_2-20200608/weights.pt', map_location='cpu')
		model.load_state_dict(state)
		model = model.to(device)
		# Fast-forward the LR schedule to the resume epoch.
		for i in range(args.start_epoch):
			scheduler.step()
		best_val_acc = 97.19#97.34#97.32#94.92#94.6#97.2
		
	for epoch in range(args.start_epoch, args.epochs):
		# Pre-1.1 PyTorch ordering: step the scheduler before training.
		scheduler.step()
		logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
		# Drop-path probability ramps linearly over training.
		model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
		train_acc, train_obj = train(train_queue, model, criterion, optimizer)
		logging.info('train_acc %f', train_acc)
		
		# Evaluate and keep a separate checkpoint of the best weights.
		with torch.no_grad():
			valid_acc, valid_obj = infer(valid_queue, model, criterion)
			if valid_acc > best_val_acc:
				best_val_acc = valid_acc
				utils.save(model, os.path.join(args.save, 'best_weights.pt'))
			# logging.info('valid_acc %f\tbest_val_acc %f', valid_acc, best_val_acc)
			logging.info('val_acc: {:.6}, best_val_acc: \033[31m{:.6}\033[0m'.format(valid_acc, best_val_acc))
		
		# Full resumable checkpoint (model + optimizer + bookkeeping).
		state = {
			'epoch': epoch,
			'model_state': model.state_dict(),
			'optimizer': optimizer.state_dict(),
			'best_val_acc': best_val_acc
		}
		torch.save(state, os.path.join(args.save, 'weights.pt.tar'))
Пример #29
0
def main(args):
    """Train a DARTS genotype on a binding-affinity regression dataset.

    The dataset is split 70/30 into train/validation subsets.

    Bug fix: the original computed ``indices`` and ``split`` but never used
    them — both DataLoaders iterated the full dataset, so validation ran on
    the training data. Subset samplers now hold out the last 30%.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    # Reproducibility: seed numpy and both CPU/GPU torch RNGs.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers,
                    args.auxiliary, genotype)
    model = model.cuda()

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    # Regression target -> mean-squared-error loss.
    criterion = nn.MSELoss()
    criterion = criterion.cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    train_data = BindingDataset(args.annofile, args.seqfile)

    # 70/30 train/validation split over the same underlying dataset.
    num_train = len(train_data)
    indices = list(range(num_train))
    split = int(np.floor(0.7 * num_train))

    # Samplers replace shuffle=True/False; sampler and shuffle are
    # mutually exclusive DataLoader options.
    train_queue = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=True,
        num_workers=2)

    valid_queue = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:]),
        pin_memory=True,
        num_workers=2)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs))

    for epoch in range(args.epochs):
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        # Drop-path probability ramps linearly over training.
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('train_acc %f', train_acc)

        valid_acc, valid_obj = infer(valid_queue, model, criterion)
        logging.info('valid_acc %f', valid_acc)

        # Checkpoint the model weights every epoch.
        utils.save(model, os.path.join(args.save, 'weights.pt'))
Пример #30
0
def main():
    """Train a fixed genome architecture on CIFAR-10 end to end.

    Parses CLI arguments, seeds all RNGs, builds the fixed network for the
    chosen genome, then runs the train/validate loop for ``args.epochs``
    epochs, saving the weights after every epoch.
    """
    # Setup
    args = cmd_argument_parser()
    create_logger(args.save)

    # Set the gpu device to be used
    # NOTE: Only operates on a single GPU
    if torch.cuda.is_available():
        torch.cuda.set_device(int(args.gpu))
    else:
        logging.info('no gpu device available')
        sys.exit(1)

    # Ensure seeds are set
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    # Hardware specific tuning
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True

    # Get the specific architecture to train
    genotype = genomes[args.arch]

    # Create the fixed network
    # Note: This differs from the Network used in model_search.py
    # TODO: Update the Network class
    model = Network(C=args.init_channels,
                    num_classes=CIFAR_CLASSES,
                    layers=args.layers,
                    auxiliary=args.auxiliary,
                    genotype=genotype)
    model = model.cuda()

    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    # The loss function
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()

    # Optimizer used to adjust the models parameters as well as an optimizer
    # of the learning rate
    optimizer = torch.optim.SGD(params=model.parameters(),
                                lr=args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    lr_scheduler = CosineAnnealingLR(optimizer=optimizer,
                                     T_max=float(args.epochs))

    # Get the transforms for both the train and validation data
    train_transform, valid_transform = utils._data_transforms_cifar10(args)

    # Get the data from torchvision's datasets
    train_data = CIFAR10(root=args.data,
                         train=True,
                         download=True,
                         transform=train_transform)
    valid_data = CIFAR10(root=args.data,
                         train=False,
                         download=True,
                         transform=valid_transform)

    # Create Dataloaders for both
    train_queue = DataLoader(train_data,
                             batch_size=args.batch_size,
                             shuffle=True,
                             pin_memory=True,
                             num_workers=0)

    valid_queue = DataLoader(valid_data,
                             batch_size=args.batch_size,
                             shuffle=False,
                             pin_memory=True,
                             num_workers=0)

    for epoch in range(args.epochs):
        logging.info(f'epoch = {epoch}')
        logging.info(f'lr = {lr_scheduler.get_last_lr()}')

        # More likely to drop a path as epochs progress
        model.drop_path_prob = args.drop_path_prob * (epoch / args.epochs)

        train_acc, train_obj = train(train_queue, model, criterion, optimizer,
                                     args)
        # Validation needs no gradients.
        with torch.no_grad():
            valid_acc, valid_obj = infer(valid_queue, model, criterion, args)

        logging.info(f'train_acc = {train_acc}')
        logging.info(f'valid_acc = {valid_acc}')

        # Save the model for each epoch
        utils.save(model, os.path.join(args.save, 'weights.pt'))

        # Modern PyTorch (>=1.1) ordering: step the scheduler after the
        # optimizer has stepped during train().
        lr_scheduler.step()