Example #1
def run():
    t = time.time()
    print('net_cache : ', args.net_cache)

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    model = ResNet50()
    model = nn.DataParallel(model.cuda())

    if os.path.exists(args.net_cache):
        print('loading checkpoint {} ..........'.format(args.net_cache))
        checkpoint = torch.load(args.net_cache)
        best_top1_acc = checkpoint['best_top1_acc']
        model.load_state_dict(checkpoint['state_dict'])
        #print("loaded checkpoint {} epoch = {}" .format(args.net_cache, checkpoint['epoch']))

    else:
        print('cannot find {}'.format(args.net_cache))
        return

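    # search space size: one state per stage plus one per residual block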
    num_states = len(stage_repeat) + sum(stage_repeat)
    search(model, criterion, num_states)

    total_searching_time = time.time() - t
    print('total searching time = {:.2f} hours'.format(total_searching_time /
                                                       3600),
          flush=True)
Example #2
def RefineNet():
    resnet = ResNet50()

    # resnet.summary()

    endpoints = [
        'activation_9', 'activation_21', 'activation_39', 'activation_48'
    ]
    f = [resnet.get_layer(name).output for name in endpoints]

    g = [None, None, None, None]
    h = [None, None, None, None]

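    # project each backbone endpoint to 256 channels with a 1x1 convolution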
    for i in range(4):
        h[i] = Conv2D(256, 1, padding="same")(f[i])

    for i in range(4):
        print(i, h[i].shape)

    g[0] = RefineBlock(high_inputs=None, low_inputs=h[0])
    print(0, g[0], h[1])
    g[1] = RefineBlock(g[0], h[1])
    print(1, g[1], h[2])
    g[2] = RefineBlock(g[1], h[2])
    print(2, g[2], h[3])
    g[3] = RefineBlock(g[2], h[3])
    print(3)
    F_score = Conv2D(21, 1, activation="relu", padding="same")(g[3])

    return Model(resnet.inputs, F_score)
Example #3
def main():
    args = parse_args()
    operators = create_operators(args.interpolation)
    # assign the place
    place = 'gpu:{}'.format(ParallelEnv().dev_id) if args.use_gpu else 'cpu'
    place = paddle.set_device(place)

    net = ResNet50()
    load_dygraph_pretrain(net, args.pretrained_model)

    img = cv2.imread(args.image_file, cv2.IMREAD_COLOR)
    data = preprocess(img, operators)
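    # add a batch dimension before converting to a Paddle tensor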
    data = np.expand_dims(data, axis=0)
    data = paddle.to_tensor(data)
    net.eval()
    _, fm = net(data)
    assert 0 <= args.channel_num < fm.shape[1], \
        "channel index out of range, should be in [0, {}) but got {}".format(
            fm.shape[1], args.channel_num)

    fm = (np.squeeze(fm[0][args.channel_num].numpy()) * 255).astype(np.uint8)
    fm = cv2.resize(fm, (img.shape[1], img.shape[0]))
    if args.save_path is not None:
        print("the feature map is saved in path: {}".format(args.save_path))
        cv2.imwrite(args.save_path, fm)
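Example #4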
def trainResnet(checkpoint = None):
    X = tf.placeholder("float", [None, 28, 28, 1], "X")
    Y = tf.placeholder("float", [None, 10], "Y")

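    # the final positional argument is presumably a training-mode flag (test_resnet below passes False)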
    resnet = ResNet50(X, 10, True)
    output_Y = resnet.model()
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = output_Y, labels = Y))
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)
    pred = tf.argmax(output_Y, 1)
    acc = tf.reduce_mean(tf.cast(tf.equal(pred, tf.argmax(Y, 1)), "float"))
    print(tf.trainable_variables())
    summary_loss = tf.summary.scalar("loss", loss)
    saver = tf.train.Saver(max_to_keep = 5)

    epochs = 5
    batch_size = 50
    iteration = 0

    merged_summary_op = tf.summary.merge_all()

    with tf.Session(config = tf.ConfigProto(gpu_options = tf.GPUOptions(allow_growth = True))) as sess:
        sess.run(tf.global_variables_initializer())
        if checkpoint:
            saver.restore(sess, tf.train.latest_checkpoint(checkpoint))
        summary_writer = tf.summary.FileWriter(resnet_log_directory, tf.get_default_graph())
        for e in range(epochs):
            for i in range(mnist.train.num_examples // batch_size):
                iteration += 1
                batch = mnist.train.next_batch(batch_size)
                _, accuracy, summary = sess.run([train_op, acc, merged_summary_op], \
                                                 feed_dict = {X:batch[0].reshape([-1, 28, 28, 1]), Y:batch[1]})
                summary_writer.add_summary(summary, iteration)
                print(accuracy)
            saver.save(sess, save_path = resnet_model_directory + str(e) + ".ckpt")
Example #5
def train():

    images = tf.placeholder(tf.float32,
                            [BATCH_SIZE, MODEL_SIZE, MODEL_SIZE, rgb],
                            name="images")
    labels = tf.placeholder(tf.float32, [BATCH_SIZE, CAT_NUM], name='label')

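    # inputs are supplied as a {layer_name: tensor} dict, typical of caffe-tensorflow conversions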
    net = ResNet50({'data': images})
    final_layer = net.layers['prob']
    pred = tf.nn.softmax(final_layer)
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=final_layer,
                                                labels=labels), 0)
    opt = tf.train.GradientDescentOptimizer(LEARNING_RATE)
    train_op = opt.minimize(loss)

    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.7
    saver = tf.train.Saver()
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        resized_data = transfer_data()
        for i in range(RANGE):
            X_image, Y_flat = next(resized_data)
            #print X_image, Y_flat
            np_loss, np_pred, _ = sess.run([loss, pred, train_op],
                                           feed_dict={
                                               images: X_image,
                                               labels: Y_flat
                                           })
            if i % 10 == 0:
                print('Iteration: ', i, np_loss)
            if i % 100 == 0:
                saver.save(sess, STORED_PATH + 'model.ckpt')
    print("Succ!")
Example #6
def load_pretrained_cifar_resnet50():
    """ Helper fxn to initialize/load a pretrained resnet-50 """

    state_dict = torch.load(c.target_weight_path)['state_dict']
    classifier_net = ResNet50()
    classifier_net = torch.nn.DataParallel(classifier_net)
    classifier_net.apply(weights_init)
    classifier_net.load_state_dict(state_dict, strict=True)

    return classifier_net
Example #7
def main():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("device : ", device)
    model = ResNet50()
    model.to(device)
    x = torch.randn(1, 3, 224, 224)
    cuinput = x.to(device)
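    # profile one forward pass, then repeat it under emit_nvtx() so autograd ops
    # are tagged with NVTX ranges visible in nvprof/Nsight timelines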
    with torch.cuda.profiler.profile():
        model(cuinput)
        with torch.autograd.profiler.emit_nvtx():
            model(cuinput)
Example #8
def get_new_model(tmp_scale=True, num_classes=args.num_classes):
    if args.model == 'resnet18':
        return ResNet18(tmp_scale=tmp_scale, num_classes=num_classes)
    elif args.model == 'resnet50':
        return ResNet50(tmp_scale=tmp_scale, num_classes=num_classes)
    elif args.model == 'resnet101':
        return ResNet101(tmp_scale=tmp_scale, num_classes=num_classes)
    elif args.model == 'inceptionv4':
        return inceptionv4(tmp_scale=tmp_scale, num_classes=num_classes)
    elif args.model == 'densenet':
        return DenseNet(tmp_scale=tmp_scale)
    raise ValueError('unknown model: {}'.format(args.model))
Example #9
def main():
    test_patterns = [
        ('VGGNetBN', VGGNetBN(17), 224),
        ('VGGNetBNHalf', VGGNetBN(17, 32), 224),
        ('VGGNetBNQuater', VGGNetBN(17, 16), 224),
        ('GoogLeNetBN', GoogLeNetBN(17), 224),
        ('GoogLeNetBNHalf', GoogLeNetBN(17, 16), 224),
        ('GoogLeNetBNQuater', GoogLeNetBN(17, 8), 224),
        ('ResNet50', ResNet50(17), 224),
        ('ResNet50Half', ResNet50(17, 32), 224),
        ('ResNet50Quater', ResNet50(17, 16), 224),
        ('SqueezeNet', SqueezeNet(17), 224),
        ('SqueezeNetHalf', SqueezeNet(17, 8), 224),
        ('MobileNet', MobileNet(17), 224),
        ('MobileNetHalf', MobileNet(17, 16), 224),
        ('MobileNetQuater', MobileNet(17, 8), 224),
        ('InceptionV4', InceptionV4(dim_out=17), 299),
        ('InceptionV4S',
         InceptionV4(dim_out=17,
                     base_filter_num=6,
                     ablocks=2,
                     bblocks=1,
                     cblocks=1), 299),
        ('InceptionResNetV2', InceptionResNetV2(dim_out=17), 299),
        ('InceptionResNetV2S',
         InceptionResNetV2(dim_out=17,
                           base_filter_num=8,
                           ablocks=1,
                           bblocks=2,
                           cblocks=1), 299),
        ('FaceClassifier100x100V', FaceClassifier100x100V(17), 100),
        ('FaceClassifier100x100V2', FaceClassifier100x100V2(17), 100)
    ]

    for model_name, model, test_size in test_patterns:
        oltp_cpu, batch_gpu = check_speed(model, test_images[test_size])
        print('{}\t{:.02f}\t{:.02f}'.format(model_name, oltp_cpu * 1000,
                                            batch_gpu * 1000))
Example #10
    def __init__(self,
                 num_classes=16,
                 n_blocks=[3, 4, 6, 3],
                 atrous_rates=[6, 12, 18],
                 multi_grids=[1, 2, 1],
                 output_stride=16):
        super(DeepLabV3, self).__init__()

        print(ch)
        self.num_classes = num_classes
        self.backbone = ResNet50(n_blocks, multi_grids, output_stride)
        self.add_module("aspp", _ASPP(ch[5], 256, atrous_rates))
        concat_ch = 256 * (len(atrous_rates) + 2)
        self.add_module("fc1", _ConvBnReLU(concat_ch, 256, 1, 1, 0, 1))
        self.add_module("fc2", nn.Conv2d(256, num_classes, kernel_size=1))
def test_resnet(checkpoint = resnet_model_directory):
    X = tf.placeholder("float", [None, 28, 28, 1], "X")
    Y = tf.placeholder("float", [None, 10], "Y")

    resnet = ResNet50(X, 10, False)
    output_Y = resnet.model()
    acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(output_Y, 1), tf.argmax(Y, 1)), "float"))
    saver = tf.train.Saver()
    print(tf.trainable_variables())

    with tf.Session(config = tf.ConfigProto(gpu_options = tf.GPUOptions(allow_growth = True))) as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, tf.train.latest_checkpoint(checkpoint))
        accuracy = sess.run([acc], feed_dict = {X:teX, Y:teY})
    print(accuracy)
Example #12
def get_model(model_name):
    if model_name=='CNN': return CNN()
    if model_name=='CNN_GAP': return CNN(GAP=True)
    if model_name=='VGG16': return VGG16(batch_norm=False)
    if model_name=='VGG11_BN': return VGG11(batch_norm=True)
    if model_name=='VGG13_BN': return VGG13(batch_norm=True)
    if model_name=='VGG16_BN': return VGG16(batch_norm=True)
    if model_name=='VGG11_GAP': return VGG11(batch_norm=True, GAP=True)
    if model_name=='VGG13_GAP': return VGG13(batch_norm=True, GAP=True)
    if model_name=='VGG16_GAP': return VGG16(batch_norm=True, GAP=True)
    if model_name=='ResNet18': return ResNet18()
    if model_name=='ResNet34': return ResNet34()
    if model_name=='ResNet50': return ResNet50()
    if model_name=='ResNet101': return ResNet101()
    raise NotImplementedError('Model has not been implemented.')
Example #13
def init_ensemble(paths):
    weights_path = ROOT + '/resnet50_tf_notop.h5'
    ensemble = Sequential()
    ensemble.add(
        ResNet50(input_shape=(img_side, img_side, 3),
                 weights_path=weights_path))
    heads = [ensemble.output for _ in xrange(len(paths))]
    for i, path in enumerate(paths):
        temp_model = load_model(path)
        for layer in temp_model.layers[head_start:]:
            print layer.name
            heads[i] = layer(heads[i])
    merged = merge(heads, mode='ave')
    return Model(ensemble.input, merged)
Example #14
def main():
    args = parser.parse_args()

    # model

    resnet = ResNet50()
    model = BYOL.BYOL(resnet)
    optimizer = paddle.optimizer.Adam(learning_rate=args.lr,
        parameters=model.parameters())

    # data

    root = os.getcwd()
    traindir = os.path.join(root, 'testimg')


    # augmentation utils

    normalize = transforms.Normalize(mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
                                     std=[0.229 * 255, 0.224 * 255, 0.225 * 255], data_format='HWC')

    augmentation = [
        byol.transforms.RandomApply([
            transforms.ColorJitter(0.8, 0.8, 0.8, 0.2),
        ], p=0.3),
        byol.transforms.RandomGrayscale(p=0.2),
        byol.transforms.RandomApply([byol.transforms.GaussianBlur((1.0, 2.0))], p=0.2),
        transforms.RandomResizedCrop(224, scale=(0.2, 1.)),
        normalize
    ]

    byoltransforms = byol.transforms.TwoCropsTransform(transforms.Compose(augmentation))

    cifar10_train = Cifar10(mode='train', transform=byoltransforms)
    train_loader = paddle.io.DataLoader(cifar10_train,
                                        shuffle=True,
                                        batch_size=args.bs)


    for epoch in range(args.epochs):
        train(train_loader, model, optimizer, epoch, args)
Example #15
# %%
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])

trainset = datasets.CIFAR10(root='/tmp/data',
                            train=True,
                            download=True,
                            transform=transform)
trainset = Subset(trainset, range(100))
trainloader = DataLoader(trainset, batch_size=50, shuffle=False, num_workers=1)

# %%
from resnet import ResNet50
resnet = ResNet50().cuda()

layer_collection = LayerCollection.from_model(resnet)
v = random_pvector(layer_collection, device='cuda')

print(f'{layer_collection.numel()} parameters')

# %%
# compute timings and display FIMs


def perform_timing():
    timings = dict()

    for repr in [PMatImplicit, PMatDiag, PMatEKFAC, PMatKFAC, PMatQuasiDiag]:
Example #16
def main():
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    random.seed(args.seed)
    np.random.seed(
        args.data_seed)  # cutout and load_corrupted_data use np.random
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = False
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    cudnn.deterministic = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    if args.arch == 'resnet':
        model = ResNet18(CIFAR_CLASSES).cuda()
        args.auxiliary = False
    elif args.arch == 'resnet50':
        model = ResNet50(CIFAR_CLASSES).cuda()
        args.auxiliary = False
    elif args.arch == 'resnet34':
        model = ResNet34(CIFAR_CLASSES).cuda()
        args.auxiliary = False
    else:
        genotype = eval("genotypes.%s" % args.arch)
        model = Network(args.init_channels, CIFAR_CLASSES, args.layers,
                        args.auxiliary, genotype)
        model = model.cuda()

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    optimizer = torch.optim.SGD(model.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    train_transform, test_transform = utils._data_transforms_cifar10(args)

    # Load dataset
    if args.dataset == 'cifar10':
        noisy_train_data = CIFAR10(root=args.data,
                                   train=True,
                                   gold=False,
                                   gold_fraction=0.0,
                                   corruption_prob=args.corruption_prob,
                                   corruption_type=args.corruption_type,
                                   transform=train_transform,
                                   download=True,
                                   seed=args.data_seed)
        gold_train_data = CIFAR10(root=args.data,
                                  train=True,
                                  gold=True,
                                  gold_fraction=1.0,
                                  corruption_prob=args.corruption_prob,
                                  corruption_type=args.corruption_type,
                                  transform=train_transform,
                                  download=True,
                                  seed=args.data_seed)
        test_data = dset.CIFAR10(root=args.data,
                                 train=False,
                                 download=True,
                                 transform=test_transform)
    elif args.dataset == 'cifar100':
        noisy_train_data = CIFAR100(root=args.data,
                                    train=True,
                                    gold=False,
                                    gold_fraction=0.0,
                                    corruption_prob=args.corruption_prob,
                                    corruption_type=args.corruption_type,
                                    transform=train_transform,
                                    download=True,
                                    seed=args.data_seed)
        gold_train_data = CIFAR100(root=args.data,
                                   train=True,
                                   gold=True,
                                   gold_fraction=1.0,
                                   corruption_prob=args.corruption_prob,
                                   corruption_type=args.corruption_type,
                                   transform=train_transform,
                                   download=True,
                                   seed=args.data_seed)
        test_data = dset.CIFAR100(root=args.data,
                                  train=False,
                                  download=True,
                                  transform=test_transform)

    num_train = len(gold_train_data)
    indices = list(range(num_train))
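    # split the index range into training and validation portions (train_portion sets the boundary)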
    split = int(np.floor(args.train_portion * num_train))

    if args.gold_fraction == 1.0:
        train_data = gold_train_data
    else:
        train_data = noisy_train_data
    train_queue = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=True,
        num_workers=0)

    if args.clean_valid:
        valid_data = gold_train_data
    else:
        valid_data = noisy_train_data

    valid_queue = torch.utils.data.DataLoader(
        valid_data,
        batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:]),
        pin_memory=True,
        num_workers=0)

    test_queue = torch.utils.data.DataLoader(test_data,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             pin_memory=True,
                                             num_workers=2)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs))

    if args.loss_func == 'cce':
        criterion = nn.CrossEntropyLoss().cuda()
    elif args.loss_func == 'rll':
        criterion = utils.RobustLogLoss(alpha=args.alpha).cuda()
    elif args.loss_func == 'forward_gold':
        corruption_matrix = train_data.corruption_matrix
        criterion = utils.ForwardGoldLoss(corruption_matrix=corruption_matrix)
    else:
        assert False, "Invalid loss function '{}' given. Must be in {'cce', 'rll'}".format(
            args.loss_func)

    for epoch in range(args.epochs):
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('train_acc %f', train_acc)

        valid_acc, valid_obj = infer_valid(valid_queue, model, criterion)
        logging.info('valid_acc %f', valid_acc)

        test_acc, test_obj = infer(test_queue, model, criterion)
        logging.info('test_acc %f', test_acc)

        utils.save(model, os.path.join(args.save, 'weights.pt'))
Example #17
def main(config):
    # load config
    etl_config = config['etl']
    experiment_config = config['experiment']
    model_config = config['model']

    # pre-set parameters
    epochs = experiment_config['epochs']
    use_pretrained = experiment_config['use_pretrained']

    # load device
    device_use = load_device(experiment_config['cuda'])

    # creating dataloaders if necessary
    #     if not os.path.exists(os.path.join(etl_config['dataloader_dir'], "train_dataloader.pickle")):
    data_generator = CIFAR10DataPrep(etl_config)

    if experiment_config['test_pipeline']:
        data_generator.prepare_pipeline_dataloader(pipeline_size=300)
        loaders = [
            'pip_train_dataloader.pickle', 'pip_val_dataloader.pickle',
            'pip_test_dataloader.pickle'
        ]
    else:
        data_generator.prepare_dataloader()
        loaders = [
            'train_dataloader.pickle', 'val_dataloader.pickle',
            'test_dataloader.pickle'
        ]

    # load dataloaders
    with open(os.path.join(etl_config['dataloader_dir'], loaders[0]),
              "rb") as traindl:
        train_dataloader = pickle.load(traindl)

    with open(os.path.join(etl_config['dataloader_dir'], loaders[1]),
              "rb") as valdl:
        val_dataloader = pickle.load(valdl)

    with open(os.path.join(etl_config['dataloader_dir'], loaders[2]),
              "rb") as testdl:
        test_dataloader = pickle.load(testdl)

    # load model
    if use_pretrained:
        model = CifarResNet50(model_config).to(device_use)
    else:
        model = ResNet50().to(device_use)

    EXPERIMENT_IDX = experiment_config['EXPERIMENT_IDX']

    # load loss, optm, scheduler
    criterion = nn.CrossEntropyLoss()

    optimizer = optim.SGD(model.parameters(),
                          lr=model_config['lr'],
                          momentum=model_config['momentum'],
                          weight_decay=model_config['weight_decay'])

    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)

    for e in range(0, epochs):
        train(e, device_use, train_dataloader, model, criterion, optimizer)
        validate(e, device_use, val_dataloader, model, criterion,
                 'ckpt%s.pth' % EXPERIMENT_IDX)
        scheduler.step()

    print('==> Resuming from checkpoint..')
    assert os.path.isdir(
        '../checkpoint'), 'Error: no checkpoint directory found!'
    ckpt = torch.load('../checkpoint/ckpt%s.pth' % EXPERIMENT_IDX)
    model.load_state_dict(ckpt['model'])

    validate('testing',
             device_use,
             test_dataloader,
             model,
             criterion,
             ckpt_name='testing%s.pth' % EXPERIMENT_IDX)
Example #18
def main():
    checkpoint = utils.checkpoint(args)
    writer_train = SummaryWriter(args.job_dir + '/run/train')
    writer_test = SummaryWriter(args.job_dir + '/run/test')

    start_epoch = 0
    best_prec1 = 0.0
    best_prec5 = 0.0

    # Data loading
    # while(1):
    #     a=2
    print('=> Preparing data..')
    logging.info('=> Preparing data..')

    traindir = os.path.join('/mnt/cephfs_hl/cv/ImageNet/',
                            'ILSVRC2012_img_train_rec')
    valdir = os.path.join('/mnt/cephfs_hl/cv/ImageNet/',
                          'ILSVRC2012_img_val_rec')
    train_loader, val_loader = getTrainValDataset(traindir, valdir,
                                                  batch_sizes, 100, num_gpu,
                                                  num_workers)

    # Create model
    print('=> Building model...')
    logging.info('=> Building model...')

    model_t = ResNet50()

    # model_kd = resnet101(pretrained=False)

    #print(model_kd)
    # Load teacher model
    ckpt_t = torch.load(args.teacher_dir,
                        map_location=torch.device(f"cuda:{args.gpus[0]}"))
    state_dict_t = ckpt_t
    model_t.load_state_dict(state_dict_t)
    model_t = model_t.to(args.gpus[0])

    for para in list(model_t.parameters())[:-2]:
        para.requires_grad = False

    model_s = ResNet50_sprase().to(args.gpus[0])
    model_dict_s = model_s.state_dict()
    model_dict_s.update(new_state_dict_t)
    model_s.load_state_dict(model_dict_s)

    #ckpt_kd = torch.load('resnet101-5d3b4d8f.pth', map_location=torch.device(f"cuda:{args.gpus[0]}"))
    #state_dict_kd = ckpt_kd
    #new_state_dict_kd = state_dict_kd
    #model_kd.load_state_dict(new_state_dict_kd)
    #model_kd = model_kd.to(args.gpus[0])

    #for para in list(model_kd.parameters())[:-2]:
    #para.requires_grad = False

    model_d = Discriminator().to(args.gpus[0])

    model_s = nn.DataParallel(model_s).cuda()
    model_t = nn.DataParallel(model_t).cuda()
    model_d = nn.DataParallel(model_d).cuda()

    optimizer_d = optim.SGD(model_d.parameters(),
                            lr=args.lr,
                            momentum=args.momentum,
                            weight_decay=args.weight_decay)

    param_s = [
        param for name, param in model_s.named_parameters()
        if 'mask' not in name
    ]
    param_m = [
        param for name, param in model_s.named_parameters() if 'mask' in name
    ]

    optimizer_s = optim.SGD(param_s,
                            lr=args.lr,
                            momentum=args.momentum,
                            weight_decay=args.weight_decay)
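    # mask parameters get a FISTA optimizer (proximal gradient, penalty set by sparse_lambda) to induce sparsity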
    optimizer_m = FISTA(param_m, lr=args.lr * 100, gamma=args.sparse_lambda)

    scheduler_d = StepLR(optimizer_d, step_size=args.lr_decay_step, gamma=0.1)
    scheduler_s = StepLR(optimizer_s, step_size=args.lr_decay_step, gamma=0.1)
    scheduler_m = StepLR(optimizer_m, step_size=args.lr_decay_step, gamma=0.1)

    resume = args.resume
    if resume:
        print('=> Resuming from ckpt {}'.format(resume))
        ckpt = torch.load(resume,
                          map_location=torch.device(f"cuda:{args.gpus[0]}"))
        state_dict_s = ckpt['state_dict_s']
        state_dict_d = ckpt['state_dict_d']

        new_state_dict_s = OrderedDict()
        for k, v in state_dict_s.items():
            new_state_dict_s['module.' + k] = v

        best_prec1 = ckpt['best_prec1']
        model_s.load_state_dict(new_state_dict_s)
        model_d.load_state_dict(ckpt['state_dict_d'])
        optimizer_d.load_state_dict(ckpt['optimizer_d'])
        optimizer_s.load_state_dict(ckpt['optimizer_s'])
        optimizer_m.load_state_dict(ckpt['optimizer_m'])
        scheduler_d.load_state_dict(ckpt['scheduler_d'])
        scheduler_s.load_state_dict(ckpt['scheduler_s'])
        scheduler_m.load_state_dict(ckpt['scheduler_m'])
        start_epoch = ckpt['epoch']
        print('=> Continue from epoch {}...'.format(ckpt['epoch']))

    models = [model_t, model_s, model_d]  #, model_kd]
    optimizers = [optimizer_d, optimizer_s, optimizer_m]
    schedulers = [scheduler_d, scheduler_s, scheduler_m]

    for epoch in range(start_epoch, args.num_epochs):
        for s in schedulers:
            s.step(epoch)

        #global g_e
        #g_e = epoch
        #gl.set_value('epoch',g_e)

        train(args, train_loader, models, optimizers, epoch, writer_train)
        test_prec1, test_prec5 = test(args, val_loader, model_s)

        is_best = best_prec1 < test_prec1
        best_prec1 = max(test_prec1, best_prec1)
        best_prec5 = max(test_prec5, best_prec5)

        model_state_dict = model_s.module.state_dict() if len(
            args.gpus) > 1 else model_s.state_dict()

        state = {
            'state_dict_s': model_state_dict,
            'state_dict_d': model_d.state_dict(),
            'best_prec1': best_prec1,
            'best_prec5': best_prec5,
            'optimizer_d': optimizer_d.state_dict(),
            'optimizer_s': optimizer_s.state_dict(),
            'optimizer_m': optimizer_m.state_dict(),
            'scheduler_d': scheduler_d.state_dict(),
            'scheduler_s': scheduler_s.state_dict(),
            'scheduler_m': scheduler_m.state_dict(),
            'epoch': epoch + 1
        }
        train_loader.reset()
        val_loader.reset()
        #if is_best:
        checkpoint.save_model(state, epoch + 1, is_best)
        #checkpoint.save_model(state, 1, False)

    print(f"=> Best @prec1: {best_prec1:.3f} @prec5: {best_prec5:.3f}")
    logging.info('Best Top1: %e Top5: %e ', best_prec1, best_prec5)
Example #19
# opt = Adam(lr=INIT_LR)
opt = SGD(lr=INIT_LR, momentum=0.9)

if args["model"] is None:
    print("[INFO] compiling distributed model...")
    # create model on CPU
    with tf.device('/cpu:0'):

        #model = ResNet50(64, 64, 3, NUM_CLASSES, reg=5e-4, bnEps=2e-5,
        #        bnMom=0.9)

        # EXP 1-4 & 6
        model = ResNet50(64,
                         64,
                         3,
                         NUM_CLASSES,
                         reg=5e-4,
                         bnEps=2e-5,
                         bnMom=0.9,
                         dataset="cifar")

    # create distribute strategy for TF2.0
    strategy = tf.distribute.MirroredStrategy()
    with strategy.scope():
        parallel_model = multi_gpu_model(model, gpus=2)
        parallel_model.compile(loss="categorical_crossentropy",
                               optimizer=opt,
                               metrics=METRICS)

else:
    print("[INFO] loading %s ..." % args["model"])
    # load single GPU model back! need to specify extra metrics!
Example #20
from flask import Flask, request, jsonify
import keras
from keras.optimizers import Adam
import numpy as np
from keras.preprocessing import image
import json
import keras.backend as K
from utils import decode_img, classes
from resnet import ResNet50
import tensorflow as tf

# Model
K.clear_session()
K.set_image_data_format('channels_last')
K.set_learning_phase(1)
model = ResNet50(input_shape=(64, 64, 3), classes=47)
model.summary()
model.compile(optimizer=Adam(lr=0.001,
                             beta_1=0.9,
                             beta_2=0.999,
                             epsilon=1e-08,
                             decay=0.002),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.load_weights('model.hdf5')
graph = tf.get_default_graph()

# initialization
app = Flask(__name__)

Example #21
def main():
    if not torch.cuda.is_available():
        sys.exit(1)
    start_t = time.time()

    cudnn.benchmark = True
    cudnn.enabled = True
    logging.info("args = %s", args)

    model = ResNet50()
    logging.info(model)
    model = nn.DataParallel(model).cuda()

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
    criterion_smooth = criterion_smooth.cuda()

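    # apply weight decay only to conv/fc weights; the remaining parameters are left unregularized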
    all_parameters = model.parameters()
    weight_parameters = []
    for pname, p in model.named_parameters():
        if 'fc' in pname or 'conv' in pname:
            weight_parameters.append(p)
    weight_parameters_id = list(map(id, weight_parameters))
    other_parameters = list(
        filter(lambda p: id(p) not in weight_parameters_id, all_parameters))

    optimizer = torch.optim.SGD(
        [{
            'params': other_parameters
        }, {
            'params': weight_parameters,
            'weight_decay': args.weight_decay
        }],
        args.learning_rate,
        momentum=args.momentum,
    )

    #scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda step : (1.0-step/args.epochs), last_epoch=-1)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer,
        milestones=[args.epochs // 4, args.epochs // 2, args.epochs // 4 * 3],
        gamma=0.1)
    start_epoch = 0
    best_top1_acc = 0
    checkpoint_tar = os.path.join(args.save, 'checkpoint.pth.tar')
    if os.path.exists(checkpoint_tar):
        logging.info('loading checkpoint {} ..........'.format(checkpoint_tar))
        checkpoint = torch.load(checkpoint_tar)
        start_epoch = checkpoint['epoch']
        best_top1_acc = checkpoint['best_top1_acc']
        model.load_state_dict(checkpoint['state_dict'])
        logging.info("loaded checkpoint {} epoch = {}".format(
            checkpoint_tar, checkpoint['epoch']))
    for epoch in range(start_epoch):
        scheduler.step()

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    crop_scale = 0.08
    lighting_param = 0.1
    train_transforms = transforms.Compose([
        transforms.RandomResizedCrop(224, scale=(crop_scale, 1.0)),
        Lighting(lighting_param),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(), normalize
    ])

    train_dataset = datasets.ImageFolder(traindir, transform=train_transforms)

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    epoch = start_epoch
    while epoch < args.epochs:
        train_obj, train_top1_acc, train_top5_acc, epoch = train(
            epoch, train_loader, model, criterion_smooth, optimizer, scheduler)
        valid_obj, valid_top1_acc, valid_top5_acc = validate(
            epoch, val_loader, model, criterion, args)

        is_best = False
        if valid_top1_acc > best_top1_acc:
            best_top1_acc = valid_top1_acc
            is_best = True

        save_checkpoint(
            {
                'epoch': epoch,
                'state_dict': model.state_dict(),
                'best_top1_acc': best_top1_acc,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.save)

        epoch += 1

    training_time = (time.time() - start_t) / 3600
    print('total training time = {} hours'.format(training_time))
Example #22
datagen = ImageDataGenerator(
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    dtype='uint8',
    preprocessing_function=lambda image:image.astype(np.uint8),
    horizontal_flip=True
)
datagen.fit(x_train)


# training
for batch_size in [5]:
    for lr in [0.01]:
        print('lr={0}, batch_size={1}'.format(lr, batch_size))

        model = ResNet50(input_shape=(32, 32, 3), classes=num_classes)
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(lr=lr),
                      metrics=['acc'])
        model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                            steps_per_epoch=len(x_train) // batch_size,
                            epochs=epochs,
                            validation_data=(x_test, y_test))
        model.save('model' + str(batch_size) + '_' + str(lr) + '.h5')
        model.save_weights('modelweight' + str(batch_size) + '_' + str(lr) + '.h5')
Example #23
def wa_sgd_run(rank, args):
    global best_acc1

    if rank == 0:
        print("async wa_sgd ",
              "average weight" if args.ave_weight else "average gradients")
        if args.adjlr:
            writer = SummaryWriter(
                log_dir=args.logdir,
                comment='opt_module_wa_sgd_ave_w_vgg16_adjlr{:.3f}_m{:.2f}'.
                format(args.lr, args.momentum) if args.ave_weight else
                'opt_module_wa_sgd_ave_g_vgg16_adjlr{:.3f}_m{:.2f}'.format(
                    args.lr, args.momentum))
        else:
            writer = SummaryWriter(
                log_dir=args.logdir,
                comment='opt_module_wa_sgd_ave_w_vgg16_lr{:.3f}_m{:.2f}'.
                format(args.lr, args.momentum) if args.ave_weight else
                'opt_module_wa_sgd_ave_g_vgg16_lr{:.3f}_m{:.2f}'.format(
                    args.lr, args.momentum))

    #init process group
    if args.gpus is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = ",".join(args.gpus)
    dist.init_process_group(backend=args.dist_backend,
                            init_method=args.dist_url,
                            world_size=args.world_size,
                            rank=rank)

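    # per-process batch size: the global batch is split across world_size workers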
    args.batch_size = int(args.batch_size / args.world_size)

    device = torch.device("cuda:{}".format(rank))
    print("batchsize ", args.batch_size, " rank ", rank, " device ", device)

    #model = VGG16OPO(num_classes=args.classes).to(device)
    #model = models.resnet50(num_classes=args.classes).to(device)

    model = ResNet50(num_classes=args.classes).to(device)
    #model = models.resnet50(num_classes=args.classes).to(device)
    #model = models.vgg16_bn(num_classes=args.classes).to(device)
    #model = torch.nn.parallel.DistributedDataParallel(model)
    criterion = nn.CrossEntropyLoss().to(device)

    #model = VGG16(num_classes=args.classes).cuda()
    #model = torch.nn.parallel.DistributedDataParallel(model)

    optimizer = optim.SGD(model.parameters(),
                          args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    if args.sgd == "SGDOPO":
        optimizer_alpha = SGDOPO(model,
                                 args.lr,
                                 momentum=args.momentum,
                                 weight_decay=args.weight_decay,
                                 alpha=1.0 / dist.get_world_size())
    elif args.sgd == "SGDOPO_W":
        optimizer_alpha = SGDOPO_W(model,
                                   args.lr,
                                   momentum=args.momentum,
                                   weight_decay=args.weight_decay,
                                   alpha=1.0 / dist.get_world_size())
    elif args.sgd == "SGDOPO_MW":
        optimizer_alpha = SGDOPO_MW(model,
                                    rank,
                                    args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay,
                                    alpha=1.0 / dist.get_world_size())
    elif args.sgd == "SGDOPO_BW":
        optimizer_alpha = SGDOPO_BW(model,
                                    rank,
                                    args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay,
                                    alpha=1.0 / dist.get_world_size())
    #optimizer = optim.SGD(model.parameters(), args.lr)
    #optimizer = optim.SGD(model.parameters(), args.lr, momentum=args.momentum)

    #open cudnn benchmark
    #cudnn.benchmark = True

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_dataset = datasets.ImageFolder(
        train_dir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_dataset)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=(train_sampler is None),
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               sampler=train_sampler)

    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        val_dir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    acc_red = torch.zeros(1).to(device)
    acum_time = 0.0
    dist.barrier()
    for epoch in range(0, args.epochs):
        train_sampler.set_epoch(epoch)
        if args.adjlr:
            adjust_learning_rate(optimizer, epoch, args)

        # train for one epoch
        batch_time = train(device, train_loader, model, criterion, optimizer,
                           optimizer_alpha, epoch, args)
        acum_time += batch_time

        # evaluate on validation set
        acc1 = validate(device, val_loader, model, criterion, args)

        # average acc1
        acc_red[0] = acc1
        dist.reduce(tensor=acc_red, dst=0, op=dist.ReduceOp.SUM)
        acc_red.div_(args.world_size * 1.0)

        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)
        #print("final best acc of epoch %d : %f" % (epoch, best_acc1))
        if rank == 0:
            print("==> acc1 ", acc_red[0].item())
            writer.add_scalar('test acc1 ', acc_red[0].item(), epoch)
            writer.add_scalar('acc1 over time(0.1s)', acc_red[0].item(),
                              int(acum_time * 10))
Example #24
def save_image(k1, k2, k3, path, name):  # reconstructed header: the original snippet begins mid-function
	k1 = np.around((k1*std[0]+mean[0])*255)
	k2 = np.around((k2*std[1]+mean[1])*255)
	k3 = np.around((k3*std[2]+mean[2])*255)
	r = Image.fromarray(k1).convert('L')
	g = Image.fromarray(k2).convert('L')
	b = Image.fromarray(k3).convert('L')
	raw = Image.merge('RGB', (r, g, b))
	raw.save(path+name+'.png')

net1 = torch.nn.DataParallel(ResNet18().cuda()).eval()
net1.load_state_dict(torch.load('./pytorch_cifar/checkpoint/ckpt4.t7')['net'])
net2 = torch.nn.DataParallel(SENet18().cuda()).eval()
net2.load_state_dict(torch.load('./pytorch_cifar/checkpoint/ckpt_senet.t7')['net'])
net3 = torch.nn.DataParallel(DenseNet121().cuda()).eval()
net3.load_state_dict(torch.load('./pytorch_cifar/checkpoint/ckpt_dense.t7')['net'])
net4 = torch.nn.DataParallel(ResNet50().cuda()).eval()
net4.load_state_dict(torch.load('./pytorch_cifar/checkpoint/ckpt_resnet50.t7')['net'])
net5 = torch.nn.DataParallel(VGG('VGG19').cuda()).eval()
net5.load_state_dict(torch.load('./pytorch_cifar/checkpoint/ckpt_vgg.t7')['net'])

net_dict = {'resnet50': net4}
confidences = [50.0]
normalize = transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010])
fo = open('CW/info.txt', 'a')
bs = 32

class testdata(Dataset):
    def __init__(self):
        self.imgpath_list = sorted(os.listdir('t_sample_testdata'), key=lambda x: int(x[:-6]))
    def __getitem__(self, index):
        img = scipy.misc.imread('t_sample_testdata/' + self.imgpath_list[index])
        return img  # assumed return value; the original snippet is truncated here
    def __len__(self):
        return len(self.imgpath_list)
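Example #25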
                                          shuffle=True,
                                          **kwargs)

if args.cuda:
    if args.arch == "vgg":
        if args.depth == 16:
            model = VGG(depth=16, init_weights=True, cfg=None)
        elif args.depth == 19:
            model = VGG(depth=19, init_weights=True, cfg=None)
        else:
            sys.exit("vgg doesn't have those depth!")
    elif args.arch == "resnet":
        if args.depth == 18:
            model = ResNet18()
        elif args.depth == 50:
            model = ResNet50()
        else:
            sys.exit("resnet doesn't implement those depth!")
    # elif args.arch == "convnet":
    #     args.depth = 4
    #     model = ConvNet()
    if args.multi_gpu:
        model = torch.nn.DataParallel(model)
    model.cuda()

print(args.distill)
if args.cuda and args.distill:
    #   crete teacher model.
    if args.teacharch == "vgg":
        if args.teachdepth == 16:
            teacher = VGG(depth=16, init_weights=True, cfg=None)
Example #26
def main():
    args = get_args()

    if not os.path.exists(args.out_dir):
        os.mkdir(args.out_dir)
    logfile = os.path.join(args.out_dir, 'output.log')
    if os.path.exists(logfile):
        os.remove(logfile)

    logging.basicConfig(format='[%(asctime)s] - %(message)s',
                        datefmt='%Y/%m/%d %H:%M:%S',
                        level=logging.INFO,
                        filename=logfile)
    logger.info(args)

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    train_loader, test_loader = get_loaders(args.data_dir, args.batch_size)

    epsilon = (args.epsilon / 255.) / std
    alpha = (args.alpha / 255.) / std

    model = ResNet50().cuda()
    model.train()

    opt = torch.optim.SGD(model.parameters(),
                          lr=args.lr_max,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    amp_args = dict(opt_level=args.opt_level,
                    loss_scale=args.loss_scale,
                    verbosity=False)
    criterion = nn.CrossEntropyLoss()

    lr_steps = args.epochs * len(train_loader)
    if args.lr_schedule == 'cyclic':
        scheduler = torch.optim.lr_scheduler.CyclicLR(
            opt,
            base_lr=args.lr_min,
            max_lr=args.lr_max,
            step_size_up=lr_steps / 2,
            step_size_down=lr_steps / 2)
    elif args.lr_schedule == 'multistep':
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            opt, milestones=[lr_steps / 2, lr_steps * 3 / 4], gamma=0.1)

    # Training
    start_train_time = time.time()
    logger.info('Epoch \t Seconds \t LR \t \t Train Loss \t Train Acc')
    for epoch in range(args.epochs):
        start_epoch_time = time.time()
        train_loss = 0
        train_acc = 0
        train_n = 0
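        # adversarial training loop: random start in the epsilon ball, then signed-gradient steps (FGSM/PGD style)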
        for i, (X, y) in enumerate(train_loader):
            X, y = X.cuda(), y.cuda()
            delta = torch.zeros_like(X).cuda()
            if args.delta_init == 'random':
                for j in range(len(epsilon)):
                    delta[:, j, :, :].uniform_(-epsilon[j][0][0].item(),
                                               epsilon[j][0][0].item())
                delta.data = clamp(delta, lower_limit - X, upper_limit - X)
            delta.requires_grad = True
            for _ in range(args.attack_iters):
                output = model(X + delta)
                loss = criterion(output, y)
                # with amp.scale_loss(loss, opt) as scaled_loss:
                #    scaled_loss.backward()

                loss.backward()

                grad = delta.grad.detach()
                delta.data = clamp(delta + alpha * torch.sign(grad), -epsilon,
                                   epsilon)
                delta.data = clamp(delta, lower_limit - X, upper_limit - X)
                delta.grad.zero_()
            delta = delta.detach()
            output = model(X + delta)
            loss = criterion(output, y)
            opt.zero_grad()
            # with amp.scale_loss(loss, opt) as scaled_loss:
            #    scaled_loss.backward()
            loss.backward()
            opt.step()
            train_loss += loss.item() * y.size(0)
            train_acc += (output.max(1)[1] == y).sum().item()
            train_n += y.size(0)
            scheduler.step()
        epoch_time = time.time()
        lr = scheduler.get_lr()[0]
        logger.info('%d \t %.1f \t \t %.4f \t %.4f \t %.4f', epoch,
                    epoch_time - start_epoch_time, lr, train_loss / train_n,
                    train_acc / train_n)
    train_time = time.time()
    torch.save(model.state_dict(), os.path.join(args.out_dir, 'model.pth'))
    logger.info('Total train time: %.4f minutes',
                (train_time - start_train_time) / 60)

    # Evaluation
    model_test = ResNet50().cuda()
    model_test.load_state_dict(model.state_dict())
    model_test.float()
    model_test.eval()

    pgd_loss, pgd_acc = evaluate_pgd(test_loader, model_test, 50, 10)
    test_loss, test_acc = evaluate_standard(test_loader, model_test)

    logger.info('Test Loss \t Test Acc \t PGD Loss \t PGD Acc')
    logger.info('%.4f \t \t %.4f \t %.4f \t %.4f', test_loss, test_acc,
                pgd_loss, pgd_acc)
Example #27
    jf.write(json.dumps(mean_dict))
jf.close()

# del some not useful vars
del data

## build parallel model on 2 GPUs

# opt = Adam(lr=INIT_LR)
opt = SGD(lr=INIT_LR, momentum=0.9)

if args["model"] is None:
    print("[INFO] compiling distributed model...")
    # create model on CPU
    with tf.device('/cpu:0'):
        model = ResNet50(64, 64, 3, classes, reg=5e-4, bnEps=2e-5, bnMom=0.9)
    # create distribute strategy for TF2.0
    strategy = tf.distribute.MirroredStrategy()
    with strategy.scope():
        parallel_model = multi_gpu_model(model, gpus=2)
        parallel_model.compile(loss="categorical_crossentropy",
                               optimizer=opt,
                               metrics=METRICS)

else:
    print("[INFO] loading %s ..." % args["model"])
    # load single GPU model back! need to specify extra metrics!
    with tf.device('/cpu:0'):
        model = load_model(args["model"],
                           custom_objects={"f1_score": f1_score})
Example #28
def main():
    import multiprocessing
    multiprocessing.set_start_method('forkserver')

    parser = argparse.ArgumentParser(description='Cats training.')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--out',
                        '-o',
                        default='result',
                        help='Directory to output the result')
    parser.add_argument('--normalization',
                        type=str,
                        choices=NORMALIZATIONS,
                        required=True,
                        help='Normalization method')
    args = parser.parse_args()

    gpu = args.gpu
    out_dir = args.out
    image_dir = 'images'

    batch_size = 32
    short_edge = 256
    crop_edge = 224

    seed = 3141592653
    n_processes = len(os.sched_getaffinity(0))

    normalization = get_normalization(args.normalization)

    initial_lr = 0.1
    epochs = 300
    lr_reduce_interval = (100, 'epoch')
    lr_reduce_rate = 0.1
    weight_decay = 5e-4

    numpy_random = numpy.random.RandomState(seed)
    random = Random.from_numpy_random(numpy_random)
    train_dataset, valid_dataset, _ = CatsDataset.train_valid(
        image_dir, short_edge, crop_edge, random)
    order_sampler = iterators.ShuffleOrderSampler(numpy_random)
    train_iter = iterators.MultiprocessIterator(train_dataset,
                                                batch_size,
                                                repeat=True,
                                                shuffle=None,
                                                n_processes=n_processes,
                                                n_prefetch=4,
                                                order_sampler=order_sampler)
    valid_iter = iterators.MultiprocessIterator(valid_dataset,
                                                batch_size,
                                                repeat=False,
                                                shuffle=False,
                                                n_processes=n_processes,
                                                n_prefetch=4)

    numpy.random.seed(seed)
    model = ResNet50(len(CatsDataset.classes), normalization)
    model = chainer.links.Classifier(model)
    if gpu >= 0:
        chainer.cuda.get_device_from_id(gpu).use()
        model.to_gpu()

    optimizer = optimizers.MomentumSGD(lr=initial_lr)
    optimizer.setup(model)
    optimizer.add_hook(optimizer_hooks.WeightDecay(weight_decay))

    updater = training.updaters.StandardUpdater(train_iter,
                                                optimizer,
                                                device=gpu)
    trainer = training.Trainer(updater, (epochs, 'epoch'), out=out_dir)

    trainer.extend(extensions.ExponentialShift('lr', lr_reduce_rate),
                   trigger=lr_reduce_interval)
    trainer.extend(extensions.Evaluator(valid_iter, model, device=gpu),
                   trigger=(1, 'epoch'))

    trainer.extend(extensions.LogReport())
    trainer.extend(
        extensions.PrintReport([
            'epoch', 'main/loss', 'validation/main/loss', 'main/accuracy',
            'validation/main/accuracy', 'elapsed_time'
        ]))

    trainer.run()

    chainer.serializers.save_npz(os.path.join(out_dir, 'model.npz'), model)
Example #29
# load image
#image_path = os.path.join("X:/model", "test.jpg")
img = Image.open("X:/dataset/公安部大楼/00000.jpg")
plt.imshow(img)
# [N, C, H, W]
img = data_transform(img)
# expand batch dimension
img = torch.unsqueeze(img, dim=0)

# read class_indict
try:
    with open('./class_indices.json', 'r') as json_file:
        class_indict = json.load(json_file)
except Exception as e:
    print(e)
    exit(-1)

# create model
model = ResNet50()
# load model weights
model_weight_path = "./resNet50.pth"
model.load_state_dict(torch.load(model_weight_path, map_location=device))
model.eval()
with torch.no_grad():
    # predict class
    output = torch.squeeze(model(img))
    predict = torch.softmax(output, dim=0)
    predict_cla = torch.argmax(predict).numpy()
print(class_indict[str(predict_cla)], predict[predict_cla].numpy())
plt.show()
Example #30
        autoencoder_alcoholism.eval()
        autoencoder_stimulus.eval()
        autoencoder_id.eval()

    else:
        if opt.classifier == 'ResNet18':
            classifier_alcoholism = ResNet18(num_classes_alc)
            classifier_stimulus = ResNet18(num_classes_stimulus)
            classifier_id = ResNet18(num_classes_id)
        elif opt.classifier == 'ResNet34':
            classifier_alcoholism = ResNet34(num_classes_alc)
            classifier_stimulus = ResNet34(num_classes_stimulus)
            classifier_id = ResNet34(num_classes_id)
        elif opt.classifier == 'ResNet50':
            classifier_alcoholism = ResNet50(num_classes_alc)
            classifier_stimulus = ResNet50(num_classes_stimulus)
            classifier_id = ResNet50(num_classes_id)

        classifier_alcoholism = classifier_alcoholism.to(device)
        classifier_stimulus = classifier_stimulus.to(device)
        classifier_id = classifier_id.to(device)

        if device == 'cuda':
            classifier_alcoholism = torch.nn.DataParallel(
                classifier_alcoholism)
            classifier_stimulus = torch.nn.DataParallel(classifier_stimulus)
            classifier_id = torch.nn.DataParallel(classifier_id)
            cudnn.benchmark = True

        checkpoint_alcoholism = torch.load(