Example #1
def networks(network, **kwargs):
    # ResNet
    if 'resnet' in network and 'pre' not in network:
        depth = int(network[6:])
        return resnet(depth, **kwargs)

    elif 'vgg' in network:
        depth = int(network[3:5])
        if 'bn' in network:
            return vgg_bn(depth, **kwargs)
        else:
            return vgg(depth, **kwargs)

    elif 'wideResNet' in network:
        depth = int(network[10:12])
        widen_factor = int(network[13:])
        return wideResNet(depth, widen_factor, **kwargs)

    elif 'preresnet' in network:
        depth = int(network[9:])
        return preresnet(depth, **kwargs)

    elif 'pyramidnet' in network:
        depth = int(network[10:])
        return pyramidnet(depth, **kwargs)
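The dispatcher parses the architecture name with fixed string offsets, so the spelling has to match exactly. A quick sketch of how hypothetical names map through the slices (assuming the constructors above are in scope):

# 'resnet110'       -> resnet(110, ...)        ('resnet110'[6:]  == '110')
# 'vgg16bn'         -> vgg_bn(16, ...)         ('vgg16bn'[3:5]   == '16')
# 'wideResNet28_10' -> wideResNet(28, 10, ...) ([10:12] == '28', [13:] == '10')
net = networks('resnet110', num_classes=10)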
Example #2
def init_network(config, num_classes):
    kwargs = {'depth': config.depth, 'num_classes': num_classes}
    if config.network == 'vgg':
        if 'avg_pool2d' in config:
            kwargs.update({'avg_pool2d': config.avg_pool2d})
        if 'batch_norm' in config:
            kwargs.update({'batch_norm': config.batch_norm})
        net = vgg(**kwargs)
    elif config.network == 'resnet':
        dataset = config.data_dir.split('/')[-1]
        if 'cifar' in dataset.lower():
            kwargs.update({'dataset': 'cifar'})
        net = resnet(**kwargs)
    elif config.network == 'reskipnet':
        net = reskipnet(**kwargs)
    else:
        raise NotImplementedError
    assert os.path.exists('{}/checkpoint/original'.format(config.result_dir)),\
        'No checkpoint directory for original model!'
    dataset = config.data_dir.split('/')[-1]
    path_to_add = '{}/{}/{}'.format(dataset, config.network, config.depth)
    checkpoint_path, exp_name, epochs = get_best_checkpoint(
        '{}/checkpoint/original/{}'.format(config.result_dir, path_to_add))
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    net.load_state_dict(checkpoint)
    if torch.cuda.is_available():
        net.cuda()
    return net, exp_name, epochs
Example #3
    def __init__(self,
                 backbone_name='resnet18',
                 in_channels=3,
                 sc_channels=128,
                 load_pretrained=True,
                 norm_type='bn',
                 use_cbam=False):
        super(ContextPath, self).__init__()

        bias = not (norm_type == 'bn' or norm_type == 'gn')

        self.backbone = resnet(name=backbone_name,
                               in_channels=in_channels,
                               load_pretrained=load_pretrained,
                               norm_type=norm_type,
                               use_cbam=use_cbam)
        expansion = self.backbone.block.expansion
        ##self.ppm = PPM(in_channels=512*expansion, pool_sizes=[6, 3, 2, 1], mode='sum', norm_type=norm_type)

        self.arm_32x = ARM(in_channels=512 * expansion,
                           out_channels=sc_channels,
                           norm_type=norm_type)
        self.arm_16x = ARM(in_channels=256 * expansion,
                           out_channels=sc_channels,
                           norm_type=norm_type)
        self.arm_8x = ARM(in_channels=128 * expansion,
                          out_channels=sc_channels,
                          norm_type=norm_type)

        self.cbr_gap = ConvBatchNormRelu(in_channels=512 * expansion,
                                         out_channels=sc_channels,
                                         kernel_size=1,
                                         stride=1,
                                         padding=0,
                                         bias=bias,
                                         norm_type=norm_type)
        ##self.cbr_ppm = ConvBatchNormRelu(in_channels=512*expansion, out_channels=sc_channels, kernel_size=1, stride=1, padding=0, bias=bias, norm_type=norm_type)
        self.cbr_32x = ConvBatchNormRelu(in_channels=sc_channels,
                                         out_channels=sc_channels,
                                         kernel_size=3,
                                         stride=1,
                                         padding=1,
                                         bias=bias,
                                         norm_type=norm_type)
        self.cbr_16x = ConvBatchNormRelu(in_channels=sc_channels,
                                         out_channels=sc_channels,
                                         kernel_size=3,
                                         stride=1,
                                         padding=1,
                                         bias=bias,
                                         norm_type=norm_type)
        self.cbr_8x = ConvBatchNormRelu(in_channels=sc_channels,
                                        out_channels=sc_channels,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1,
                                        bias=bias,
                                        norm_type=norm_type)
Example #4
def main():
    # Check that necessary paths exists, if not create them.
    if not os.path.exists(MODEL_CHECKPOINT_PATH):
        print_info('Creating model checkpoint path...')
        os.makedirs(MODEL_CHECKPOINT_PATH)

    # Get the dataset from the CSV, load it and preprocess the data.
    print_info('Preparing training and validation data...')
    train_img, train_labels, val_img, val_labels = get_training_val_data(
        TRAINING_DATA_CSV_PATH)

    # Plot the distribution.
    # view_label_distribution(train_labels, title='Emotions in training set')
    # view_label_distribution(val_labels, title='Emotions in validation set')

    # Calculate class weights.
    print_info('Calculating class weights...')
    train_weights = calculate_class_weights(train_labels)

    # Load and compile the model with the correct input shape and number of classes.
    print_info('Loading model...')
    input_shape = train_img[0].shape
    num_of_classes = len(np.unique(train_labels))
    model = resnet(input_shape, num_of_classes)
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    # Calculate the number of steps taken during training.
    training_steps = len(train_img) // BATCH_SIZE  # integer number of batches per epoch

    # Add all callbacks in a list. Will be used during training.
    save_path = MODEL_CHECKPOINT_PATH + 'weights_{epoch:03d}.hdf5'
    checkpoint = ModelCheckpoint(filepath=save_path,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True)
    callbacks = [checkpoint]

    # Create an ImageDataGenerator. Eventual augmentations are done here.
    data_generator = ImageDataGenerator(rotation_range=20,
                                        horizontal_flip=True,
                                        width_shift_range=0.2,
                                        height_shift_range=0.2)

    # Train the model.
    print_info('Starting training...')
    model.fit_generator(data_generator.flow(train_img, train_labels),
                        steps_per_epoch=training_steps,
                        class_weight=train_weights,
                        epochs=EPOCHS,
                        validation_data=(val_img, val_labels),
                        shuffle=True,
                        callbacks=callbacks,
                        use_multiprocessing=False,
                        workers=multiprocessing.cpu_count())
Example #5
def predict_agument(data, **kwargs):
    """
        模型预测,单张图片复制
        :param
            data(tensor) -- 图片数据
        :kwargs

        :return(numpy)
            预测结果
    """
    # choose CPU or GPU to run on
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # load the models
    models = []
    for k, v in kwargs.items():
        if k == '1':
            model = MFFNet()
        elif k == '2':
            # 299 x 299
            model = inception(pretrained = False)
        elif k == '3':
            model = resnet(pretrained = False)
        elif k == '4':
            model = densenet(pretrained = False)
        elif k == '5':
            model = senet(pretrained = False)
        # load the weights
        model.load_state_dict(torch.load(v))
        model = model.to(device)
        # evaluation mode
        model.eval()
        models.append(model)

    # get the ensemble output with an averaging strategy
    data = data.to(device)
    sum = None
    # averaging strategy
    for model in models:
        output = model(data)
        output = output.detach()
        val = torch.zeros(7, device=device)  # keep on the same device as the model output
        for i in range(output.size(0)):
            val = val + output[i]
        val = val / output.size(0)

        if sum is None:
            sum = val
        else:
            sum += val
    val = sum / len(models)
    _, a = torch.max(val, 0)

    return a.item()
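Each key in kwargs picks an architecture by number and each value is the path to its weights, so the ensemble is assembled by name. A minimal call sketch; the checkpoint paths are hypothetical:

# average MFFNet ('1') and resnet ('3') over the augmented copies of one image
label = predict_agument(augmented_batch,
                        **{'1': 'weights/mffnet.pth', '3': 'weights/resnet.pth'})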
Example #6
def main():
    if "resnet" in args.model:
        if args.model == "resnet18":
            model, model_criterion, model_optimizer, model_scheduler = models.resnet(
                18, args.learning_rate, args.momentum)
        elif args.model == "resnet101":
            model, model_criterion, model_optimizer, model_scheduler = models.resnet(
                101, args.learning_rate, args.momentum)
        elif args.model == "resnet152":
            model, model_criterion, model_optimizer, model_scheduler = models.resnet(
                152, args.learning_rate, args.momentum)
        else:
            print("Using default resnet model: resnet50")
            model, model_criterion, model_optimizer, model_scheduler = models.resnet(
                50, args.learning_rate, args.momentum)
        model = train_resnet(model,
                             model_criterion,
                             model_optimizer,
                             model_scheduler,
                             epochs=args.epochs)
        predict_model(model)
    elif args.model == "chexnet":
        model, model_criterion, model_optimizer = models.chexnet(
            args.learning_rate, args.momentum, args.weight_decay)
        model = train_chexnet(model,
                              model_criterion,
                              model_optimizer,
                              epochs=args.epochs,
                              learning_rate=args.learning_rate,
                              weight_decay=args.weight_decay)
        predict_model(model)
    elif args.model == "squeezenet":
        model, model_criterion, model_optimizer = models.squeezenet(
            args.learning_rate, args.momentum, args.weight_decay)
        model = train_squeezenet(model,
                                 model_criterion,
                                 model_optimizer,
                                 num_epochs=args.epochs)
        predict_model(model)
    else:
        print("Please input a valid model")
Example #7
def run_dataset(file_path, checkpoint, idx_to_species, cuda):
    # Load Data
    input_size = (600, 600)
    mean = [0.485, 0.456, 0.406][::-1]
    std = [0.229, 0.224, 0.225][::-1]

    data_transform = dataset.Compose([
        dataset.ResizeCV(input_size),
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])
    batch_size = 16
    ds = dataset.SnakeDataset(file_path, data_transform)
    dataloader = torch.utils.data.DataLoader(ds,
                                             batch_size=batch_size,
                                             shuffle=False,
                                             num_workers=4)

    # Load model & checkpoint
    model = models.resnet(num_classes=45,
                          pretrained=True,
                          resnet_model='resnet18',
                          add_stn=False)
    model = utils.load_model(checkpoint, model)

    if cuda:
        model = model.cuda()

    model = model.eval()

    submit = []
    keys = ['filename']
    for i in range(len(idx_to_species)):
        keys += [idx_to_species[i]]
    print(keys)
    submit.append(keys)
    for batch_idx, (x, _, names) in enumerate(dataloader):
        print(batch_idx, '/', len(ds) / batch_size)
        if cuda:
            x = x.cuda()
        outputs = model(x)
        scores = F.softmax(outputs, dim=1).cpu().data.numpy()
        for i, name in enumerate(names):
            row = [os.path.basename(name)]
            for j in range(scores[i].shape[0]):
                row += [str(scores[i, j])]
            submit.append(row)

    submission = file_path.replace('.pkl', '.csv')
    with open(submission, 'w', newline='') as csvfile:  # text mode for csv in Python 3
        writer = csv.writer(csvfile, delimiter=',')
        for row in submit:
            writer.writerow(row)
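A minimal invocation sketch; the pickle path, checkpoint path, and species mapping below are assumptions:

idx_to_species = {i: 'species_{:02d}'.format(i) for i in range(45)}  # hypothetical names
run_dataset('data/val_round1.pkl', 'checkpoints/resnet18_best.pth',
            idx_to_species, cuda=True)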
Example #8
def process():

    input_path = generate_random_filename(upload_directory,"jpg")
    output_path = generate_random_filename(upload_directory,"jpg")

    try:

        url = request.json["url"]
        # phone: iphone, blackberry or sony
        phone = request.json["phone"]
        # resolution: orig,high,medium,small,tiny
        resolution = request.json["resolution"]

        download(url, input_path)
       
        # get the specified image resolution
        IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_SIZE = utils.get_specified_res(res_sizes, phone, resolution)

        # create placeholders for input images
        x_ = tf.placeholder(tf.float32, [None, IMAGE_SIZE])
        x_image = tf.reshape(x_, [-1, IMAGE_HEIGHT, IMAGE_WIDTH, 3])
            
        # generate enhanced image
        enhanced = resnet(x_image)


        with tf.Session(config=config) as sess:
            saver = tf.train.Saver()
            saver.restore(sess, "models_orig/" + phone + "_orig")
            image = np.float16(misc.imresize(misc.imread(input_path), res_sizes[phone])) / 255
            image_crop = utils.extract_crop(image, resolution, phone, res_sizes)
            image_crop_2d = np.reshape(image_crop, [1, IMAGE_SIZE])
            enhanced_2d = sess.run(enhanced, feed_dict={x_: image_crop_2d})
            enhanced_image = np.reshape(enhanced_2d, [IMAGE_HEIGHT, IMAGE_WIDTH, 3])
            misc.imsave(output_path, enhanced_image)
    
        callback = send_file(output_path, mimetype='image/jpeg')

        return callback, 200


    except:
        traceback.print_exc()
        return {'message': 'input error'}, 400

    finally:
        clean_all([
            input_path,
            output_path
            ])
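The handler expects a JSON body carrying the source image URL plus the phone and resolution keys used to pick the DPED model. A hedged client-side sketch; the route and host are assumptions:

import requests

payload = {'url': 'http://example.com/photo.jpg',   # hypothetical image URL
           'phone': 'iphone',                       # iphone, blackberry or sony
           'resolution': 'high'}                    # orig, high, medium, small, tiny
r = requests.post('http://localhost:5000/process', json=payload)  # assumed route
with open('enhanced.jpg', 'wb') as f:
    f.write(r.content)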
Example #9
def predict(data, **kwargs):
    """
    模型预测,单张图片不复制
    :param
        data(tensor) -- 图片数据
    :kwargs

    :return(numpy)
        预测结果
    """
    # choose CPU or GPU to run on
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # load the models
    models = []
    for k, v in kwargs.items():
        if k == '1':
            model = MFFNet()
        elif k == '2':
            # 299 x 299
            model = inception(pretrained = False)
        elif k == '3':
            model = resnet(pretrained = False)
        elif k == '4':
            model = densenet(pretrained = False)
        elif k == '5':
            model = senet(pretrained = False)
        # load the weights
        model.load_state_dict(torch.load(v))
        model = model.to(device)
        # evaluation mode
        model.eval()
        models.append(model)

    # get the ensemble output with an averaging strategy
    data = data.to(device)
    output = None
    # averaging strategy
    for model in models:
        if output is None:
            output = model(data).detach()
        else:
            output += model(data).detach()
    output = output / len(models)
    _, a = torch.max(output, 1)
    a = a.cpu().detach().numpy()

    # prediction result
    return a
Example #10
def get_network(args, depth=10, width=10):
    """ return given network
    """
    if args.task == 'cifar10':
        nclass = 10
    elif args.task == 'cifar100':
        nclass = 100
    # Yang added non-batch-norm VGGs
    if args.net == 'vgg11':
        if args.batch_norm:
            net = vgg11_bn(num_classes=nclass)
        else:
            net = vgg11(num_classes=nclass)
    elif args.net == 'vgg13':
        if args.batch_norm:
            net = vgg13_bn(num_classes=nclass)
        else:
            net = vgg13(num_classes=nclass)
    elif args.net == 'vgg16':
        if args.batch_norm:
            net = vgg16_bn(num_classes=nclass)
        else:
            net = vgg16(num_classes=nclass)
    elif args.net == 'vgg19':
        if args.batch_norm:
            net = vgg19_bn(num_classes=nclass)
        else:
            net = vgg19(num_classes=nclass)

    elif args.net == 'resnet':
        net = resnet(num_classes=nclass, depth=depth, width=width)
    # elif args.net == 'resnet34':
    #     net = resnet34(num_classes=nclass)
    # elif args.net == 'resnet50':
    #     net = resnet50(num_classes=nclass)
    # elif args.net == 'resnet101':
    #     net = resnet101(num_classes=nclass)
    # elif args.net == 'resnet152':
    #     net = resnet152(num_classes=nclass)

    else:
        print('the network name you have entered is not supported yet')
        sys.exit()

    if args.gpu:  #use_gpu
        net = net.cuda()

    return net
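Only a few attributes of args are read, so a bare namespace is enough to exercise the selector. A minimal sketch, assuming the VGG/ResNet constructors above are importable:

from argparse import Namespace

args = Namespace(task='cifar10', net='resnet', batch_norm=False, gpu=False)
net = get_network(args, depth=28, width=10)  # -> resnet(num_classes=10, depth=28, width=10)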
Example #11
def train():
    print(lr)
    train_size = len(train_loader)

    total_loss = 0.0

    total = 0.0
    correct = 0.0
    n = 0

    for i, (inputs, targets) in enumerate(train_loader):
        # Convert from list of 3D to 4D
        inputs = np.stack(inputs, axis=1)

        #plt.imshow(np.transpose(inputs[0][0], (1, 2, 0)))
        #plt.show()

        inputs = torch.from_numpy(inputs)

        inputs, targets = inputs.cuda(), targets.cuda()
        inputs, targets = Variable(inputs), Variable(targets)

        # compute output
        outputs = resnet(inputs)
        loss = criterion(outputs, targets)

        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += (predicted.cpu() == targets.cpu()).sum().item()
        total_loss += loss.item()
        n += 1

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i + 1) % 5 == 0:
            print("\tIter [%d/%d] Loss: %.4f" %
                  (i + 1, train_size, loss.item()))

    avg_acc = 100 * correct / total

    train_loss.append(total_loss / len(train_loader))
    train_acc.append(avg_acc)
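train() reads resnet, criterion, optimizer, train_loader, lr and the bookkeeping lists from module scope. A minimal setup sketch, assuming a CIFAR-style resnet constructor (its signature is an assumption):

import torch.nn as nn
import torch.optim as optim

lr = 0.1
resnet = resnet(depth=20, num_classes=10).cuda()  # rebind the name to a model instance
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(resnet.parameters(), lr=lr, momentum=0.9)
train_loss, train_acc = [], []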
Example #12
def eval(data_loader, is_test=False):
    if is_test:
        load_checkpoint()

    # Eval
    total = 0.0
    correct = 0.0

    total_loss = 0.0
    n = 0

    start = time.time()

    for i, (inputs, targets) in enumerate(data_loader):
        with torch.no_grad():
            # Convert from list of 3D to 4D
            inputs = np.stack(inputs, axis=1)

            inputs = torch.from_numpy(inputs)

            inputs, targets = inputs.cuda(), targets.cuda()
            inputs, targets = Variable(inputs), Variable(targets)

            # compute output
            outputs = resnet(inputs)
            loss = criterion(outputs, targets)

            total_loss += loss.item()
            n += 1

            _, predicted = torch.max(outputs.data, 1)
            total += targets.size(0)
            correct += (predicted.cpu() == targets.cpu()).sum().item()

    time_taken = time.time() - start
    print('Time: ', time_taken)
    print('Avg: ', time_taken / len(data_loader) * 1000, 'ms')

    avg_acc = 100 * correct / total
    avg_loss = total_loss / n

    val_loss.append(avg_loss)
    val_acc.append(avg_acc)

    return avg_acc, avg_loss
Example #13
def init_network(args, num_classes):
    kwargs = {'depth': args.depth, 'num_classes': num_classes}
    depth = str(args.depth)
    if args.network == 'vgg':
        kwargs.update({'avg_pool2d': args.avg_pool2d})
        kwargs.update({'batch_norm': True})
        net = vgg(**kwargs)
    elif args.network == 'resnet':
        dataset = args.data_dir.split('/')[-1]
        if 'cifar' in dataset.lower():
            kwargs.update({'dataset': 'cifar'})
        net = resnet(**kwargs)
    elif args.network == 'reskipnet':
        net = reskipnet(**kwargs)
    elif args.network == 'resnext':
        dataset = args.data_dir.split('/')[-1]
        if 'cifar' in dataset.lower():
            kwargs.update({'dataset': 'cifar'})
        kwargs.update({
            'cardinality': args.cardinality,
            'base_width': args.base_width
        })
        net = resnext(**kwargs)
        depth = '{}_{}_{}d'.format(depth, args.cardinality, args.base_width)
    else:
        raise NotImplementedError
    if args.resume:
        dataset = args.data_dir.split('/')[-1]
        path_to_add = '{}/{}/{}'.format(dataset, args.network, depth)
        assert os.path.exists('{}/checkpoint/original/{}'.format(args.result_dir, path_to_add)),\
            'No checkpoint directory for original model!'
        checkpoint_path, _, previous_epochs = get_best_checkpoint(
            '{}/checkpoint/original/{}'.format(args.result_dir, path_to_add))
        checkpoint = torch.load(checkpoint_path, map_location='cpu')
        net.load_state_dict(checkpoint)
        previous_accuracy = checkpoint_path.split('_')[-2]
    else:
        previous_accuracy = 0
        previous_epochs = 0
    if torch.cuda.is_available():
        net.cuda()
    return net, float(previous_accuracy), previous_epochs, depth
Example #14
File: selector.py Project: bboylyg/NAD
def select_model(dataset,
                 model_name,
                 pretrained=False,
                 pretrained_models_path=None,
                 n_classes=10):

    assert model_name in ['WRN-16-1', 'WRN-16-2', 'WRN-40-1', 'WRN-40-2', 'ResNet34', 'WRN-10-2', 'WRN-10-1']
    if model_name=='WRN-16-1':
        model = WideResNet(depth=16, num_classes=n_classes, widen_factor=1, dropRate=0)
    elif model_name=='WRN-16-2':
        model = WideResNet(depth=16, num_classes=n_classes, widen_factor=2, dropRate=0)
    elif model_name=='WRN-40-1':
        model = WideResNet(depth=40, num_classes=n_classes, widen_factor=1, dropRate=0)
    elif model_name=='WRN-40-2':
        model = WideResNet(depth=40, num_classes=n_classes, widen_factor=2, dropRate=0)
    elif model_name == 'WRN-10-2':
        model = WideResNet(depth=10, num_classes=n_classes, widen_factor=2, dropRate=0)
    elif model_name == 'WRN-10-1':
        model = WideResNet(depth=10, num_classes=n_classes, widen_factor=1, dropRate=0)
    elif model_name=='ResNet34':
        model = resnet(depth=32, num_classes=n_classes)
    else:
        raise NotImplementedError

    if pretrained:
        model_path = os.path.join(pretrained_models_path)
        print('Loading Model from {}'.format(model_path))
        checkpoint = torch.load(model_path, map_location='cpu')
        print(checkpoint.keys())
        model.load_state_dict(checkpoint['state_dict'])

        #print("=> loaded checkpoint '{}' (epoch {}) (accuracy {})".format(model_path, checkpoint['epoch'], checkpoint['best_prec']))
        print("=> loaded checkpoint '{}' (epoch {}) ".format(model_path, checkpoint['epoch']))


    return model
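A minimal call sketch; the checkpoint path is an assumption, and the file must hold an 'epoch' key plus the weights under 'state_dict':

model = select_model('CIFAR10', 'WRN-16-2',
                     pretrained=True,
                     pretrained_models_path='weights/WRN-16-2.pth',  # hypothetical path
                     n_classes=10)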
Example #15
def eval(data_loader, is_test=False):
    if is_test:
        load_checkpoint()

    # Eval
    total = 0.0
    correct = 0.0

    total_loss = 0.0
    n = 0

    for i, (inputs, targets) in enumerate(data_loader):
        with torch.no_grad():
            # Convert from list of 3D to 4D
            inputs = np.stack(inputs, axis=1)

            inputs = torch.from_numpy(inputs)

            inputs, targets = inputs.cuda(), targets.cuda()
            inputs, targets = Variable(inputs), Variable(targets)

            # compute output
            outputs = resnet(inputs)
            loss = criterion(outputs, targets)

            total_loss += loss.item()
            n += 1

            _, predicted = torch.max(outputs.data, 1)
            total += targets.size(0)
            correct += (predicted.cpu() == targets.cpu()).sum().item()

    avg_test_acc = 100 * correct / total
    avg_loss = total_loss / n

    return avg_test_acc, avg_loss
Example #16
def train():
    train_size = len(train_loader)

    for i, (inputs, targets) in enumerate(train_loader):
        # Convert from list of 3D to 4D
        inputs = np.stack(inputs, axis=1)

        inputs = torch.from_numpy(inputs)

        inputs, targets = inputs.cuda(), targets.cuda()
        inputs, targets = Variable(inputs), Variable(targets)

        # compute output
        outputs = resnet(inputs)
        loss = criterion(outputs, targets)

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i + 1) % args.print_freq == 0:
            print("\tIter [%d/%d] Loss: %.4f" %
                  (i + 1, train_size, loss.item()))
Example #17
File: main.py Project: at553/tfdepth
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_float('learning_rate', 0.01, 'Learning rate')
flags.DEFINE_integer('batch_size', 25, 'Batch size')

X_train, Y_train, X_test, Y_test = load_data()

batch_size = 128

X = tf.placeholder("float", [batch_size, 32, 32, 3])
Y = tf.placeholder("float", [batch_size, 10])
learning_rate = tf.placeholder("float", [])


net = models.resnet(X, 20)


cross_entropy = -tf.reduce_sum(Y*tf.log(net))
opt = tf.train.MomentumOptimizer(learning_rate, 0.9)
train_op = opt.minimize(cross_entropy)

sess = tf.Session()
sess.run(tf.initialize_all_variables())

correct_prediction = tf.equal(tf.argmax(net, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

saver = tf.train.Saver()

for j in range(10):
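    # The original example is truncated here; a hypothetical completion of the
    # loop body, assuming X_train/Y_train/X_test/Y_test from load_data() match
    # the placeholders defined above:
    for i in range(0, len(X_train) - batch_size + 1, batch_size):
        sess.run(train_op, feed_dict={X: X_train[i:i + batch_size],
                                      Y: Y_train[i:i + batch_size],
                                      learning_rate: 0.01 * (0.1 ** j)})  # step decay is an assumption
    acc = sess.run(accuracy, feed_dict={X: X_test[:batch_size],
                                        Y: Y_test[:batch_size]})
    print("epoch %d, test-batch accuracy %.4f" % (j, acc))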
Example #18
                    help='depth of the resnet')
parser.add_argument('--percent', type=float, default=0.5,
                    help='scale sparse rate (default: 0.5)')
parser.add_argument('--model', default='./logs/checkpoint_resnet.pth.tar', type=str, metavar='PATH',
                    help='path to the model (default: none)')
parser.add_argument('--save', default='./logs', type=str, metavar='PATH',
                    help='path to save pruned model (default: none)')

args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
args.cuda = False

if not os.path.exists(args.save):
    os.makedirs(args.save)

model = resnet(depth=args.depth, dataset=args.dataset)

if args.cuda:
    model.cuda()
if args.model:
    if os.path.isfile(args.model):
        print("=> loading checkpoint '{}'".format(args.model))
        checkpoint = torch.load(args.model)
        args.start_epoch = checkpoint['epoch']
        best_prec1 = checkpoint['best_prec1']
        model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint '{}' (epoch {}) Prec1: {:f}"
              .format(args.model, checkpoint['epoch'], best_prec1))
    else:
        print("=> no checkpoint found at '{}'".format(args.resume))
Example #19
    loss_discrim1 = []
    loss_generator1 = []
    loss_mse1 = []
    loss_psnr1 = []
    loss_texture1 = []
    loss_tv1 = []
    discim_accuracy1 = []

    # get processed enhanced image
    for i in range(0, GPU_NUM):
        with tf.device("/gpu:{}".format(i)), tf.variable_scope(
                name_or_scope=tf.get_variable_scope(),
                # re-using variables across GPUs.
                reuse=True or (i > 0)):
            print("Building graph on GPU:{}".format(i))
            enhanced = models.resnet(phone_image[i])

            # transform both dslr and enhanced images to grayscale

            enhanced_gray = tf.reshape(tf.image.rgb_to_grayscale(enhanced),
                                       [-1, PATCH_WIDTH * PATCH_HEIGHT])
            dslr_gray = tf.reshape(tf.image.rgb_to_grayscale(dslr_image[i]),
                                   [-1, PATCH_WIDTH * PATCH_HEIGHT])

            # push randomly the enhanced or dslr image to an adversarial CNN-discriminator

            adversarial_ = tf.multiply(
                enhanced_gray, 1 - adv_[i]) + tf.multiply(dslr_gray, adv_[i])
            adversarial_image = tf.reshape(adversarial_,
                                           [-1, PATCH_HEIGHT, PATCH_WIDTH, 1])
Example #20
torch.cuda.manual_seed(args.seed)


for arg in vars(args):
    print(arg, getattr(args, arg))
if not os.path.isdir('checkpoint/'):
    os.makedirs('checkpoint/')
# get dataset
train_loader, test_loader = getData(
    name='cifar10', train_bs=args.batch_size, test_bs=args.test_batch_size)

# make sure to use cudnn.benchmark for second backprop
cudnn.benchmark = True

# get model and optimizer
model = resnet(num_classes=10, depth=args.depth).cuda()
print(model)
model = torch.nn.DataParallel(model)
print('    Total params: %.2fM' % (sum(p.numel()
                                       for p in model.parameters()) / 1000000.0))

criterion = nn.CrossEntropyLoss()
if args.optimizer == 'sgd':
    optimizer = optim.SGD(
        model.parameters(),
        lr=args.lr,
        momentum=0.9,
        weight_decay=args.weight_decay)
elif args.optimizer == 'adam':
    optimizer = optim.Adam(
        model.parameters(),
Example #21
    def __init__(self,
                 n_classes=1,
                 backbone_name='resnet18',
                 in_channels=3,
                 load_pretrained=True,
                 norm_type='bn',
                 use_cbam=False,
                 p=0.1,
                 dam_scale=1):
        super(unet, self).__init__()

        bias = not (norm_type == 'bn' or norm_type == 'gn')

        backbone_name = 'resnet18' if backbone_name is None else backbone_name

        self.dropout = nn.Dropout2d(p=p)

        self.backbone = resnet(name=backbone_name,
                               in_channels=in_channels,
                               load_pretrained=load_pretrained,
                               norm_type=norm_type,
                               use_cbam=use_cbam)
        expansion = self.backbone.block.expansion
        self.ppm = PPM(in_channels=512 * expansion,
                       pool_sizes=[6, 3, 2, 1],
                       mode='sum',
                       norm_type=norm_type)

        self.c1br_16x = ConvBatchNormRelu(in_channels=256 * expansion,
                                          out_channels=128 * expansion,
                                          kernel_size=1,
                                          stride=1,
                                          padding=0,
                                          bias=bias,
                                          norm_type=norm_type)
        self.c1br_8x = ConvBatchNormRelu(in_channels=128 * expansion,
                                         out_channels=64 * expansion,
                                         kernel_size=1,
                                         stride=1,
                                         padding=0,
                                         bias=bias,
                                         norm_type=norm_type)
        self.c1br_4x = ConvBatchNormRelu(in_channels=64 * expansion,
                                         out_channels=32 * expansion,
                                         kernel_size=1,
                                         stride=1,
                                         padding=0,
                                         bias=bias,
                                         norm_type=norm_type)
        self.c1br_2x = ConvBatchNormRelu(in_channels=64 * expansion,
                                         out_channels=16 * expansion,
                                         kernel_size=1,
                                         stride=1,
                                         padding=0,
                                         bias=bias,
                                         norm_type=norm_type)

        self.c3br_ppm = ConvBatchNormRelu(in_channels=512 * expansion,
                                          out_channels=128 * expansion,
                                          kernel_size=3,
                                          stride=1,
                                          padding=1,
                                          bias=bias,
                                          norm_type=norm_type)
        self.c3br2_16x = nn.Sequential(
            ConvBatchNormRelu(in_channels=256 * expansion,
                              out_channels=64 * expansion,
                              kernel_size=3,
                              stride=1,
                              padding=1,
                              bias=bias,
                              norm_type=norm_type),
            ConvBatchNormRelu(in_channels=64 * expansion,
                              out_channels=64 * expansion,
                              kernel_size=3,
                              stride=1,
                              padding=1,
                              bias=bias,
                              norm_type=norm_type),
        )
        self.c3br2_8x = nn.Sequential(
            ConvBatchNormRelu(in_channels=128 * expansion,
                              out_channels=32 * expansion,
                              kernel_size=3,
                              stride=1,
                              padding=1,
                              bias=bias,
                              norm_type=norm_type),
            ConvBatchNormRelu(in_channels=32 * expansion,
                              out_channels=32 * expansion,
                              kernel_size=3,
                              stride=1,
                              padding=1,
                              bias=bias,
                              norm_type=norm_type),
        )
        self.c3br2_4x = nn.Sequential(
            ConvBatchNormRelu(in_channels=64 * expansion,
                              out_channels=16 * expansion,
                              kernel_size=3,
                              stride=1,
                              padding=1,
                              bias=bias,
                              norm_type=norm_type),
            ConvBatchNormRelu(in_channels=16 * expansion,
                              out_channels=16 * expansion,
                              kernel_size=3,
                              stride=1,
                              padding=1,
                              bias=bias,
                              norm_type=norm_type),
        )
        self.c3br2_2x = nn.Sequential(
            ConvBatchNormRelu(in_channels=32 * expansion,
                              out_channels=8 * expansion,
                              kernel_size=3,
                              stride=1,
                              padding=1,
                              bias=bias,
                              norm_type=norm_type),
            ConvBatchNormRelu(in_channels=8 * expansion,
                              out_channels=8 * expansion,
                              kernel_size=3,
                              stride=1,
                              padding=1,
                              bias=bias,
                              norm_type=norm_type),
        )

        #"""
        self.pam = PAM(in_channels=32 * expansion,
                       reduction_ratio=2,
                       norm_type=norm_type,
                       scale=dam_scale)
        self.cam = CAM(scale=dam_scale)
        self.c3br_pam = ConvBatchNormRelu(in_channels=32 * expansion,
                                          out_channels=32 * expansion,
                                          kernel_size=3,
                                          stride=1,
                                          padding=1,
                                          bias=bias,
                                          norm_type=norm_type)
        self.c3br_cam = ConvBatchNormRelu(in_channels=32 * expansion,
                                          out_channels=32 * expansion,
                                          kernel_size=3,
                                          stride=1,
                                          padding=1,
                                          bias=bias,
                                          norm_type=norm_type)
        #"""

        # (Auxiliary) Classifier
        self.gamma_gap = nn.Parameter(torch.zeros(1))
        self.aux_cls_gap = nn.Sequential(
            ConvBatchNormRelu(in_channels=128 * expansion,
                              out_channels=32 * expansion,
                              kernel_size=1,
                              stride=1,
                              padding=0,
                              bias=bias,
                              norm_type=norm_type), self.dropout,
            nn.Conv2d(32 * expansion,
                      n_classes,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=True))
        self.aux_cls_ppm = nn.Sequential(
            ConvBatchNormRelu(in_channels=128 * expansion,
                              out_channels=32 * expansion,
                              kernel_size=3,
                              stride=1,
                              padding=1,
                              bias=bias,
                              norm_type=norm_type), self.dropout,
            nn.Conv2d(32 * expansion,
                      n_classes,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=True))
        self.aux_cls_4x = nn.Sequential(
            ConvBatchNormRelu(in_channels=32 * expansion,
                              out_channels=8 * expansion,
                              kernel_size=3,
                              stride=1,
                              padding=1,
                              bias=bias,
                              norm_type=norm_type), self.dropout,
            nn.Conv2d(8 * expansion,
                      n_classes,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=True))
        self.cls = nn.Sequential(
            ConvBatchNormRelu(in_channels=8 * expansion,
                              out_channels=8 * expansion,
                              kernel_size=3,
                              stride=1,
                              padding=1,
                              bias=bias,
                              norm_type=norm_type), self.dropout,
            nn.Conv2d(8 * expansion,
                      n_classes,
                      kernel_size=1,
                      stride=1,
                      padding=0,
                      bias=True))

        self._init_weights(load_pretrained=load_pretrained)
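A minimal instantiation sketch; the backbone name and class count below are assumptions, not taken from the source:

net = unet(n_classes=19, backbone_name='resnet34', norm_type='bn', p=0.1)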
Example #22
def main(args):

    run_idx = sys.argv[1]
    
    # Paths
    plotpath = path_prefix+'classifier/Plots/'
    modelpath = path_prefix+'classifier/Models/'

    # metadata
    layers = ["EMB1", "EMB2", "EMB3", "TileBar0", "TileBar1", "TileBar2"]
    cell_size_phi = [0.098, 0.0245, 0.0245, 0.1, 0.1, 0.1]
    cell_size_eta = [0.0031, 0.025, 0.05, 0.1, 0.1, 0.2]
    len_phi = [4, 16, 16, 4, 4, 4]
    len_eta = [128, 16, 8, 4, 4, 2]
    cell_shapes = {layers[i]:(len_eta[i],len_phi[i]) for i in range(len(layers))}

    # Get the data.
    inputpath = path_prefix+'data/pion/'
    rootfiles = ["pi0", "piplus", "piminus"]
    branches = ['runNumber', 'eventNumber', 'truthE', 'truthPt', 'truthEta', 'truthPhi', 'clusterIndex', 'nCluster', 'clusterE', 'clusterECalib', 'clusterPt', 'clusterEta', 'clusterPhi', 'cluster_nCells', 'cluster_sumCellE', 'cluster_ENG_CALIB_TOT', 'cluster_ENG_CALIB_OUT_T', 'cluster_ENG_CALIB_DEAD_TOT', 'cluster_EM_PROBABILITY', 'cluster_HAD_WEIGHT', 'cluster_OOC_WEIGHT', 'cluster_DM_WEIGHT', 'cluster_CENTER_MAG', 'cluster_FIRST_ENG_DENS', 'cluster_cell_dR_min', 'cluster_cell_dR_max', 'cluster_cell_dEta_min', 'cluster_cell_dEta_max', 'cluster_cell_dPhi_min', 'cluster_cell_dPhi_max', 'cluster_cell_centerCellEta', 'cluster_cell_centerCellPhi', 'cluster_cell_centerCellLayer', 'cluster_cellE_norm']

    trees = {
        rfile : ur.open(inputpath+rfile+".root")['ClusterTree']
        for rfile in rootfiles
    }
    pdata = {
        ifile : itree.pandas.df(branches, flatten=False)
        for ifile, itree in trees.items()
    }

    # Selecting events -- making sure we have as much signal as background.

    n_indices = {}
    n_max = int(np.min(np.array([len(pdata[key]) for key in trees.keys()])))
    rng = np.random.default_rng()

    # If we have a piminus key, assume the dataset are piplus, piminus, pi0
    if('piminus' in trees.keys()):
        n_indices['piplus']  = int(np.ceil((n_max / 2)))
        n_indices['piminus'] = int(np.floor((n_max / 2)))
        n_indices['pi0']     = n_max
    
    # Otherwise, assume we already have piplus (or piplus + piminus) and pi0, no merging needed
    else: n_indices = {key: n_max for key in trees.keys()}
    indices = {key:rng.choice(len(pdata[key]), n_indices[key], replace=False) for key in trees.keys()}

    # Make a boolean array version of our indices, since pandas is weird and doesn't handle non-bool indices?
    bool_indices = {}
    for key in pdata.keys():
        bool_indices[key] = np.full(len(pdata[key]), False)
        bool_indices[key][indices[key]] = True

    # Apply the (bool) indices to pdata
    for key in trees.keys():
        pdata[key] = pdata[key][bool_indices[key]]

    # prepare pcells -- immediately apply our selected indices
    pcells = {
        ifile : {
            layer : mu.setupCells(itree, layer, indices = indices[ifile])
            for layer in layers
        }
        for ifile, itree in trees.items()
    }

    # Now with the data extracted from the trees into pcells, we merge pdata and pcells as needed.
    # Note the order in which we concatenate things: piplus -> piplus + piminus.
    if('piminus' in trees.keys()):
    
        # merge pdata
        pdata['piplus'] = pdata['piplus'].append(pdata['piminus'])
        del pdata['piminus']
    
        # merge contents of pcells
        for layer in layers:
            pcells['piplus'][layer] = np.row_stack((pcells['piplus'][layer],pcells['piminus'][layer]))
        del pcells['piminus']
        
    # Now split things into training/validation/testing data.
    training_dataset = ['pi0','piplus']

    # create train/validation/test subsets containing 70%/10%/20%
    # of events from each type of pion event
    for p_index, plabel in enumerate(training_dataset):
        mu.splitFrameTVT(pdata[plabel],trainfrac=0.7)
        pdata[plabel]['label'] = p_index

    # merge signal and background now
    pdata_merged = pd.concat([pdata[ptype] for ptype in training_dataset])
    pcells_merged = {
        layer : np.concatenate([pcells[ptype][layer]
                                for ptype in training_dataset])
        for layer in layers
    }
    plabels = np_utils.to_categorical(pdata_merged['label'],len(training_dataset))


    # Tensorflow setup.
    ngpu = 1
    gpu_list = ["/gpu:"+str(i) for i in range(ngpu)]
    strategy = tf.distribute.MirroredStrategy(devices=gpu_list)
    ngpu = strategy.num_replicas_in_sync
    print('Number of devices: {}'.format(ngpu))

    models = {}
    model_history = {}
    model_scores = {}
    model_performance = {}


    # Prepare the ResNet model.
    tf.keras.backend.set_image_data_format('channels_last')
    lr = 5e-5
    input_shape = (128,16)
    model_resnet = resnet(strategy, lr=lr)(input_shape)


    # Minor extra data prep -- key names match those defined within resnet model in models.py!
    pcells_merged_unflattened = {'input' + str(i):pcells_merged[key].reshape(tuple([-1] + list(cell_shapes[key]))) for i,key in enumerate(pcells_merged.keys())}
    rn_train = {key:val[pdata_merged.train] for key,val in pcells_merged_unflattened.items()}
    rn_valid = {key:val[pdata_merged.val] for key,val in pcells_merged_unflattened.items()}
    rn_test = {key:val[pdata_merged.test] for key,val in pcells_merged_unflattened.items()}

    nepochs = 10
    batch_size = 20 * ngpu
    verbose = 1 # 2 for a lot of printouts

    model_key = 'resnet'
    rn_dir = modelpath + 'resnet' # directory for saving ResNet
    models[model_key] = model_resnet

    # train+validate model
    model_history[model_key] = models[model_key].fit(
        x=rn_train,
        y=plabels[pdata_merged.train],
        validation_data=(
            rn_valid,
            plabels[pdata_merged.val]
        ),
        epochs=nepochs,
        batch_size=batch_size,
        verbose=verbose
    )
    
    model_history[model_key] = model_history[model_key].history
    
    # get overall performance metric
    model_performance[model_key] = models[model_key].evaluate(
        x=rn_test,
        y=plabels[pdata_merged.test],
        verbose=0
    )
    
    # get network scores for the dataset
    model_scores[model_key] = models[model_key].predict(
        pcells_merged_unflattened
    )

    os.makedirs(rn_dir, exist_ok=True)  # create the save directory if needed

    models[model_key].save(rn_dir + '/' + 'resnet_{}.h5'.format(run_idx))

    with open(rn_dir + '/' + 'resnet_{}.history'.format(run_idx),'wb') as model_history_file:
        pickle.dump(model_history[model_key], model_history_file)
    
    print('Done.')
Example #23
transform_train = transforms.Compose([
    transforms.ToTensor(),
])

trainset = datasets.CIFAR10(root='../data',
                            train=True,
                            download=True,
                            transform=transform_train)
hessian_loader = torch.utils.data.DataLoader(trainset,
                                             batch_size=128,
                                             shuffle=True)

# get model and optimizer
model_list = {
    'c1': c1_model(),
    'ResNet': resnet(depth=args.depth),
}

model = model_list[args.arch].cuda()
model = torch.nn.DataParallel(model)

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)

########### training
if args.method == 'absa':
    model, num_updates = hf_optm.absa(model,
                                      train_loader,
                                      hessian_loader,
                                      test_loader,
                                      criterion,
Example #24
def trainer():
    global load_thread
    load_thread.start()
    X = tf.placeholder("float", [None, 224, 224, 3])
    Y = tf.placeholder("float", [None, n_classes])

    # ResNet Models
    pred = models.resnet(X, 56)
    #net = models.resnet(X, 32)
    # net = models.resnet(X, 44)
    # net = models.resnet(X, 56)

    cost = -tf.reduce_sum(Y * tf.log(pred))
    #cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, Y))
    optimizer = tf.train.GradientDescentOptimizer(
        learning_rate=learning_rate).minimize(cost)

    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    info2 = tf.argmax(pred, 1)

    # initialize all shared variables
    init = tf.initialize_all_variables()
    # start a training session

    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(init)
        global step
        global data_queue
        global lr
        lr = 1e-4
        step = 0
        #saver.restore(sess, "/home/jess/Disk1/ilsvrc12/tf_vgg_ccnn_model_iter27501.ckpt")
        #step = 27501
        # Keep training until reach max iterations

        while step * batch_size < training_iters:
            epcho1 = np.floor((step * batch_size) / glen1)
            if ((step * batch_size) % glen1 < batch_size) and (epcho1 > 0):
                lr /= 10

            batch_xs, batch_ys = data_queue.get()
            # fetch a batch of data
            sess.run(optimizer,
                     feed_dict={
                         X: batch_xs,
                         Y: batch_ys,
                         keep_prob: dropout,
                         learning_rate: lr
                     })
            if (step % 2500 == 1):
                save_path = saver.save(
                    sess,
                    "/home/jess/Disk1/ilsvrc12/tf_resnet_ccnn_model_iter" +
                    str(step) + ".ckpt",
                    global_step=step)
                print("Model saved in file at iteration %d: %s" %
                      (step * batch_size, save_path))

            if step % display_step == 1:

                # compute the loss
                loss = sess.run(cost,
                                feed_dict={
                                    X: batch_xs,
                                    Y: batch_ys,
                                    keep_prob: 1.
                                })
                acc = sess.run(accuracy,
                               feed_dict={
                                   X: batch_xs,
                                   Y: batch_ys,
                                   keep_prob: 1.
                               })
                #info=sess.run(info2,feed_dict={X:batch_xs, keep_prob:1.})
                #print(info)
                print "Iter=" + str(step * batch_size) + "/epcho=" + str(
                    np.floor(
                        (step * batch_size) /
                        glen1)) + ", Loss= " + "{:.6f}".format(
                            loss) + ", Training Accuracy=" + "{:.5f}".format(
                                acc) + ", lr=" + str(lr)
            step += 1

        print "Optimization Finished!"
        #saver.save(sess, 'jigsaw', global_step=step)
        save_path = saver.save(
            sess,
            "/home/jess/Disk1/ilsvrc12/tf_resnet_ccnn_model.ckpt",
            global_step=step)
        print("Model saved in file: %s" % save_path)

        # compute test accuracy
        batch_xs, batch_ys = imagenet_batch(4096)
        print "Testing Accuracy:", sess.run(accuracy,
                                            feed_dict={
                                                x: batch_xs,
                                                y: batch_ys,
                                                keep_prob: 1.
                                            })
Example #25
def stylize(initial,
            initial_noiseblend,
            content,
            styles,
            preserve_colors,
            iterations,
            content_weight,
            content_weight_blend,
            style_weight,
            style_layer_weight_exp,
            style_blend_weights,
            tv_weight,
            learning_rate,
            beta1,
            beta2,
            epsilon,
            pooling,
            print_iterations=None,
            checkpoint_iterations=None):

    # Set parameters for training
    lr = 100
    content_coeff = 1
    style_coeff = 1000000
    tv_coeff = 0.000001

    directory = '../resnet/cifar_10_progress/'
    CONTENT_LAYERS = ['conv3', 'conv3_x']
    STYLE_LAYERS = [
        'conv1', 'conv2_x', 'conv2', 'conv3_x', 'conv3', 'conv4', 'conv4_x'
    ]

    content_layers_weights = {}
    for layer in CONTENT_LAYERS:
        content_layers_weights[layer] = 1

    style_layers_weights = {}
    for style_layer in STYLE_LAYERS:
        style_layers_weights[style_layer] = 1

    shape = (1, ) + content.shape
    style_shapes = [(1, ) + style.shape for style in styles]
    content_features = {}
    style_features = [{} for _ in styles]

    # compute content features in feedforward mode
    g = tf.Graph()
    with g.as_default(), g.device('/cpu:0'), tf.Session() as sess:

        image = tf.placeholder('float', shape=shape)

        net, _ = resnet(image, 20)

        saver = tf.train.Saver()
        saver.restore(sess, directory)

        pre_content = np.array([content])

        for layer in CONTENT_LAYERS:
            content_features[layer] = net[layer].eval(
                feed_dict={image: pre_content})

    # compute style features in feedforward mode
    for i in range(len(styles)):
        g = tf.Graph()
        with g.as_default(), g.device('/cpu:0'), tf.Session() as sess:
            image = tf.placeholder('float', shape=style_shapes[i])

            net, layer_list = resnet(image, 20)

            saver = tf.train.Saver()
            saver.restore(sess, directory)

            pre_style = np.array([styles[i]])

            for k in range(len(layer_list)):
                layer_list[k] = sess.run(layer_list[k],
                                         feed_dict={image: pre_style})

            for layer in STYLE_LAYERS:

                features = net[layer].eval(feed_dict={image: pre_style})
                features = np.reshape(features, (-1, features.shape[3]))
                gram = np.matmul(features.T, features) / features.size
                style_features[i][layer] = gram

    initial_content_noise_coeff = 1.0 - initial_noiseblend

    # make stylized image using backpropogation
    g_net = tf.Graph()
    with g_net.as_default():
        if initial is None:
            noise = np.random.normal(size=shape, scale=np.std(content) * 0.1)
            initial = tf.random_normal(shape) * 0.256
        else:
            initial = initial.astype('float32')
            noise = np.random.normal(size=shape, scale=np.std(content) * 0.1)
            initial = (initial) * initial_content_noise_coeff + (
                tf.random_normal(shape) *
                0.256) * (1.0 - initial_content_noise_coeff)
        #image = tf.Variable(initial)
        image = tf.Variable(pre_content.astype('float32'))

        net = resnet_m(image, 20, layer_list)

        # content loss
        content_loss = 0
        content_losses = []
        for content_layer in CONTENT_LAYERS:
            content_losses.append(
                content_layers_weights[content_layer] * content_weight *
                (2 * tf.nn.l2_loss(net[content_layer] -
                                   content_features[content_layer]) /
                 content_features[content_layer].size))
        content_loss += reduce(tf.add, content_losses)
        content_loss *= content_coeff

        # style loss
        style_loss = 0
        style_losses = []
        for style_layer in STYLE_LAYERS:

            # TensorBoard
            output = net[style_layer]
            output_shape = int(output.get_shape()[3])
            output_split = tf.split(output,
                                    num_or_size_splits=output_shape,
                                    axis=3)

            feature_list = []
            for j in range(output_shape):
                feature_list.append(output_split[j])
            output_show = tf.concat(feature_list, axis=0)
            tf.summary.image(str(style_layer), output_show, 1000)

            layer = net[style_layer]
            _, height, width, number = map(lambda i: i.value,
                                           layer.get_shape())
            size = height * width * number
            feats = tf.reshape(layer, (-1, number))
            gram = tf.matmul(tf.transpose(feats), feats) / size
            style_gram = style_features[0][style_layer]
            style_loss += style_layers_weights[style_layer] * 2 * tf.nn.l2_loss(
                gram - style_gram) / style_gram.size
        style_loss *= style_coeff

        # total variation denoising
        tv_y_size = _tensor_size(image[:, 1:, :, :])
        tv_x_size = _tensor_size(image[:, :, 1:, :])

        tv_loss = tv_weight * 2 * (
            (tf.nn.l2_loss(image[:, 1:, :, :] - image[:, :shape[1] - 1, :, :])
             / tv_y_size) +
            (tf.nn.l2_loss(image[:, :, 1:, :] - image[:, :, :shape[2] - 1, :])
             / tv_x_size))
        tv_loss *= tv_coeff
        # overall loss
        loss = content_loss + style_loss + tv_loss

        # optimizer setup
        train_step = tf.train.AdamOptimizer(lr, beta1, beta2,
                                            epsilon).minimize(loss)

        def print_progress():
            stderr.write('  content loss: %g\n' % content_loss.eval())
            stderr.write('    style loss: %g\n' % style_loss.eval())
            stderr.write('       tv loss: %g\n' % tv_loss.eval())
            stderr.write('    total loss: %g\n' % loss.eval())

        merge_summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter('./style_x/')

        # optimization
        best_loss = float('inf')
        best = None
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())

            stderr.write('Optimization started...\n')
            if (print_iterations and print_iterations != 0):
                print_progress()
            for i in range(iterations):
                stderr.write('Iteration %4d/%4d\n' % (i + 1, iterations))
                _ = sess.run([train_step])
                print_progress()

                last_step = (i == iterations - 1)
                if last_step or (print_iterations
                                 and i % print_iterations == 0):
                    print_progress()

                if (checkpoint_iterations
                        and i % checkpoint_iterations == 0) or last_step:
                    this_loss = loss.eval()
                    if this_loss < best_loss:
                        best_loss = this_loss
                        best = image.eval()

                    img_out = best.reshape(shape[1:])

                    if preserve_colors and preserve_colors == True:
                        original_image = np.clip(content, 0, 255)
                        styled_image = np.clip(img_out, 0, 255)

                        # Luminosity transfer steps:
                        # 1. Convert stylized RGB->grayscale accoriding to Rec.601 luma (0.299, 0.587, 0.114)
                        # 2. Convert stylized grayscale into YUV (YCbCr)
                        # 3. Convert original image into YUV (YCbCr)
                        # 4. Recombine (stylizedYUV.Y, originalYUV.U, originalYUV.V)
                        # 5. Convert recombined image from YUV back to RGB

                        # 1
                        styled_grayscale = rgb2gray(styled_image)
                        styled_grayscale_rgb = gray2rgb(styled_grayscale)

                        # 2
                        styled_grayscale_yuv = np.array(
                            Image.fromarray(
                                styled_grayscale_rgb.astype(
                                    np.uint8)).convert('YCbCr'))

                        # 3
                        original_yuv = np.array(
                            Image.fromarray(original_image.astype(
                                np.uint8)).convert('YCbCr'))

                        # 4
                        w, h, _ = original_image.shape
                        combined_yuv = np.empty((w, h, 3), dtype=np.uint8)
                        combined_yuv[..., 0] = styled_grayscale_yuv[..., 0]
                        combined_yuv[..., 1] = original_yuv[..., 1]
                        combined_yuv[..., 2] = original_yuv[..., 2]

                        # 5
                        img_out = np.array(
                            Image.fromarray(combined_yuv,
                                            'YCbCr').convert('RGB'))

                    yield ((None if last_step else i), img_out)
예제 #26
0
파일: test_model.py 프로젝트: yexuehua/DPED
res_sizes = utils.get_resolutions()

# get the specified image resolution
IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_SIZE = utils.get_specified_res(
    res_sizes, phone, resolution)

# disable gpu if specified
config = tf.ConfigProto(
    device_count={'GPU': 0}) if use_gpu == "false" else None

# create placeholders for input images
x_ = tf.placeholder(tf.float32, [None, None, 3])
#x_image = tf.reshape(x_, [-1, xsize[0], xsize[1], 3])
x_imge = tf.expand_dims(x_, axis=0)
# generate enhanced image
enhanced = resnet(x_imge)
'''
with tf.Session(config=config) as sess:

    test_dir = dped_dir + phone.replace("_orig", "") + "/test_data/full_size_test_images/"
    test_photos = [f for f in os.listdir(test_dir) if os.path.isfile(test_dir + f)]

    if test_subset == "small":
        # use five first images only
        test_photos = test_photos[0:5]

    if phone.endswith("_orig"):

        # load pre-trained model
        saver = tf.train.Saver()
        saver.restore(sess, "models_orig/" + phone)
예제 #27
0
    # placeholders for training data
    print("Session initialized")
    phone_ = tf.placeholder(tf.float32, [None, PATCH_SIZE], name="train-phone")
    phone_image = tf.reshape(phone_, [-1, PATCH_HEIGHT, PATCH_WIDTH, 3], name="train-phone-img")

    dslr_ = tf.placeholder(tf.float32, [None, PATCH_SIZE], name="train-dslr")
    dslr_image = tf.reshape(dslr_, [-1, PATCH_HEIGHT, PATCH_WIDTH, 3], name="train-dslr-img")

    adv_ = tf.placeholder(tf.float32, [None, 1], name="cointoss")

    # get processed enhanced image

    if convdeconv:
        enhanced = models.convdeconv(phone_image, depth, parametric=parametric, s_conv=s_conv)
    else:
        enhanced = models.resnet(phone_image, kernel_size, depth, blocks, parametric=parametric, s_conv=s_conv)

    # 2) content loss

    with tf.name_scope("content_loss"):
        CONTENT_LAYER = 'relu5_4'

        enhanced_vgg = vgg.net(vgg_dir, vgg.preprocess(enhanced * 255))
        dslr_vgg = vgg.net(vgg_dir, vgg.preprocess(dslr_image * 255))

        content_size = utils._tensor_size(dslr_vgg[CONTENT_LAYER]) * batch_size
        loss_content = 2 * tf.nn.l2_loss(enhanced_vgg[CONTENT_LAYER] - dslr_vgg[CONTENT_LAYER]) / content_size

        tf.summary.scalar("loss_content", loss_content)

    # transform both dslr and enhanced images to grayscale
예제 #28
0





train_on_gpu = check_gpu()

if not train_on_gpu:
    print('Cuda is not available for traning.Traning on CPU.......')
else:
    print('Cuda is available for traning. Traning on GPU.......')


model_list ={'chexnet': chexnet(),
             'resnet' : resnet(num_of_classes = 2)}

def main():
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--lr',type=float,default=0.1)
    arg('--n_epochs',type=int, default = 5)
    arg('--batch-size',type=int, default= 32)
    arg('--data_dir',type=str,default = 'chest_xray')
    arg('--model',type=str, default ='chexnet',choices = model_list.keys())
    arg('--root',type=str,default ='runs/debug', help = 'checkpoint root')

    args = parser.parse_args()

    train_loader= generate_trainloaders(data_dir= args.data_dir,
                                                batch_size= args.batch_size)
        data_slice=1.0,
        return_path=True,
    )
    dataloader = data.DataLoader(
        dataset,
        batch_size=config["batch_size"],
        shuffle=False,
        num_workers=config["workers"],
    )
    if config["dataset_info"]:
        utils.dataloader_info(dataloader)

    # Initialize ship or no-ship detection network and then laod the weigths
    print("Loading ship detection model...")
    num_classes = 1
    clf_net = models.resnet(config["clf_resnet"], num_classes)

    print("Loading model weights from {}...".format(clf_checkpoint))
    checkpoint = torch.load(clf_checkpoint, map_location=torch.device("cpu"))
    clf_net.load_state_dict(checkpoint["model"])

    print("Loading segmentation model...")
    model_str = config["seg_model"].lower()
    if model_str == "enet":
        seg_net = models.ENet(num_classes)
    elif model_str == "linknet":
        seg_net = models.LinkNet(num_classes)
    elif model_str == "linknet34":
        seg_net = models.LinkNet(num_classes, 34)
    elif model_str == "dilatedunet":
        seg_net = models.DilatedUNet(classes=num_classes)
예제 #30
0
res_sizes = utils.get_resolutions()

# get the specified image resolution
IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_SIZE = utils.get_specified_res(
    res_sizes, phone, resolution)

# disable gpu if specified
config = tf.ConfigProto(
    device_count={'GPU': 0}) if use_gpu == "false" else None

# create placeholders for input images
x_ = tf.placeholder(tf.float32, [None, IMAGE_SIZE])
x_image = tf.reshape(x_, [-1, IMAGE_HEIGHT, IMAGE_WIDTH, 3])

# generate enhanced image
enhanced = resnet(x_image)

with tf.Session(config=config) as sess:

    test_dir = dped_dir + 'Test/' + phone.replace("_orig", "") + '/Full/'
    test_photos = [
        f for f in os.listdir(test_dir) if os.path.isfile(test_dir + f)
    ]

    if test_subset == "small":
        # use five first images only
        test_photos = test_photos[0:5]

    if phone.endswith("_orig"):

        # load pre-trained model
예제 #31
0
def test(test_path, agumentation, **kwargs):
    """
    测试模型性能
    :param
        test_path(str) -- 测试集地址
        agumentation(bool) -- 是否对单个图片多次复制
    :kwargs
        model(int) -- 模型
    """
    # 设置超参数
    if agumentation:
        BATCH_SIZE = 1
    else:
        BATCH_SIZE = 32

    # 选择运行的cpu或gpu
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # 定义损失函数
    # N / n,权重为各类别频率的倒数
    weight = torch.Tensor([9., 1.5, 19.48, 30.62, 9.11, 86.86, 71.])
    weight = weight.to(device)
    criterion = nn.CrossEntropyLoss(weight=weight)

    # 数据处理
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    test_transform = transforms.Compose(
        [transforms.RandomCrop(224),
         transforms.ToTensor(), normalize])

    # 加载数据
    # 定义test_loader
    test_dataset = SkinDiseaseDataset(test_path,
                                      transforms=test_transform,
                                      agumentation=agumentation)
    test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE)

    # 加载模型
    if kwargs['model'] == 1:
        model = MFFNet()
    elif kwargs['model'] == 2:
        # 299 x 299
        model = inception(pretrained=False)
    elif kwargs['model'] == 3:
        model = resnet(pretrained=False)
    elif kwargs['model'] == 4:
        model = densenet(pretrained=False)
    elif kwargs['model'] == 5:
        model = senet(pretrained=False)
    # 加载模型权重
    model.load_state_dict(torch.load(CONFIG.best_model))
    model = model.to(device)

    # 测试模式
    model.eval()
    # 各类别预测正确个数
    class_correct = list(0. for i in range(7))
    # 各类别总个数
    class_total = list(0. for i in range(7))
    # 损失
    sum_loss = 0.0
    # 总预测正确个数
    correct = 0
    # 总个数
    total = 0
    # 总迭代次数
    cnt = 0
    # 测试集增强模式
    if agumentation:
        # 预测标签情况
        x = []
        # 真实标签情况
        y = []
        for data in test_loader:
            cnt += 1

            # 加载数据
            image, label = data
            image = image.view(-1, 3, 224, 224)
            label = label[0]
            image, label = image.to(device), label.to(device)

            # 前向传播
            output = model(image)

            # 使用平均策略获取预测值
            output = output.detach()
            # 平均策略
            val = None
            for i in range(output.size(0)):
                if val is None:
                    val = output[i]
                else:
                    val = val + output[i]
            val = val / output.size(0)
            _, a = torch.max(val, 0)

            # 统计各个类预测正确的个数
            m = label.detach()
            class_correct[m] += 1 if a == m else 0
            class_total[m] += 1
            # 统计预测正确总个数
            correct += 1 if a == m else 0

            x.append(a.item())
            y.append(m.item())
        # list转化为numpy
        x = np.array(x)
        y = np.array(y)
    else:
        # 预测标签情况
        x = None
        # 真实标签情况
        y = None
        for data in test_loader:
            cnt += 1

            # 加载数据
            image, label = data
            image, label = image.to(device), label.to(device)

            # 前向传播
            output = model(image)
            loss = criterion(output, label)

            # 计算loss和acc
            sum_loss += loss.item()
            _, a = torch.max(output.detach(), 1)
            b = label.detach()
            total += label.size(0)
            correct += (a == b).sum()

            # 预测和真实标签情况
            if x is None:
                x = a
                y = b
            else:
                x = torch.cat((x, a))
                y = torch.cat((y, b))

            # 统计每个类别的正确预测情况
            for i in range(label.size(0)):
                m = b[i]
                class_correct[m] += 1 if a[i] == m else 0
                class_total[m] += 1
        # tensor转化为numpy
        x = x.cpu().detach().numpy()
        y = y.cpu().detach().numpy()

    # 打印结果
    cm_plot_labels = ['MEL', 'NV', 'BCC', 'AKIEC', 'BKL', 'DF', 'VASC']
    # 判断测试集是否增强
    if agumentation:
        # 打印acc
        print("test_acc:%.2f%%\n" % (100 * correct / cnt))
    else:
        # 打印loss和acc
        print("test_loss:%.2f test_acc:%.2f%%\n" %
              (sum_loss / cnt, 100 * correct / total))
    # 打印每个类别的acc
    for i in range(7):
        if class_total[i] > 0:
            print('Test Accuracy of %5s: %.2f%% (%2d/%2d)' %
                  (cm_plot_labels[i], 100 * class_correct[i] / class_total[i],
                   class_correct[i], class_total[i]))
        else:
            print('Test Accuracy of %5s: N/A (no training examples)' %
                  cm_plot_labels[i])
    print('')

    # 计算混淆矩阵
    cm = confusion_matrix(y, x)
    print('')

    # 计算BMC
    balanced_multiclass_accuracy(cm)
    print('')

    # 可视化混淆矩阵
    plot_confusion_matrix(cm, cm_plot_labels, title='Confusion Matrix')
    print('')

    # 打印分类报告
    report = classification_report(y, x, target_names=cm_plot_labels)
    print(report)