Example #1
def main():

    import json
    import os
    import time
    import numpy as np
    import tensorflow as tf
    import keras
    # vgg16() comes from the surrounding project's model module (its import is elided in this excerpt)
    timers = {}

    STAT_REPEAT = os.environ.get('STAT_REPEAT', '')
    if not STAT_REPEAT:
        STAT_REPEAT = 50
    STAT_REPEAT = int(STAT_REPEAT)
    """ Call model construction function and run model multiple times. """
    model = vgg16()
    test_x = np.random.rand(224, 224, 3)

    config = tf.ConfigProto()
    #    config.gpu_options.allow_growth = True
    #    config.gpu_options.allocator_type = 'BFC'
    config.gpu_options.per_process_gpu_memory_fraction = float(
        os.getenv('CK_TF_GPU_MEMORY_PERCENT', 33)) / 100.0

    sess = tf.Session(config=config)
    keras.backend.set_session(sess)

    dt = time.time()
    for _ in range(STAT_REPEAT):
        model.predict(np.array([test_x]))

    t = (time.time() - dt) / STAT_REPEAT

    timers['execution_time_classify'] = t
    timers['execution_time'] = t

    with open('tmp-ck-timer.json', 'w') as ftimers:
        json.dump(timers, ftimers, indent=2)
Example #2
def train_model(image_dir='data/data_road/training/image_2',
                label_dir='data/data_road/training/gt_image_2',
                job_dir='/tmp/semantic_segmenter', **args):
    # set the logging path for ML Engine logging to Storage bucket
    logs_path = job_dir + '/logs/' + datetime.now().isoformat()
    print('Using logs_path located at {}'.format(logs_path))

    train_generator, validate_generator = create_generators(image_dir, label_dir, batch_size=batch_size, target_size=target_size)
    model = vgg16(dropout=0.2, target_size=target_size)
    model.summary()

    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=0.001),
                  metrics=['accuracy'])
    #tensorboard = TensorBoard(log_dir="logs/{}".format(time()))
    history = model.fit_generator(
                        train_generator,
                        validation_data=validate_generator,
                        epochs=epochs,
                        steps_per_epoch=850/batch_size,
                        validation_steps=150/batch_size,
                        verbose=1)
                        #callbacks=[tensorboard])

    # Save the model locally
    model.save('model.h5')


    # Save the model to the Cloud Storage bucket's jobs directory
    with file_io.FileIO('model.h5', mode='rb') as input_f:
        with file_io.FileIO(job_dir + '/model.h5', mode='wb+') as output_f:
            output_f.write(input_f.read())
Example #3
 def __init__(self,
              input_shape=(448, 448, 3),
              model=None,
              grid_size=(7, 7),
              bounding_boxes=2,
              number_classes=20,
              dropout=0.1):
     ## array (3, 2) => height 3, width 2
     self.Sx = grid_size[1]  # number of grid cells per row (image width)
     self.Sy = grid_size[0]  # number of grid cells per column (image height)
     self.B = bounding_boxes  # number of bounding boxes per cell
     self.C = number_classes  # number of classes
     self.output_shape = (self.Sy, self.Sx, self.B * 5 + self.C)
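     # each of the Sy x Sx grid cells predicts B boxes (x, y, w, h, confidence) plus C class scores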
     self.lambda_coord = 5
     self.lambda_noobj = 0.5
     self.resize_shape = input_shape
     if model == 'vgg16':
         self.model = vgg16(input_shape=input_shape,
                            grid_size=(self.Sy, self.Sx),
                            bounding_boxes=self.B,
                            num_class=self.C)
         self.model.summary()
     elif model == 'mobilenet':
         self.model = mobilenet_yolo(input_shape=input_shape,
                                     grid_size=(self.Sy, self.Sx),
                                     bounding_boxes=self.B,
                                     num_class=self.C,
                                     dropout=dropout)
         self.model.summary()
     self.class_to_number = None
Example #4
    def _setup(self):
        h, w = self.face_size

        self.x = sm.Tensor()
        self.mask = sm.Tensor()
        self.feat, ho, wo, co = vgg16(self.x, h, w)
        self.embed = facenet_head(self.feat, ho, wo, co)
        self.loss, self.dis = facenet_loss(self.embed, self.alpha, self.mask)
Example #5
    def __init__(self, config):
        if not isinstance(config, str):
            self.config = config
        else:
            assert os.path.exists(config)
            with open(config) as f:
                self.config = json.load(f)
        assert 'name' in self.config
        assert 'data_path' in self.config
        assert 'balanced' in self.config
        assert 'num_samples' in self.config
        os.environ["CUDA_VISIBLE_DEVICES"] = self.config['gpu_ids']

        self.date = time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime())

        # create project folder
        if not os.path.exists(os.path.join('./results/', self.config['name'])):
            os.makedirs(os.path.join('./results/', self.config['name']))

        if self.config['model']['name'] == 'resnet50':
            self.model = resnet50(
                pretrained=True,
                num_classes=self.config['model']['num_classes'])
        elif self.config['model']['name'] == 'resnet101':
            self.model = resnet101(
                pretrained=True,
                num_classes=self.config['model']['num_classes'])
        elif self.config['model']['name'] == 'inception_v3':
            self.model = inception_v3(
                pretrained=True,
                num_classes=self.config['model']['num_classes'])
        elif self.config['model']['name'] == 'vgg16':
            self.model = vgg16(pretrained=True,
                               num_classes=self.config['model']['num_classes'])
        elif self.config['model']['name'] == 'vgg19_bn':
            self.model = vgg19_bn(
                pretrained=True,
                num_classes=self.config['model']['num_classes'])
        elif self.config['model']['name'] == 'ws_dan_resnet50':
            self.model = ws_dan_resnet50(
                pretrained=True,
                num_classes=self.config['model']['num_classes'],
                num_attentions=self.config['model']['num_attentions'])
        self.model.cuda()
        if len(self.config['gpu_ids']) > 1:
            self.model = nn.DataParallel(self.model)
        #self.criterion = nn.CrossEntropyLoss()
        self.criterion = nn.CrossEntropyLoss(weight=torch.FloatTensor(
            [1.0, 3.0]).cuda())  # the loss weights can be adjusted when the classes are imbalanced
        self.criterion_attention = nn.MSELoss()
        self.optimizer = optim.SGD(self.model.parameters(),
                                   lr=self.config['model']['init_lr'],
                                   momentum=0.9,
                                   weight_decay=1e-4)
        # self.exp_lr_schedler = lr_scheduler.MultiStepLR(self.optimizer, milestones=self.config['model']['milestones'], gamma=0.1)
        # self.exp_lr_schedler = lr_scheduler.StepLR(self.optimizer, step_size=2, gamma=0.9)
        self.exp_lr_schedler = lr_scheduler.StepLR(self.optimizer,
                                                   step_size=10,
                                                   gamma=0.6)
Example #6
    def load_state(self, _type='test'):
        if self.config['model']['name'] == 'resnet50':
            self.model = resnet50(
                pretrained=False,
                num_classes=self.config['model']['num_classes'])
        elif self.config['model']['name'] == 'resnet101':
            self.model = resnet101(
                pretrained=False,
                num_classes=self.config['model']['num_classes'])
        elif self.config['model']['name'] == 'inception_v3':
            self.model = inception_v3(
                pretrained=False,
                num_classes=self.config['model']['num_classes'])
        elif self.config['model']['name'] == 'vgg16':
            self.model = vgg16(pretrained=False,
                               num_classes=self.config['model']['num_classes'])
        elif self.config['model']['name'] == 'vgg19_bn':
            self.model = vgg19_bn(
                pretrained=False,
                num_classes=self.config['model']['num_classes'])
        elif self.config['model']['name'] == 'ws_dan_resnet50':
            self.model = ws_dan_resnet50(
                pretrained=True,
                num_classes=self.config['model']['num_classes'],
                num_attentions=self.config['model']['num_attentions'])

        if _type == 'test':
            # checkpoints = os.path.join('./zhongshan/new_test_file_20200119', self.config['name'], 'checkpoints',
            #                            self.config['test']['checkpoint'])
            checkpoints = os.path.join('.', self.config['test']['checkpoint'])
        elif _type == 'test_batch':
            checkpoints = self.config['test']['checkpoint']
        else:
            checkpoints = os.path.join('./results', self.config['name'],
                                       'checkpoints',
                                       self.config['inference']['checkpoint'])
        self.model.load_state_dict(torch.load(checkpoints)['state_dict'])
        self.model.cuda()
        if len(self.config['gpu_ids']) > 1:
            self.model = nn.DataParallel(self.model)
Example #7
def objective(params):
    """
    Objective function to be minimized.

    Parameters
    ----------
    `params` [list]
        Hyperparameters to be set in optimization iteration.
        - Managed by hyperdrive.
    """
    kernel1 = int(params[0])
    kernel2 = int(params[1])
    # kernel3 = int(params[2])
    # kernel4 = int(params[3])
    # kernel5 = int(params[4])
    # kernel6 = int(params[5])
    # batch_size = int(params[6])

    # model = vgg16(kernel1=kernel1, kernel2=kernel2, kernel3=kernel3,
    #               kernel4=kernel4, kernel5=kernel5, kernel6=kernel6)
    model = vgg16(kernel1=kernel1, kernel2=kernel2)

    model.compile(optimizer=keras.optimizers.rmsprop(lr=0.0001, decay=1e-6),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(x_val, y_val),
              shuffle=True)

    # Score trained model.
    scores = model.evaluate(x_val, y_val, verbose=1)
    print('Validation loss:', scores[0])
    print('Validation accuracy:', scores[1])

    return scores[0]
Example #8
    # -- Note: Use the validation set and disable flipped images to enable faster loading.

    input_dir = os.path.join('./data/results', args.train_id, 'model', args.model_name)
    if not os.path.exists(input_dir):
        raise Exception('There is no input directory for loading network from ' + input_dir)

    pascal_classes = np.asarray(['__background__',
                                 'aeroplane', 'bicycle', 'bird', 'boat',
                                 'bottle', 'bus', 'car', 'cat', 'chair',
                                 'cow', 'diningtable', 'dog', 'horse',
                                 'motorbike', 'person', 'pottedplant',
                                 'sheep', 'sofa', 'train', 'tvmonitor'])

    # initialize the network here.
    if args.net == 'vgg16':
        fasterRCNN = vgg16(pascal_classes, pretrained=False, class_agnostic=args.class_agnostic, args=args)
    else:
        print("network is not defined")
        pdb.set_trace()

    fasterRCNN.create_architecture()

    print("load checkpoint %s" % (input_dir))
    checkpoint = torch.load(input_dir)
    fasterRCNN.load_state_dict(checkpoint['model'])
    if 'pooling_mode' in checkpoint.keys():
        args.POOLING_MODE = checkpoint['pooling_mode']

    print('load model successfully!')

    # initialize the tensor holder here.
Example #9
import sys
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from torchvision import transforms
from torchvision.datasets import ImageFolder
from model import vgg16

use_cuda = torch.cuda.is_available()

train_dataset = ImageFolder(root=(sys.argv[1] + '/train'),
                            transform=transforms.ToTensor())
valid_dataset = ImageFolder(root=(sys.argv[1] + '/valid'),
                            transform=transforms.ToTensor())
batch_size = 32
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=1)

net = vgg16(pretrained=False)
if use_cuda:
    net = net.cuda()

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=1e-4)
loss_list, acc_list = [], []


def train(epoch):
    net.train()
    for i, (images, labels) in enumerate(train_loader):
        if use_cuda:
            images, labels = images.cuda(), labels.cuda()
        #print(images, labels)
        optimizer.zero_grad()
Example #10
import tensorflow as tf
import numpy as np
from scipy import ndimage
import apriori
import cv2
import sys
from misc import find_item, bbox_plot
from model import vgg16

im_dir = sys.argv[1]
features, pred_vec = vgg16(im_dir)
td = {}
d = {}
test = {}
lis = []
for j, feat in enumerate(features):
    for i in range(512):
        td[j, i], d[j, i] = find_item(feat[:, :, i])

feature_map = tf.concat((features[0, :, :, :], features[1, :, :, :]), axis=2)

li = [i for i in td.values() if i]
pixel = {}

for i in range(len(li)):
    pixel[i] = li[i]

freq = apriori.run(pixel, 20)
# print (freq)
Example #11
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=BATCH_SIZE,
                                          shuffle=True,
                                          num_workers=2)

testset = torchvision.datasets.FashionMNIST(root='./data',
                                            train=False,
                                            download=True,
                                            transform=test_transform)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=64,
                                         shuffle=False,
                                         num_workers=2)

# Define a Convolution Neural Network
net = model.vgg16(num_classes=10)

if gpu_statue:
    net = net.cuda()
    print('*' * 26, 'using GPU', '*' * 26)
else:
    print('*' * 26, 'using CPU', '*' * 26)

# Define a Loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=LR, momentum=0.9)

tr_acc, ts_acc, loss_p, time_p = [], [], [], []

# Train and test the network
start_time = time.time()
Example #12
def main():
    """ Call model construction function and run model multiple times. """
    model = vgg16()
    test_x = np.random.rand(224, 224, 3)
    for _ in range(50):
        model.predict(np.array([test_x]))
Example #13
def train(epochs):
    """
    训练模型
    :param epochs:
    :return:
    """

    vgg = vgg16((None, 224, 224, 3), 102)
    resnet = resnet50((None, 224, 224, 3), 102)
    densenet = densenet121((None, 224, 224, 3), 102)
    models = [vgg, resnet, densenet]
    train_db, valid_db = load_db(32)
    his = []
    for model in models:
        variables = model.trainable_variables
        optimizers = tf.keras.optimizers.Adam(1e-4)
        for epoch in range(epochs):
            # training
            total_num = 0
            total_correct = 0
            training_loss = 0
            for step, (x, y) in enumerate(train_db):
                with tf.GradientTape() as tape:
                    # train
                    out = model(x)
                    loss = tf.losses.categorical_crossentropy(
                        y, out, from_logits=False)
                    loss = tf.reduce_mean(loss)
                    training_loss += loss
                    grads = tape.gradient(loss, variables)
                    optimizers.apply_gradients(zip(grads, variables))
                    # training accuracy
                    y_pred = tf.cast(tf.argmax(out, axis=1), dtype=tf.int32)
                    y_true = tf.cast(tf.argmax(y, axis=1), dtype=tf.int32)
                    correct = tf.reduce_sum(
                        tf.cast(tf.equal(y_pred, y_true), dtype=tf.int32))
                    total_num += x.shape[0]
                    total_correct += int(correct)
                if step % 100 == 0:
                    print("loss is {}".format(loss))
            training_accuracy = total_correct / total_num

            # validation
            total_num = 0
            total_correct = 0
            for (x, y) in valid_db:
                out = model(x)
                y_pred = tf.argmax(out, axis=1)
                y_pred = tf.cast(y_pred, dtype=tf.int32)
                y_true = tf.argmax(y, axis=1)
                y_true = tf.cast(y_true, dtype=tf.int32)
                correct = tf.cast(tf.equal(y_pred, y_true), dtype=tf.int32)
                correct = tf.reduce_sum(correct)
                total_num += x.shape[0]
                total_correct += int(correct)
            validation_accuracy = total_correct / total_num
            print(
                "epoch:{}, training loss:{:.4f}, training accuracy:{:.4f}, validation accuracy:{:.4f}"
                .format(epoch, training_loss, training_accuracy,
                        validation_accuracy))
            his.append({
                'accuracy': training_accuracy,
                'val_accuracy': validation_accuracy
            })
    return his
Example #14
def train(FLAGS):
    """training model

    """
    batch_size =  FLAGS.batch_size
    num_classes =  FLAGS.num_classes
    
    train_tfrecords_file = FLAGS.train_tfrecords_file
    valid_tfrecords_file = FLAGS.valid_tfrecords_file
    
    tensorboard_dir = FLAGS.tensorboard_dir
    
    saver_dir = FLAGS.saver_dir

    image_height, image_width, image_channel = (FLAGS.image_height, FLAGS.image_width, FLAGS.image_channel)

    mode = FLAGS.mode

    base_learning_rate = FLAGS.base_learning_rate

    max_steps = FLAGS.max_steps

    valid_steps = FLAGS.valid_steps

    keep_prob = FLAGS.keep_prob

    train_loss, train_acc = [], []
    valid_loss, valid_acc = [], []


    with tf.Graph().as_default():
        # define image and label placeholders
        images_pl = tf.placeholder(tf.float32, shape=[batch_size, image_height, image_width, image_channel])
        labels_pl = tf.placeholder(tf.int32, shape=[batch_size, num_classes])
        is_training_pl = tf.placeholder(tf.bool, name='is_training')
        # keep_prob_pl = tf.placeholder(tf.float32)

        # get training image with batch 
        train_tf_image, train_tf_label = read_tfrecords(train_tfrecords_file, shape=(image_height, image_width, image_channel))
        train_tf_image = preprocessing_image(train_tf_image, mode=mode)
        train_tf_image_batch, train_tf_label_batch = generate_image_label_batch(train_tf_image, train_tf_label, batch_size, num_classes)

        # validation image with batch
        valid_tf_image, valid_tf_label = read_tfrecords(valid_tfrecords_file, shape=(image_height, image_width, image_channel))
        valid_tf_image = preprocessing_image(valid_tf_image, mode=mode)
        valid_tf_image_batch, valid_tf_label_batch = generate_image_label_batch(valid_tf_image, valid_tf_label, batch_size, num_classes)

        # compute logits from model
        logits = vgg16(images_pl, num_classes, keep_prob, is_training_pl)
        # get global steps
        global_steps = tf.Variable(0, trainable=False)
        # compute loss, accuracy and predictions
        loss, accuracy, _ = calc_loss_acc(labels_pl, logits)
        # training operator to update model params
        training_op = train_op(loss, global_steps, base_learning_rate, option='SGD')

        # define the model saver
        saver = tf.train.Saver(tf.global_variables())
        
        # define a summary operation 
        summary_op = tf.summary.merge_all()

        with tf.Session() as sess:
            sess.run(tf.local_variables_initializer())
            sess.run(tf.global_variables_initializer())

            # start queue runner
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            # set up summary file writer
            train_writer = tf.summary.FileWriter(tensorboard_dir + 'train', sess.graph)
            valid_writer = tf.summary.FileWriter(tensorboard_dir + 'valid')

            for step in range(max_steps):
                # load image and label with batch size
                train_image_batch, train_label_batch = sess.run([train_tf_image_batch, train_tf_label_batch])
                # define training feed dict
                train_feed_dict = {images_pl: train_image_batch, labels_pl: train_label_batch, is_training_pl: True}
                _, _loss, _acc, _summary_op = sess.run([training_op, loss, accuracy, summary_op], feed_dict=train_feed_dict)

                # store loss and accuracy value
                train_loss.append(_loss)
                train_acc.append(_acc)

                # print loss and acc
                # print("Iteration " + str(step) + ", Mini-batch Loss= " + "{:.6f}".format(_loss) + ", Training Accuracy= " + "{:.5f}".format(_acc))

                # if step%10 == 0:
                    # _logits = sess.run(logits, feed_dict=train_feed_dict)
                    # print('Per class accuracy by logits in training time', per_class_acc(train_label_batch, _logits))
                print("Iteration " + str(step) + ", Mini-batch Loss= " + "{:.6f}".format(_loss) + ", Training Accuracy= " + "{:.5f}".format(_acc))
                train_writer.add_summary(_summary_op, step)
                train_writer.flush()

                if step%5 == 0:
                    print('start validation process')
                    _valid_loss, _valid_acc = [], []

                    for valid_step in range(valid_steps):
                        valid_image_batch, valid_label_batch = sess.run([valid_tf_image_batch, valid_tf_label_batch])

                        valid_feed_dict = {images_pl: valid_image_batch, labels_pl: valid_label_batch, is_training_pl: False}  # run validation in inference mode

                        _loss, _acc, _summary_op = sess.run([loss, accuracy, summary_op], feed_dict = valid_feed_dict)

                        valid_writer.add_summary(_summary_op, valid_step)
                        valid_writer.flush()

                        _valid_loss.append(_loss)
                        _valid_acc.append(_acc)

                    valid_loss.append(np.mean(_valid_loss))
                    valid_acc.append(np.mean(_valid_acc))
                    print("Iteration {}: Train Loss {:6.3f}, Train Acc {:6.3f}, Val Loss {:6.3f}, Val Acc {:6.3f}".format(step, train_loss[-1], train_acc[-1], valid_loss[-1], valid_acc[-1]))

            checkpoint_path = os.path.join(saver_dir, 'model.ckpt')
            saver.save(sess, checkpoint_path, global_step=step)
                
            coord.request_stop()
            coord.join(threads)
Example #15
def build_rpn(**kwargs):
    backbone = model.vgg16(**kwargs)
    rpn_head = model.RPNHead()
    network = nn.Sequential(backbone, rpn_head)
    return network
Example #16
        im_info = im_info.cuda()
        num_boxes = num_boxes.cuda()
        gt_boxes = gt_boxes.cuda()

    # make variable
    im_data = Variable(im_data)
    im_info = Variable(im_info)
    num_boxes = Variable(num_boxes)
    gt_boxes = Variable(gt_boxes)

    if args.cuda:
        args.CUDA = True

    # initialize the network here.
    if args.net == 'vgg16':
        fasterRCNN = vgg16(pd.classes, args, pretrained=True, \
                           class_agnostic=args.class_agnostic)
    else:
        print("network is not defined")
        # pdb.set_trace()

    fasterRCNN.create_architecture()

    lr = args.lr

    params = []
    for key, value in dict(fasterRCNN.named_parameters()).items():
        if value.requires_grad:
            if 'bias' in key:
                params += [{'params': [value], 'lr': lr * (args.DOUBLE_BIAS + 1), \
                            'weight_decay': args.BIAS_DECAY and args.WEIGHT_DECAY \
                                            or 0}]
Example #17
        num_boxes = num_boxes.cuda()
        gt_boxes = gt_boxes.cuda()

    # make variable
    if True:
        im_data = Variable(im_data)
        im_info = Variable(im_info)
        num_boxes = Variable(num_boxes)
        gt_boxes = Variable(gt_boxes)

    if args.cuda:
        args.CUDA = True

    # initialize the network here.
    if args.net == 'vgg16':
        basenet = vgg16(pd_test.classes, args, pretrained=True)
    elif args.net == 'res50':
        basenet = res50(pd_test.classes, args, pretrained=True)
    else:
        print("network is not defined")

    basenet.create_architecture()

    load_name = os.path.join('./data/results', args.train_id, 'model', args.model_name)
    print("loading checkpoint %s" % load_name)
    checkpoint = torch.load(load_name)
    basenet.load_state_dict(checkpoint['model'])
    print("loaded checkpoint %s" % load_name)

    if args.cuda:
        basenet.cuda()
Example #18
def main():
    args = parse_args()

    # Params
    epochs = args.epoch
    sample_output = args.sample_output
    sample_nums = args.sample_nums
    batch_size = args.batch_size
    gpu_n = args.gpu

    pretrain_epochs = min(25, int(epochs / 4))

    torch.cuda.set_device(gpu_n)

    sample_interval = epochs // sample_nums

    # Hyperparams
    learning_rate_d = 1e-3
    learning_rate_g = 1e-3

    # Class names 
    class_names = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]

    # CIFAR dataset
    transform = transforms.Compose([transforms.CenterCrop(32), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261))]) # https://github.com/kuangliu/pytorch-cifar/issues/19
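    # the tuples above are the per-channel CIFAR-10 means and standard deviations (see the linked issue)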
    #transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    #transform = transforms.Compose([transforms.ToTensor()])

    train_dataset = datasets.CIFAR10("./data", train=True, transform=transform, download=True)
    test_dataset = datasets.CIFAR10("./data", train=False, transform=transform, download=True)

    train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)

    test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)

    # Build model
    #_d = model.D2(3, 11)
    _d = model.vgg16(11)
    _d.cuda()

    _g = model.CCNN()
    _g.cuda()

    # Loss and optimizer
    criterion_d = nn.CrossEntropyLoss().cuda()
    optimizer_d = torch.optim.Adam(_d.classifier.parameters(), lr=learning_rate_d)

    criterion_g = nn.CrossEntropyLoss().cuda()
    optimizer_g = torch.optim.Adam(_g.parameters(), lr=learning_rate_g)

    total_batches = len(train_loader)  # number of mini-batches per epoch
    _i = 1

    for epoch in range(pretrain_epochs):
        _d.train()
        for i, (images, labels) in enumerate(train_loader):
            current_batch_size = len(images)
            fake_batch_size = max(1, int(current_batch_size / 8))
            print("Training batch: %d / %d" % (i, total_batches), end="\r")
            sys.stdout.flush()

            # Prepare images and labels
            images = Variable(images.cuda())

            # Put labels to GPU
            labels = Variable(labels.cuda())
            #print(labels)

            # Train D network
            optimizer_d.zero_grad()
            outputs_d = _d(images)
            real_loss = criterion_d(outputs_d, labels)
            real_loss.backward()            
            optimizer_d.step()

        # Log and outputs
        _d.eval()
        correct_d = 0
        total_d = 0

        for images, labels in test_loader:
            images = Variable(images).cuda()
            outputs = _d(images)

            _, predicted = torch.max(outputs.data, 1)
            total_d += labels.size(0)
            correct_d += (predicted.cpu() == labels).sum()
        
        print("Pretrain Epoch [%d/%d], Iter [%d/%d] D loss:%.5f D accuracy: %.2f%%" % (
            epoch + 1,
            pretrain_epochs,
            i + 1,
            len(train_dataset) // batch_size,
            real_loss.data[0],
            (100 * correct_d / total_d)
        ))

    optimizer_d = torch.optim.Adam(_d.classifier.parameters(), lr=learning_rate_d)

    for epoch in range(epochs):
        _d.train()
        for i, (images, labels) in enumerate(train_loader):
            current_batch_size = len(images)
            fake_batch_size = max(1, int(current_batch_size / 8))
            print("Training batch: %d / %d" % (i, total_batches), end="\r")
            sys.stdout.flush()

            # Prepare images and labels
            images = Variable(images.cuda())

            # Put labels to GPU
            labels = Variable(labels.cuda())
            #print(labels)

            # Train D network
            optimizer_d.zero_grad()
            outputs_d = _d(images)
            real_loss = criterion_d(outputs_d, labels)
            real_loss.backward()            
            #print(outputs_d)

            # Generate fake labels
            noise = Variable(torch.cuda.FloatTensor(fake_batch_size, 3, 32, 32).normal_())
            fake_labels = np.zeros(fake_batch_size) + 10
            fake_labels_d = Variable(torch.from_numpy(fake_labels).long().cuda())
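            # label 10 marks generated samples as the extra "fake" class of the 11-way discriminator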
            
            #print(fake_labels)
            #input()
           
            # Generate fake images and classify
            fake_labels_g = np.random.randint(0, 10, fake_batch_size)      
            labels_fake_onehot = torch.from_numpy(one_hot(fake_labels_g)).float().cuda()
            labels_fake_onehot = Variable(labels_fake_onehot)
            fake_images = _g(labels_fake_onehot, noise)
            
            
            # per-channel CIFAR-10 std/mean tensors (torch.zeros avoids adding to uninitialized memory)
            std = torch.zeros(fake_batch_size, 3, 32, 32).cuda()
            std[:,0,:,:] += 0.247
            std[:,1,:,:] += 0.243
            std[:,2,:,:] += 0.261

            mean = torch.zeros(fake_batch_size, 3, 32, 32).cuda()
            mean[:,0,:,:] += 0.4914
            mean[:,1,:,:] += 0.4822
            mean[:,2,:,:] += 0.4465
            
            fake_images = (fake_images - Variable(mean)) / Variable(std)
            
            fake_outputs= _d(fake_images.detach())

            # Calculate loss
            fake_loss = criterion_d(fake_outputs, fake_labels_d)
            fake_loss.backward()

            loss_d = real_loss + fake_loss
            #loss_d = real_loss
            optimizer_d.step()

            # Train G network
            optimizer_g.zero_grad()
            noise = Variable(torch.cuda.FloatTensor(current_batch_size, 3, 32, 32).normal_())
            
            fake_labels = np.random.randint(0, 10, current_batch_size)      
            labels_fake_onehot = torch.from_numpy(one_hot(fake_labels)).float().cuda()
            labels_fake_onehot = Variable(labels_fake_onehot)

            fake_labels = torch.from_numpy(fake_labels)
            fake_labels = Variable(fake_labels.cuda())

            images_g = _g(labels_fake_onehot, noise)

            
            std = torch.zeros(current_batch_size, 3, 32, 32).cuda()
            std[:,0,:,:] += 0.247
            std[:,1,:,:] += 0.243
            std[:,2,:,:] += 0.261

            mean = torch.zeros(current_batch_size, 3, 32, 32).cuda()
            mean[:,0,:,:] += 0.4914
            mean[:,1,:,:] += 0.4822
            mean[:,2,:,:] += 0.4465

            images_g = (images_g - Variable(mean)) / Variable(std)
            
            truth = _d(images_g.detach())

            loss_g = criterion_g(truth, fake_labels)
            loss_g.backward()
            optimizer_g.step()

            if i % 100 == 99:
                print("Epoch [%d/%d], Iter [%d/%d] D loss:%.5f G loss:%.5f" % (
                    epoch + 1,
                    epochs,
                    i + 1,
                    len(train_dataset) // batch_size,
                    loss_d.data[0],
                    loss_g.data[0]
                ))

        # Log and outputs
        _d.eval()
        correct_d = 0
        total_d = 0

        for images, labels in test_loader:
            images = Variable(images).cuda()
            outputs = _d(images)

            _, predicted = torch.max(outputs.data, 1)
            total_d += labels.size(0)
            correct_d += (predicted.cpu() == labels).sum()
        
        print("Epoch [%d/%d], Iter [%d/%d] D loss:%.5f D accuracy: %.2f%% G loss:%.5f" % (
            epoch + 1,
            epochs,
            i + 1,
            len(train_dataset) // batch_size,
            loss_d.data[0],
            (100 * correct_d / total_d),
            loss_g.data[0]
        ))
                

        if _i == sample_interval:
            _i = 1
            print("Generating images: ", end="\r")
            generate_batch_images(_g, 5, start=0, end=9, prefix="training-epoch-%d" % (epoch + 1), figure_path=sample_output, labels=class_names)
            sys.stdout.flush()
            print("Generated images for epoch %d" % (epoch + 1))
        else:
            _i += 1

    return 0
Example #19
import torchvision
from torchvision import transforms
from collections import OrderedDict
from torch.autograd import Variable
from torchvision.datasets import ImageFolder
from model import vgg16
import torch
import torch.nn as nn
import numpy as np
import sys
import matplotlib.pyplot as plt
import cv2

use_cuda = torch.cuda.is_available()

net = vgg16()
if use_cuda:
    net = net.cuda()
net.load_state_dict(torch.load('vgg16.pt'))
net.eval()
criterion = nn.CrossEntropyLoss()
loss_list, acc_list = [], []


def test(image):
    h, w, c = image.shape
    transform1 = transforms.Compose([
        transforms.ToTensor(),
    ])
    images = transform1(image).view(1, c, h, w)
    print(images.numpy().shape)
Example #20
if args.model not in check:
    print("Error: Chosen Model not in", check)
    exit()

import tensorflow as tf
import data
import model

print("Loading data...")
x_train, y_train, x_test, y_test = data.load()

if args.new == True:
    if args.model == "diyModel":
        model = model.diyModel()
    elif args.model == "VGG16":
        model = model.vgg16()
    elif args.model == "VGG19":
        model = model.vgg19()
    elif args.model == "ResNet50":
        model = model.resnet50()
    elif args.model == "ResNet101":
        model = model.resnet101()
else:
    model = tf.keras.models.load_model("./" + str(args.model) + ".h5")

model.fit(x_train,
          y_train,
          validation_data=(x_test, y_test),
          epochs=args.epochs,
          batch_size=args.batch_size)
Example #21
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=BATCH_SIZE,
                                          shuffle=True,
                                          num_workers=2)

testset = torchvision.datasets.CIFAR10(root='./data',
                                       train=False,
                                       download=False,
                                       transform=test_transform)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=64,
                                         shuffle=False,
                                         num_workers=2)

# Define a Convolution Neural Network
net = vgg16(num_classes=10)

if gpu_statue:
    net = net.cuda()
    print('*' * 26, 'using GPU', '*' * 26)
else:
    print('*' * 26, 'using CPU', '*' * 26)

# Define a Loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=LR, momentum=0.9)

tr_acc, ts_acc, loss_p, time_p = [], [], [], []

# Train and test the network
start_time = time.time()
Example #22
    mean = np.array([[[0.485, 0.456, 0.406]]])
    img = img * std + mean
    return (img * 255).astype(np.uint8)


if __name__ == '__main__':
    reload(dataset)
    parser = argparse.ArgumentParser()
    parser.add_argument('--data',
                        default='/mnt/external/Data/BSR/BSDS500/data/images')
    parser.add_argument('--batch-size', default=5, type=int)
    parser.add_argument('--num-thread', default=4, type=int)

    args = parser.parse_args()

    vgg = model.vgg16(pretrained=True)
    # vgg.cuda()
    #
    # # Data loading code
    # transform = transforms.Compose([
    # transforms.CenterCrop(224),
    # transforms.RandomHorizontalFlip(),
    # transforms.ToTensor(),
    # transforms.Normalize(mean = [ 0.485, 0.456, 0.406 ],
    # std = [ 0.229, 0.224, 0.225 ]),
    # ])
    #
    # testdir = os.path.join(args.data, 'test')
    # test_set = dataset.BSDS500(testdir, transform)
    # test_loader = torch.utils.data.DataLoader(
    #     test_set,
Example #23
# torch.backends.cudnn.enabled = False
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(random_seed)

# -----------------------loading the dataset------------------------
dataSet = "cifar"
# train_loader, val_loader = mnist_loaders(batch_train, random_seed, show_sample=True)
train_loader, val_loader = cifar_loaders(batch_train,
                                         random_seed,
                                         show_sample=True)
# ------------------------------------------------------------------

# ------------------------initialize network------------------------
modelName = "vgg16"
net = model.vgg16()
net.train().cuda()

# -----------------------------optimiser----------------------------
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(),
                      lr=learning_rate,
                      momentum=0.9,
                      weight_decay=5e-4)

start_ts = time.time()

losses = []
print(
    f"--------------------------------{dataSet}_{modelName}--------------------------------"
)
Example #24
from model import vgg16
import torch.nn as nn
from torchvision.transforms import ToTensor, ToPILImage
from PIL import Image
to_tensor = ToTensor()
to_pil = ToPILImage()
img1 = Image.open(r'D:\Data\ship_data\data\ship_saliency\Ship_Data2\imgs\000000.jpg')
#img1.show()
input = to_tensor(img1).unsqueeze(0)
print(input.size())
base = {'352': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512]}
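# the config list gives the conv channel widths in order; 'M' entries denote max-pooling layers (VGG-style cfg)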
vgg = nn.ModuleList(vgg16(base['352'],3))
for layer in vgg:
    input = layer(input)

out = input
print(out.size())