Example #1
def main(args):
    global DEVICE
    if args.gpu:
        if torch.cuda.is_available():
            DEVICE = torch.device('cuda')
        else:
            print("CUDA is not available, reverting to CPU")

    image_datasets, dataloaders = get_image_data(args.data_dir)
    out_features = len(os.listdir(args.data_dir + '/train'))
    model, criterion, optimizer = get_training_base(args.arch,
                                                    args.hidden_units,
                                                    out_features,
                                                    args.learning_rate)
    model.class_to_idx = image_datasets['train'].class_to_idx
    model.idx_to_class = {v: k for k, v in model.class_to_idx.items()}
    model, optimizer = train_model(model, dataloaders['train'], criterion,
                                   optimizer, dataloaders['valid'],
                                   args.epochs)
    test_loss, test_accuracy = evaluate_model(model, dataloaders['test'],
                                              criterion)
    print(f"Test loss: {test_loss:.3f}.. "
          f"Test Accuracy: {test_accuracy:.3f}")
    save_checkpoint(model,
                    optimizer,
                    args.arch,
                    "Adam",
                    "NLLLoss",
                    checkpoint_name=os.path.join(args.save_dir,
                                                 'trained_model.pth'))
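
The save_checkpoint helper used in Example #1 is not shown. A minimal sketch, assuming the checkpoint only needs to carry the weights plus enough metadata to rebuild the model (the field names here are assumptions, not the original project's layout):

import torch

def save_checkpoint(model, optimizer, arch, optimizer_name, criterion_name,
                    checkpoint_name='trained_model.pth'):
    # bundle the weights with the metadata needed to rebuild the model later
    checkpoint = {
        'arch': arch,                      # e.g. 'vgg16'
        'optimizer_name': optimizer_name,  # 'Adam' in the call above
        'criterion_name': criterion_name,  # 'NLLLoss' in the call above
        'class_to_idx': model.class_to_idx,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
    }
    torch.save(checkpoint, checkpoint_name)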
Example #2
def train_image():
    x_train, x_valid, x_test, y_train, y_valid, y_test = utils.get_image_data(
        image_size)

    # instantiate the class
    models = model.Image_classification_models(
        num_classes=len(utils.image_label_list), image_size=image_size)

    model_list = models.get_model_list()
    trained_model = []

    #train all the models in model_list
    for one_model in model_list:
        one_model.fit(x_train, y_train, epochs=hm_epoch, batch_size=hm_batch)
        trained_model.append(one_model)

    # evaluate the trained models
    best_acc = 0
    for one_model in trained_model:
        train_metric_result = one_model.evaluate(x_train,
                                                 y_train,
                                                 batch_size=hm_batch,
                                                 verbose=0)
        validation_metric_result = one_model.evaluate(x_valid,
                                                      y_valid,
                                                      batch_size=hm_batch,
                                                      verbose=0)

        print('Model name:', one_model.name)
        print('Train Accuracy:', train_metric_result[1])
        print('Validation Accuracy:', validation_metric_result[1])
        print('\n')

        #pick the best model based on their accuracy
        if validation_metric_result[1] > best_acc:
            best_acc = validation_metric_result[1]
            best_model = one_model

    print('The best model is: %s with %.2f %% accuracy' %
          (best_model.name, best_acc * 100))

    #test the model on test set
    best_model_train = best_model.evaluate(x_train,
                                           y_train,
                                           batch_size=hm_batch,
                                           verbose=0)
    best_model_test = best_model.evaluate(x_test,
                                          y_test,
                                          batch_size=hm_batch,
                                          verbose=0)

    print('Train loss: %.3f acc %.3f' %
          (best_model_train[0], best_model_train[1]))
    print('Test loss: %.3f acc %.3f' %
          (best_model_test[0], best_model_test[1]))

    #save the best model
    print('Saving model...')
    best_model.save(model_path + '/best_image_model.h5')
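
Loading the model saved above for later inference is a one-liner in Keras; a short usage sketch (the path is assumed to match the save call):

from tensorflow import keras

best_model = keras.models.load_model(model_path + '/best_image_model.h5')
predictions = best_model.predict(x_test, batch_size=hm_batch)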
Example #3
def train():
    cfg = Config()
    vgg = Vgg16().to(cfg.device).eval()
    for param in vgg.parameters():
        param.requires_grad = False
    # freeze the network's parameters
    content = utils.get_image_data(cfg.content_path,
                                   cfg.image_size).to(cfg.device)
    style = utils.get_image_data(cfg.style_path, cfg.image_size).to(cfg.device)
    target = content.clone().requires_grad_(True)

    content_features = vgg(content)
    style_features = vgg(style)
    gram_styles = [
        utils.gram_matrix(x).detach() for x in style_features
    ]
    # feature-map dimensions, used below to normalise the style loss
    batches, channels, h, w = style_features[-1].size()
    # note: the style grams must not require grad (F.mse_loss requires this),
    # hence the .detach() above
    optimizer = t.optim.Adam([target], lr=cfg.lr)
    for epoch in range(cfg.epoches):
        target_features = vgg(target)
        content_loss = F.mse_loss(target_features.relu3_3,
                                  content_features.relu3_3.detach())

        style_loss = 0.
        for tar, gram_style in zip(target_features, gram_styles):
            tar_gram = utils.gram_matrix(tar)
            style_loss += F.mse_loss(tar_gram, gram_style)
        style_loss = style_loss / (2 * channels * h * w) ** 2
        total_loss = cfg.content_weight * content_loss + cfg.style_weight * style_loss
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()
        if epoch % 100 == 0:
            print(
                "iteration:{}  Content loss:{:.4f},Style loss:{:.4f},Total loss:{:.4f}"
                .format(epoch + 1, content_loss.item(), style_loss.item(),
                        total_loss.item()))

    denorm = tv.transforms.Normalize([-2.12, -2.04, -1.80], [4.37, 4.46, 4.44])
    target = denorm(target.squeeze().to('cpu')).clamp_(min=0, max=1)
    tv.utils.save_image(
        target, cfg.combined_path + '/output ' +
        str(cfg.content_weight / cfg.style_weight) + '.png')
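
utils.gram_matrix is assumed above but not shown. A common implementation is sketched below; note that train() applies its own normalisation afterwards, so this version leaves the raw correlations unscaled:

import torch as t

def gram_matrix(x):
    # x: feature maps of shape (batch, channels, height, width)
    b, ch, h, w = x.size()
    features = x.view(b, ch, h * w)
    # (b, ch, ch) matrix of channel-wise feature correlations
    return features.bmm(features.transpose(1, 2))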
Example #4
def pre_train():
    print('pre-train of two models')

    word_to_vec.train_word2vec()

    train_x = utils.get_image_data()
    train_y = utils.get_one_hot_label_data()
    model = visual_model.AlexNet()
    model.train(train_x, train_y)
    print('Alex model and Word2Vec model have been trained.')
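
utils.get_one_hot_label_data is not shown either; whatever it reads from disk, the encoding step itself usually reduces to something like this (the signature is hypothetical, the real helper takes no arguments):

import numpy as np

def one_hot_encode(labels, num_classes):
    # labels: 1-D array of integer class ids
    one_hot = np.zeros((len(labels), num_classes), dtype=np.float32)
    one_hot[np.arange(len(labels)), labels] = 1.0
    return one_hot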
"""A convolutional autoencoder trained to reconstruct my face"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import utils
import cv2

# mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# images, _, _ = utils.get_data('data.csv', flat=False, rgb=False)

im_w = 47
im_h = 70
im_ch = 1  # number of color channels

images = utils.get_image_data('me/1', size=[im_w, im_h])
print('Finished loading data')


# helper functions for weights and biases
def weight(shape):
    return tf.Variable(tf.truncated_normal(shape=shape, stddev=0.2))


def bias(shape):
    return tf.Variable(tf.constant(0.1, shape=shape))


# helper functions for convolution and pooling
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
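
The snippet ends before the pooling helper its last comment announces; the standard 2x2 max-pool companion in TF1 would be (an assumption, since the original cuts off here):

def max_pool_2x2(x):
    # halve both spatial dimensions
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')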
Example #6
def train_image():
    x_train, x_valid, x_test, y_train, y_valid, y_test = utils.get_image_data(
        image_size)
    print('Train data quantity:', x_train.shape[0])
    print('Test data quantity:', x_test.shape[0])
    print('Val data quantity:', x_valid.shape[0])

    # instantiate the class
    models = model.Image_classification_models(
        num_classes=len(utils.image_label_list), image_size=image_size)

    model_list = models.get_model_list()
    trained_model = []

    #train all the models in model_list
    for one_model in model_list:
        one_model.fit(x_train, y_train, epochs=hm_epoch, batch_size=hm_batch)
        trained_model.append(one_model)

    # evaluate the trained models
    best_acc = 0
    for one_model in trained_model:
        train_metric_result = one_model.evaluate(x_train,
                                                 y_train,
                                                 batch_size=hm_batch,
                                                 verbose=0)
        validation_metric_result = one_model.evaluate(x_valid,
                                                      y_valid,
                                                      batch_size=hm_batch,
                                                      verbose=0)

        print('Model name:', one_model.name)
        print('Train Accuracy:', train_metric_result[1])
        print('Train Precision:', train_metric_result[2])
        print('Train Recall:', train_metric_result[3])
        print('Train F1:', train_metric_result[4])
        print('Validation Accuracy:', validation_metric_result[1])
        print('Validation Precision:', validation_metric_result[2])
        print('Validation Recall:', validation_metric_result[3])
        print('Validation F1:', validation_metric_result[4])
        print('\n')

        #pick the best model based on their accuracy
        if validation_metric_result[1] > best_acc:
            best_acc = validation_metric_result[1]
            best_model_precision = validation_metric_result[2]
            best_model_recall = validation_metric_result[3]
            best_model_f1 = validation_metric_result[4]
            best_model = one_model

    print('The best model is: %s with' % (best_model.name))
    print('%.2f %% accuracy' % (best_acc * 100))
    print('%.2f %% precision' % (best_model_precision * 100))
    print('%.2f %% recall' % (best_model_recall * 100))
    print('%.2f %% f1' % (best_model_f1 * 100))

    #test the model on test set
    best_model_train = best_model.evaluate(x_train,
                                           y_train,
                                           batch_size=hm_batch,
                                           verbose=0)
    best_model_test = best_model.evaluate(x_test,
                                          y_test,
                                          batch_size=hm_batch,
                                          verbose=0)

    print('Train loss: %.3f acc: %.3f precision: %.3f recall: %.3f f1: %.3f' %
          (best_model_train[0], best_model_train[1], best_model_train[2],
           best_model_train[3], best_model_train[4]))
    print('Test loss: %.3f acc: %.3f precision: %.3f recall: %.3f f1: %.3f' %
          (best_model_test[0], best_model_test[1], best_model_test[2],
           best_model_test[3], best_model_test[4]))

    #save the best model
    print('Saving model...')
    best_model.save(model_path + '/best_image_model.h5')
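
evaluate() returning precision, recall and F1 at indices 2-4 implies the models were compiled with custom metrics. A common Keras-backend sketch of such metrics (the function names and exact formulas are assumptions, not from the original):

from tensorflow.keras import backend as K

def precision_m(y_true, y_pred):
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    pred_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_pos / (pred_pos + K.epsilon())

def recall_m(y_true, y_pred):
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_pos / (actual_pos + K.epsilon())

def f1_m(y_true, y_pred):
    p = precision_m(y_true, y_pred)
    r = recall_m(y_true, y_pred)
    return 2 * p * r / (p + r + K.epsilon())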
Example #7
from utils import (get_image_data, padded_image, is_border_point,
                   is_line_end_point, is_not_simple_point,
                   get_4_adjacent_neighbours, print_image_data,
                   is_simple_point)

if __name__ == '__main__':
    image_data = get_image_data()
    image_data = padded_image(image_data)
    count = 0
    c = 0
    li = []
    for i in range(len(image_data)):
        for j in range(len(image_data[0])):
            if i != 0 and j != 0 and i != len(image_data) - 1 and j != len(
                    image_data[0]) - 1:
                c = c + 1
                if i - 1 == 11 and image_data[i][j] == 1:
                    print('yes')
                # if image_data[i][j]==1 and (is_line_end_point((i,j), image_data) or is_not_simple_point((i,j), image_data)):
                if image_data[i][j] == 1 and not (is_not_simple_point(
                    (i, j), image_data)):
                    count = count + 1
                    li.append((i - 1, j - 1))
    print(count, c)
    print(li)
    print(is_simple_point((6, 3), image_data))
    print(not is_not_simple_point((6, 3), image_data))
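
padded_image is assumed above; a minimal numpy sketch (the original may work on plain nested lists instead, which the indexing in the loop also supports):

import numpy as np

def padded_image(image_data):
    # surround the binary image with a one-pixel border of zeros
    return np.pad(np.asarray(image_data), 1, mode='constant')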
"""A convolutional autoencoder"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from tensorflow.examples.tutorials.mnist import input_data
import utils
import cv2

# mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

im_w = 96
im_h = 54
im_ch = 3  # number of color channels
images = utils.get_image_data(
    'me', size=[im_w, im_h])  # returns all the images in the 'me/' directory
print('Finished loading data')


# helper functions for weights and biases
def weight(shape):
    return tf.Variable(tf.truncated_normal(shape=shape, stddev=0.2))


def bias(shape):
    return tf.Variable(tf.constant(0.1, shape=shape))


# helper functions for convolution and pooling
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
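
A convolutional autoencoder also needs a decoder, which this excerpt does not reach. A transpose-convolution helper in the same style would be (an assumption, not from the original):

def deconv2d(x, W, output_shape):
    # upsample by a factor of 2 in both spatial dimensions
    return tf.nn.conv2d_transpose(x, W, output_shape=output_shape,
                                  strides=[1, 2, 2, 1], padding='SAME')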
Example #9
                    }
                    show_loss, show_accu, _ = sess.run(
                        [self.loss, self.accuracy, self.optimizer],
                        feed_dict=feed)

                    if global_steps % show_every_n == 0:
                        print(
                            'epoch: {}/{}..'.format(epoch + 1,
                                                    self.num_epoches),
                            'global_step: {}..'.format(global_steps),
                            'loss: {:.3f}..'.format(show_loss),
                            'accuracy: {:.2f}..'.format(show_accu))

                    if global_steps % saved_every_n == 0:
                        saver.save(
                            sess, save_path +
                            "e{}_s{}.ckpt".format(epoch, global_steps))
            saver.save(sess, save_path + "latest.ckpt")

        print('training finished')


if __name__ == '__main__':
    # load the training data; the labels have already been one-hot encoded
    train_x = utils.get_image_data()
    train_y = utils.get_one_hot_label_data()
    label2int, int2label = utils.get_parameter()

    model = AlexNet()
    model.train(train_x, train_y)
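
The checkpoints written by saver.save above can be restored with the usual TF1 pattern; a sketch, assuming save_path is the same directory-style prefix and the graph has been rebuilt first:

import tensorflow as tf

# the same graph (e.g. AlexNet's variables) must exist before restoring
saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint(save_path))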
Example #10
    def __init__(self, opt):
        self.opt = opt
        self.batch_size = self.opt.batch_size
        self.seq_per_img = opt.seq_per_img

        # feature related options
        self.use_fc = getattr(opt, 'use_fc', True)
        self.use_att = getattr(opt, 'use_att', True)
        self.use_box = getattr(opt, 'use_box', 0)
        self.norm_att_feat = getattr(opt, 'norm_att_feat', 0)
        self.norm_box_feat = getattr(opt, 'norm_box_feat', 0)

        # load the json file which contains additional information about the dataset
        print('DataLoader loading json file: ', opt.input_json)
        self.info = json.load(open(self.opt.input_json))
        self.ix_to_word = self.info['ix_to_word']
        self.vocab_size = len(self.ix_to_word)
        print('vocab size is ', self.vocab_size)

        # open the hdf5 file
        print('DataLoader loading h5 file: ', opt.input_fc_dir,
              opt.input_att_dir, opt.input_box_dir, opt.input_label_h5)
        self.h5_label_file = h5py.File(self.opt.input_label_h5,
                                       'r',
                                       driver='core')

        self.fc_loader = HybridLoader(self.opt.input_fc_dir, '.npy')
        self.att_loader = HybridLoader(self.opt.input_att_dir, '.npz')
        self.box_loader = HybridLoader(self.opt.input_box_dir, '.npy')

        # load in the sequence data
        seq_size = self.h5_label_file['labels'].shape
        self.seq_length = seq_size[1]
        print('max sequence length in data is', self.seq_length)
        # load the pointers in full to RAM (should be small enough)
        self.label_start_ix = self.h5_label_file['label_start_ix'][:]
        self.label_end_ix = self.h5_label_file['label_end_ix'][:]

        self.num_images = self.label_start_ix.shape[0]
        print('read %d image features' % (self.num_images))

        # separate out indexes for each of the provided splits
        self.split_ix = {'train': [], 'val': [], 'test': []}
        for ix in range(len(self.info['images'])):
            img = self.info['images'][ix]
            if img['split'] == 'train':
                self.split_ix['train'].append(ix)
            elif img['split'] == 'val':
                self.split_ix['val'].append(ix)
            elif img['split'] == 'test':
                self.split_ix['test'].append(ix)
            elif opt.train_only == 0:  # restval
                self.split_ix['train'].append(ix)

        print('assigned %d images to split train' %
              len(self.split_ix['train']))
        print('assigned %d images to split val' % len(self.split_ix['val']))
        print('assigned %d images to split test' % len(self.split_ix['test']))

        self.iterators = {'train': 0, 'val': 0, 'test': 0}

        self._prefetch_process = {}  # the three prefetch processes
        for split in self.iterators.keys():
            self._prefetch_process[split] = BlobFetcher(
                split, self, split == 'train')
            # Terminate the child process when the parent exits
        def cleanup():
            print('Terminating BlobFetcher')
            for split in self.iterators.keys():
                del self._prefetch_process[split]

        import atexit
        atexit.register(cleanup)

        # for graphs
        self.coco_image_data = utils.get_image_data()
        self.objects = utils.get_object_types()
        self.predicates = utils.get_predicate_types()
        self.scene_graphs = h5py.File(
            'exp/sg_results/coco_scenegraphs_px2graph.h5', 'r')
        self.info_scene_graphs = (self.coco_image_data, self.scene_graphs,
                                  self.objects, self.predicates)
        self.scene_graph_ix = 0
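
HybridLoader is used above but not defined in this excerpt. A minimal sketch, assuming one feature file per image key in a flat directory (the real loader may also support database backends):

import os
import numpy as np

class HybridLoader:
    def __init__(self, db_path, ext):
        self.db_path = db_path
        self.ext = ext  # '.npy' or '.npz'

    def get(self, key):
        path = os.path.join(self.db_path, str(key) + self.ext)
        if self.ext == '.npz':
            return np.load(path)['feat']  # the 'feat' key is an assumption
        return np.load(path)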