Example #1
def load_my_model(arch_file=None, weight_file=None):
    # Requires `import json` and `model_from_json` (from keras.models or tf.keras.models).
    if arch_file is None:
        model = resnet_model()
    else:
        with open(arch_file, 'r') as json_file:
            architecture = json.load(json_file)
            model = model_from_json(json.dumps(architecture))
            # model = model_from_json(arch_file)

    model.load_weights(weight_file)
    return model
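A minimal call sketch for the helper above (not from the original source); the architecture and weight file names are placeholders:

# Hypothetical usage; 'model.json' and 'weights.h5' are placeholder paths.
model = load_my_model(arch_file='model.json', weight_file='weights.h5')
# Or fall back to the default ResNet architecture and only load weights:
model = load_my_model(weight_file='weights.h5')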
Example #2
File: train.py Project: zhushaoquan/DMML
def make_model(args, gids=None):
    """
    Initialize ResNet-50 model.
    """
    if 'softmax' in args.loss_type:
        if args.dataset == 'market1501':
            num_classes = 751
        elif args.dataset == 'duke':
            num_classes = 702
        else:
            raise NotImplementedError

        model = resnet_model(num_classes=num_classes, include_top=True,
                             remove_downsample=args.remove_downsample)
    else:
        model = resnet_model(remove_downsample=args.remove_downsample)

    if gids is not None:
        model = model.cuda(gids[0])
        if len(gids) > 1:
            model = DataParallel(model, gids)

    return model
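A usage sketch for make_model, assuming an argparse-style namespace carrying the fields it reads (loss_type, dataset, remove_downsample); the values below are illustrative only:

import argparse

# Hypothetical arguments; only the fields consumed by make_model are set.
args = argparse.Namespace(loss_type='softmax', dataset='market1501',
                          remove_downsample=True)
model = make_model(args)                 # gids=None keeps the model on CPU
# model = make_model(args, gids=[0, 1])  # multiple ids wrap the model in DataParallel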
Example #3
def mnist_model(image, labels, mesh):
    """The model.
	Args:
		image: tf.Tensor with shape [batch, 28*28]
		labels: a tf.Tensor with shape [batch] and dtype tf.int32
		mesh: a mtf.Mesh
	Returns:
		logits: a mtf.Tensor with shape [batch, 10]
		loss: a mtf.Tensor with shape []
	"""
    batch_dim = mtf.Dimension("batch", FLAGS.batch_size)
    rows_dim = mtf.Dimension("rows_size", image_height)
    cols_dim = mtf.Dimension("cols_size", image_width)
    channel_dim = mtf.Dimension("image_channel", num_channels)
    classes_dim = mtf.Dimension(name='classesnum', size=classesnum)
    x = mtf.import_tf_tensor(
        mesh,
        tf.reshape(
            image,
            [FLAGS.batch_size, image_height, image_width, num_channels]),
        mtf.Shape([batch_dim, rows_dim, cols_dim, channel_dim]))
    # x = mtf.transpose(x, [batch_dim, rows_dim, cols_dim, channel_dim])
    # print(x.shape)
    logits = resnet_model(x, classes_dim=classes_dim, depth=depth)
    logits = mtf.cast(logits, dtype=tf.float32)

    if labels is None:
        loss = None
    else:
        labels = mtf.import_tf_tensor(mesh,
                                      tf.reshape(labels, [FLAGS.batch_size]),
                                      mtf.Shape([batch_dim]))
        loss = mtf.layers.softmax_cross_entropy_with_logits(
            logits, mtf.one_hot(labels, classes_dim), classes_dim)
        loss = mtf.reduce_mean(loss)
    return logits, loss
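A rough wiring sketch for mnist_model (not part of the original code), assuming Mesh TensorFlow is imported as mtf and TF1-style placeholders are in use:

# Hypothetical graph construction; mesh layout and device placement are omitted.
graph = mtf.Graph()
mesh = mtf.Mesh(graph, 'my_mesh')
images = tf.placeholder(tf.float32, [FLAGS.batch_size, 28 * 28])
labels = tf.placeholder(tf.int32, [FLAGS.batch_size])
logits, loss = mnist_model(images, labels, mesh)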
Example #4
# Note: this snippet begins inside an if/else that selects the training vs. test split.
    input_path = os.path.join(DATA_DIR, '*', 'train-*.tfrecord')
    mode = 'validation'
else:
    input_path = os.path.join(DATA_DIR, 'test', 'test.tfrecord')
    mode = 'test'

# Creating data iterator based on tensorflow Dataset api
test_iter, test_data = data.extract_image(input_path, BATCH_SIZE, SEED, mode=mode)


input_shape = (None, 224, 224, 3)
label_shape = (None, 6)

# Loading base model whose embedding layer will be extracted
if base_model == 'resnet_50':
    _, x, _, is_train, features = model.resnet_model(input_shape, label_shape)
    feature_size = 2048
elif base_model == 'inception_resnet':
    _, x, _, is_train, features = model.inception_resnet_model(input_shape, label_shape)
    feature_size = 1536
else:
    raise AssertionError('Unknown base model present')


sess = tf.Session()

# Loading trained model checkpoint
saver = utils.load_checkpoint(os.path.join(model_dir,'model'), sess, 'model')
saver_unloaded = utils.load_checkpoint(os.path.join(model_dir,'model_unsaved'), sess, 'model_unsaved')

# Extracting feature dictionary for the provided dataset
Example #5
def main(gid=None,
         dataset=None,
         dataset_root=None,
         which=None,
         exp_dir=None,
         verbose=False):
    """
    Configs
    """
    GPU_ID = 0  # gpu id or 'None'
    BATCH_SIZE = 32  # batch size when extracting query and gallery features
    IMG_SIZE = (256, 128)
    DATASET = 'market1501'  # market1501, duke
    WHICH = 'last'  # which model to load
    EXP_DIR = './exp/dmml/market1501'
    NORMALIZE_FEATURE = True  # whether to normalize features in evaluation
    NUM_WORKERS = 8

    if gid is not None:
        GPU_ID = gid
    if dataset is not None:
        DATASET = dataset
    if which is not None:
        WHICH = which
    if exp_dir is not None:
        EXP_DIR = exp_dir
    """
    Datasets
    """
    if dataset_root is None:
        if DATASET == 'market1501':
            dataset_root = './datasets/market1501'
        elif DATASET == 'duke':
            dataset_root = './datasets/duke'
        else:
            raise NotImplementedError

    print('Generating dataset...')
    eval_transform = transforms.Compose([
        transforms.Resize(IMG_SIZE, interpolation=3),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    if DATASET == 'market1501':
        datasets = {
            x: Market1501(dataset_root, transform=eval_transform, split=x)
            for x in ['gallery', 'query']
        }
        num_classes = 751
    elif DATASET == 'duke':
        datasets = {
            x: DukeMTMC_reID(dataset_root, transform=eval_transform, split=x)
            for x in ['gallery', 'query']
        }
        num_classes = 702

    dataloaders = {
        x: torch.utils.data.DataLoader(datasets[x],
                                       batch_size=BATCH_SIZE,
                                       shuffle=False,
                                       num_workers=NUM_WORKERS)
        for x in ['gallery', 'query']
    }
    print('Done.')
    """
    Model
    """
    print('Restoring model...')

    ### You may need to modify the arguments of the model according to your training settings.

    model = resnet_model(remove_downsample=True)
    # model = resnet_model(num_classes=num_classes, include_top=False, remove_downsample=False)

    model.load_state_dict(torch.load('{}/model_{}.pth'.format(EXP_DIR, WHICH)),
                          strict=False)
    if GPU_ID is not None:
        model.cuda(GPU_ID)
    model.eval()
    print('Done.')
    """
    Test
    """
    print('Getting image ID...')
    gallery_cam, gallery_label = get_id(datasets['gallery'].imgs,
                                        dataset=DATASET)
    query_cam, query_label = get_id(datasets['query'].imgs, dataset=DATASET)
    print('Done.')

    # Extract feature
    print('Extracting gallery feature...')
    gallery_feature, g_images = extract_feature(
        model,
        dataloaders['gallery'],
        normalize_feature=NORMALIZE_FEATURE,
        gid=GPU_ID,
        verbose=verbose)
    print('Done.')
    print('Extracting query feature...')
    query_feature, q_images = extract_feature(
        model,
        dataloaders['query'],
        normalize_feature=NORMALIZE_FEATURE,
        gid=GPU_ID,
        verbose=verbose)
    print('Done.')

    query_cam = np.array(query_cam)
    query_label = np.array(query_label)
    gallery_cam = np.array(gallery_cam)
    gallery_label = np.array(gallery_label)

    # Evaluate
    print('Evaluating...')
    CMC = torch.IntTensor(len(gallery_label)).zero_()
    ap = 0.0
    for i in range(len(query_label)):
        ap_tmp, CMC_tmp = evaluate(query_feature[i], query_label[i],
                                   query_cam[i], gallery_feature,
                                   gallery_label, gallery_cam)
        if CMC_tmp[0] == -1:
            continue
        CMC = CMC + CMC_tmp
        ap += ap_tmp

    CMC = CMC.float()
    CMC = CMC / len(query_label)  # average CMC
    print('Done.')
    print('Rank-1: {:.6f} Rank-5: {:.6f} Rank-10: {:.6f} mAP: {:.6f}'.format(
        CMC[0].item(), CMC[4].item(), CMC[9].item(), ap / len(query_label)))

    return ap / len(query_label), CMC
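The entry point above can be invoked directly; a minimal sketch, assuming a checkpoint such as ./exp/dmml/market1501/model_last.pth already exists (these defaults are hard-coded in the function):

# Hypothetical invocation; the paths and GPU id are illustrative.
mAP, cmc = main(gid=0, dataset='market1501', which='last',
                exp_dir='./exp/dmml/market1501', verbose=True)
print('mAP: {:.4f}  Rank-1: {:.4f}'.format(mAP, cmc[0].item()))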
Example #6
DATA_DIR = ARGS.data_dir

if ARGS.model_type == 'base':
    base_model = config['pretrained_model']

    input_path = os.path.join(DATA_DIR, 'test', 'test.tfrecord')
    test_iter, test_data = data.extract_image(input_path,
                                              BATCH_SIZE,
                                              SEED,
                                              mode='test')

    input_shape = (None, 224, 224, 3)
    label_shape = (None, 6)

    if base_model == 'resnet_50':
        preds, x, y, is_train, _ = model.resnet_model(input_shape, label_shape)
    elif base_model == 'inception_resnet':
        preds, x, y, is_train, _ = model.inception_resnet_model(
            input_shape, label_shape)
    else:
        raise AssertionError('Unknown base model present')

    sess = tf.Session()

    saver = utils.load_checkpoint(os.path.join(model_dir, 'model'), sess,
                                  'model')
    saver_unloaded = utils.load_checkpoint(
        os.path.join(model_dir, 'model_unsaved'), sess, 'model_unsaved')

    data_pl = [x, is_train]
Example #7
def main(argv):

    # turn off log message
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.FATAL)

    # check folder
    if not (os.path.exists(os.path.join(FLAGS.outdir, 'tensorboard'))):
        os.makedirs(os.path.join(FLAGS.outdir, 'tensorboard'))
    if not (os.path.exists(os.path.join(FLAGS.outdir, 'model'))):
        os.makedirs(os.path.join(FLAGS.outdir, 'model'))

    # load train data(cifar10, class: 10)
    if FLAGS.problem == 'cifar10':
        (x_train, y_train), (x_test,
                             y_test) = tf.keras.datasets.cifar10.load_data()
        num_class = 10
    # load train data(cifar100, class: 100)
    if FLAGS.problem == 'cifar100':
        (x_train, y_train), (x_test,
                             y_test) = tf.keras.datasets.cifar100.load_data(
                                 label_mode='fine')
        num_class = 100

    # preprocess
    train_gen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255,
        horizontal_flip=True,
        width_shift_range=4.0 / 32.0,
        height_shift_range=4.0 / 32.0)
    test_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 /
                                                               255)

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config(index=FLAGS.gpu_index)) as sess:

        if FLAGS.is_octconv:
            network = octconv_resnet50
        else:
            network = normal_resnet50

        # set network
        kwargs = {
            'sess': sess,
            'outdir': FLAGS.outdir,
            'input_size': FLAGS.image_size,
            'alpha': FLAGS.alpha,
            'network': network,
            'num_class': num_class,
            'is_training': True,
            'learning_rate': 1e-3
        }

        Model = resnet_model(**kwargs)

        utils.cal_parameter()

        # prepare tensorboard
        writer_train = tf.summary.FileWriter(
            os.path.join(FLAGS.outdir, 'tensorboard', 'train'), sess.graph)

        value_loss = tf.Variable(0.0)
        tf.summary.scalar("train_loss", value_loss)
        merge_op = tf.summary.merge_all()

        # initialize
        sess.run(init_op)

        tbar = tqdm(range(FLAGS.epoch), ascii=True)
        epoch_loss = []
        for i in tbar:
            train_data_shuffled = train_gen.flow(x_train,
                                                 y_train,
                                                 FLAGS.batch_size,
                                                 shuffle=True)

            # one epoch
            for iter in range(x_train.shape[0] // FLAGS.batch_size):
                train_data_batch = next(train_data_shuffled)

                label = tf.keras.utils.to_categorical(train_data_batch[1],
                                                      num_classes=num_class)

                # training
                train_loss = Model.update(train_data_batch[0], label)
                epoch_loss.append(np.mean(train_loss))

                s = "epoch:{}, step:{}, Loss: {:.4f}".format(
                    i, iter, np.mean(epoch_loss))
                tbar.set_description(s)

            summary_train_loss = sess.run(merge_op,
                                          {value_loss: np.mean(epoch_loss)})
            writer_train.add_summary(summary_train_loss, i)

            epoch_loss.clear()

            # save model
            Model.save_model(i)
Example #8
train_data_gen = image_generator.flow_from_directory(directory=str(data_path),
                                                     batch_size=BATCH_SIZE,
                                                     shuffle=True,
                                                     target_size=(IMG_HEIGHT,
                                                                  IMG_WIDTH),
                                                     classes=list(CLASS_NAMES))
test_data_gen = image_generator.flow_from_directory(
    directory=str(data_path_validation),
    batch_size=BATCH_SIZE,
    shuffle=False,
    target_size=(IMG_HEIGHT, IMG_WIDTH),
    classes=list(CLASS_NAMES))

IMG_SHAPE = (IMG_HEIGHT, IMG_WIDTH, 3)

model = resnet_model(IMG_SHAPE)

loss_object = tf.keras.losses.CategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam(learning_rate=0.00001)
writer = tf.summary.create_file_writer(
    "/tmp/mylogs/test_classification_dataaugmentation_{}".format(timestamp))


@tf.function
def train_step(images, labels, step):
    with tf.GradientTape() as tape:
        predictions = model(images)
        loss = loss_object(labels, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    eq = tf.equal(tf.argmax(labels, -1), tf.argmax(predictions, -1))
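A rough driver loop for the train_step above (a sketch, not part of the original snippet), assuming train_data_gen yields (images, labels) batches as configured earlier:

# Hypothetical epoch loop; steps_per_epoch is illustrative.
steps_per_epoch = train_data_gen.samples // BATCH_SIZE
for step in range(steps_per_epoch):
    images, labels = next(train_data_gen)
    train_step(images, labels, tf.constant(step, dtype=tf.int64))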
Example #9
File: test.py Project: TGU1912/OctConv
def main(argv):

    # turn off log message
    tf.logging.set_verbosity(tf.logging.WARN)

    # check folder
    if not (os.path.exists(os.path.join(FLAGS.outdir, 'tensorboard'))):
        os.makedirs(os.path.join(FLAGS.outdir, 'tensorboard'))

    # load train data(cifar10, class: 10)
    if FLAGS.problem == 'cifar10':
        (x_train, y_train), (x_test,
                             y_test) = tf.keras.datasets.cifar10.load_data()
        num_class = 10
    # load train data(cifar100, class: 100)
    if FLAGS.problem == 'cifar100':
        (x_train, y_train), (x_test,
                             y_test) = tf.keras.datasets.cifar100.load_data(
                                 label_mode='fine')
        num_class = 100

    # preprocess
    test_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 /
                                                               255)

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config(index=FLAGS.gpu_index)) as sess:

        if FLAGS.is_octconv:
            network = octconv_resnet50
        else:
            network = normal_resnet50

        # set network
        kwargs = {
            'sess': sess,
            'outdir': FLAGS.outdir,
            'input_size': FLAGS.image_size,
            'alpha': FLAGS.alpha,
            'network': network,
            'num_class': num_class,
            'is_training': False,
            'learning_rate': 1e-4
        }

        Model = resnet_model(**kwargs)

        utils.cal_parameter()

        # prepare tensorboard
        writer_test = tf.summary.FileWriter(
            os.path.join(FLAGS.outdir, 'tensorboard', 'test'))

        value_acc = tf.Variable(0.0)
        tf.summary.scalar("test_accuracy", value_acc)
        merge_op = tf.summary.merge_all()

        # initialize
        sess.run(init_op)

        tbar = tqdm(range(FLAGS.epoch), ascii=True)
        epoch_acc = []
        for i in tbar:
            test_data = test_gen.flow(x_test,
                                      y_test,
                                      FLAGS.batch_size,
                                      shuffle=False)

            # one epoch
            Model.restore_model(FLAGS.model_path + '/model_{}'.format(i))
            for iter in range(x_test.shape[0] // FLAGS.batch_size):
                train_data_batch = next(test_data)

                label = tf.keras.utils.to_categorical(train_data_batch[1],
                                                      num_classes=num_class)

                test_acc = Model.test(train_data_batch[0], label)
                epoch_acc.append(np.mean(test_acc))

                s = "epoch:{}, acc: {:.4f}".format(i, np.mean(epoch_acc))
                tbar.set_description(s)

            summary_test_acc = sess.run(merge_op,
                                        {value_acc: np.mean(epoch_acc)})
            writer_test.add_summary(summary_test_acc, i)

            epoch_acc.clear()
Example #10
train_input_path = os.path.join(ARGS.data_dir, 'train', 'train-*.tfrecord')
val_input_path = os.path.join(ARGS.data_dir, 'validation', 'train-*.tfrecord')
SEED = 88

input_shape = (None, 224, 224, 3)
label_shape = (None, 6)

# Creating save directory where model checkpoints and
# tensorboard files will be stored
save_dir = utils.create_save_dir(ARGS, EXPERIMENT_TIMESTAMP, 'base_model')

# loading resnet-50 or inception resnet computation
# graph and adding loss and optimizer to the graph
if ARGS.pretrained_model == 'resnet_50':
    outputs, inputs, labels, is_train, _ = model.resnet_model(
        input_shape, label_shape)
if ARGS.pretrained_model == 'inception_resnet':
    outputs, inputs, labels, is_train, _ = model.inception_resnet_model(
        input_shape, label_shape)
loss = model.build_loss(labels, outputs, loss_name=ARGS.loss)

prediction = tf.cast(tf.greater(tf.sigmoid(outputs), 0.5), tf.float32)
correct_prediction = tf.equal(labels[:, -1], prediction[:, -1])

correct_images = tf.boolean_mask(inputs, correct_prediction)
wrong_images = tf.boolean_mask(inputs, tf.logical_not(correct_prediction))

train_loss = tf.summary.scalar('train_loss', loss)
validation_loss = tf.summary.scalar('val_loss', loss)
# val_correct_img = tf.summary.image('correct_imgs', correct_images, max_outputs=5)
# val_wrong_img = tf.summary.image('wrong_imgs', wrong_images, max_outputs=5)
Example #11
File: train.py Project: TGU1912/OctConv
def main(argv):
    # check folder
    if not (os.path.exists(os.path.join(FLAGS.outdir, 'tensorboard'))):
        os.makedirs(os.path.join(FLAGS.outdir, 'tensorboard'))
    if not (os.path.exists(os.path.join(FLAGS.outdir, 'model'))):
        os.makedirs(os.path.join(FLAGS.outdir, 'model'))

    # get file list
    train_data_list = glob.glob(FLAGS.indir + '/*')
    # shuffle list
    random.shuffle(train_data_list)

    # load train data
    train_set = tf.data.Dataset.list_files(train_data_list)
    train_set = train_set.apply(
        tf.contrib.data.parallel_interleave(
            lambda x: tf.data.TFRecordDataset(x), cycle_length=os.cpu_count()))
    train_set = train_set.map(utils._parse_function,
                              num_parallel_calls=os.cpu_count())
    train_set = train_set.shuffle(buffer_size=FLAGS.shuffle_buffer_size)
    train_set = train_set.repeat()
    train_set = train_set.batch(FLAGS.batch_size)
    train_set = train_set.prefetch(1)
    train_iter = train_set.make_one_shot_iterator()
    train_data = train_iter.get_next()

    # step of each epoch
    if FLAGS.num_data % FLAGS.batch_size == 0:
        step_of_epoch = FLAGS.num_data // FLAGS.batch_size  # integer division so range() below accepts it
    else:
        step_of_epoch = FLAGS.num_data // FLAGS.batch_size + 1

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config(index=FLAGS.gpu_index)) as sess:

        if FLAGS.is_octconv:
            network = octconv_resnet50
        else:
            network = normal_resnet50

        # set network
        kwargs = {
            'sess': sess,
            'outdir': FLAGS.outdir,
            'input_size': FLAGS.image_size,
            'alpha': FLAGS.alpha,
            'network': network,
            'is_training': True,
            'learning_rate': 1e-4
        }

        Model = resnet_model(**kwargs)

        utils.cal_parameter()

        # prepare tensorboard
        writer_train = tf.summary.FileWriter(
            os.path.join(FLAGS.outdir, 'tensorboard', 'train'), sess.graph)

        value_loss = tf.Variable(0.0)
        tf.summary.scalar("train_loss", value_loss)
        merge_op = tf.summary.merge_all()

        # initialize
        sess.run(init_op)

        tbar = tqdm(range(FLAGS.epoch), ascii=True)
        epoch_loss = []
        for i in tbar:

            # one epoch
            for step in range(step_of_epoch):
                train_data_batch, train_label_batch = sess.run(train_data)
                train_loss = Model.update(train_data_batch, train_label_batch)
                epoch_loss.append(train_loss)
                s = "epoch:{}, step:{}, Loss: {:.4f}".format(
                    i, step, np.mean(epoch_loss))
                tbar.set_description(s)

            summary_train_loss = sess.run(merge_op,
                                          {value_loss: np.mean(epoch_loss)})
            writer_train.add_summary(summary_train_loss, i)

            epoch_loss.clear()

            # save model
            Model.save_model(i)
Example #12
from tensorflow.keras.preprocessing import image
# preprocess_input is used below but was not imported in the snippet; the ResNet V2
# variant is assumed here because the model exposes a 'post_relu' layer.
from tensorflow.keras.applications.resnet_v2 import preprocess_input
from model import resnet_model, activation_model
import numpy as np
import matplotlib.pyplot as plt
import scipy as sp
import glob

if __name__ == '__main__':

    # image size is default of resnet
    image_size = (224, 224, 3)
    # image directory
    imgfiles = glob.glob('./*.png')

    # output probs (Dense) ,(None, 1000)
    resnet = resnet_model(1000)
    # this model is resnet till last conv, not Denses
    # output post_relu (Activation) (None, 7, 7, 2048)
    activation_layer = resnet.get_layer('post_relu')
    model = activation_model(resnet)

    # getting last layer weights
    final = resnet.get_layer('predictions')
    w = final.get_weights()

    # input processing
    x = np.expand_dims(image.img_to_array(
        image.load_img(imgfiles[2], target_size=(224, 224, 3))),
                       axis=0)
    x = preprocess_input(x)