Example #1
def create_model(model='', image_shape=(224, 224), class_num=9):
    """Build an AlexNet prediction graph on top of a fluid data layer.

    Args:
        model: unused placeholder (kept for interface compatibility).
        image_shape: (height, width) of the input images. A tuple default
            replaces the original mutable list default; lists passed by
            callers still work via ``list(image_shape)`` below.
        class_num: unused here; presumably AlexNet().net fixes the class
            count internally -- TODO confirm.

    Returns:
        The prediction tensor produced by ``AlexNet().net``.
    """
    # NCHW layout: 3 channels followed by the spatial dimensions.
    train_image = fluid.layers.data(
        name='img', shape=[3] + list(image_shape), dtype='float32')

    predict = AlexNet().net(train_image)
    print('train_image.shape = ', train_image.shape)
    return predict
Example #2
def alexnet_train():
    """Train an AlexNet binary classifier from directory-based image data.

    Relies on module-level configuration: train_dir, validation_dir,
    img_height, img_width, batch_size, save_weights_dir.

    Returns:
        The trained Keras model.
    """
    # Both splits share the same 1/255 rescaling and flow settings.
    flow_kwargs = dict(
        target_size=(img_height, img_width),
        batch_size=batch_size,
        shuffle=True,
        class_mode='binary',
    )
    train_gen = ImageDataGenerator(rescale=1. / 255).flow_from_directory(
        train_dir, **flow_kwargs)
    val_gen = ImageDataGenerator(rescale=1. / 255).flow_from_directory(
        validation_dir, **flow_kwargs)

    net = AlexNet(input_shape=(img_width, img_height, 3),
                  include_top=True,
                  save_weights=save_weights_dir)
    net.compile(optimizer=Adam(),
                loss='binary_crossentropy',
                metrics=['accuracy'])

    # Stop early when validation accuracy stalls; shrink the learning
    # rate when validation loss stalls.
    monitors = [
        EarlyStopping(monitor='val_acc', patience=5),
        ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3),
    ]

    net.fit_generator(train_gen,
                      steps_per_epoch=100,
                      epochs=20,
                      validation_data=val_gen,
                      validation_steps=len(val_gen),
                      callbacks=monitors,
                      verbose=1)
    return net
# initialize the training and validation dataset generators
trainGen = HDF5DatasetGenerator(config.TRAIN_HDF5,
                                128,
                                aug=aug,
                                preprocessors=[pp, mp, iap],
                                classes=2)
valGen = HDF5DatasetGenerator(config.VAL_HDF5,
                              128,
                              aug=aug,
                              preprocessors=[sp, mp, iap],
                              classes=2)

# initialize the optimizer and compile model
print("[INFO] compiling model")
opt = Adam(lr=1e-3)
model = AlexNet.build(width=227, height=227, depth=3, classes=2, reg=0.0002)
# FIX: Keras documents `metrics` as a list of metrics; a bare string is
# rejected (or mis-iterated) by several Keras versions.
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])

# construct the callback to save only the best model to disk based on the validation loss
checkpoint = ModelCheckpoint(args["weights"],
                             monitor="val_loss",
                             save_best_only=True,
                             verbose=1)
callbacks = [checkpoint]

# train the network
# NOTE(review): this call is truncated in the scraped snippet -- the
# closing arguments and parenthesis are missing, so it cannot run as-is.
model.fit_generator(trainGen.generator(),
                    steps_per_epoch=trainGen.numImages // 128,
                    validation_data=valGen.generator(),
                    validation_steps=valGen.numImages // 128,
                    epochs=75,
Example #4
    # Preprocessing pipeline. NOTE(review): rebinding the name `transforms`
    # shadows the torchvision.transforms module it was built from.
    transforms = transforms.Compose([
        transforms.Resize(256),
        transforms.RandomResizedCrop(224),
        transforms.ToTensor(),  # normalize to [0, 1]
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ]) 
    # Optionally preview one training batch before training.
    if args.imshow == True:
        train_dataset = selfData(args.train_img, args.train_lab, transforms)
        train_loader = DataLoader(train_dataset, batch_size = 64, shuffle = True, num_workers = 0, drop_last= False)
        imgs, labels = train_loader.__iter__().__next__()
        imshow(train_loader)

    # Select the network architecture from the CLI flag.
    if args.model == 'mAlexNet':
        net = mAlexNet().to(device)
    elif args.model == 'AlexNet':
        net = AlexNet().to(device)

    criterion = nn.CrossEntropyLoss()
    if args.path == '':
        # No checkpoint supplied: train from scratch, save the weights,
        # then reload them into a freshly built network of the same type.
        train(args.epochs, args.train_img, args.train_lab, transforms, net, criterion)
        PATH = './model.pth'
        torch.save(net.state_dict(), PATH)
        if args.model == 'mAlexNet':
            net = mAlexNet().to(device)
        elif args.model == 'AlexNet':
            net = AlexNet().to(device)
        net.load_state_dict(torch.load(PATH))
    else:
        # Checkpoint path supplied: rebuild the net to load it into.
        # NOTE(review): the snippet is truncated here in the scraped source.
        PATH = args.path
        if args.model == 'mAlexNet':
            net = mAlexNet().to(device)
Example #5
    # Tail of a setup_seed(seed) helper whose `def` lies outside this
    # snippet: seed NumPy, the stdlib RNG, and make cuDNN deterministic
    # for reproducible runs.
    np.random.seed(seed)
    random.seed(seed)
    torch.backends.cudnn.deterministic = True


# Set the random-number seed.
setup_seed(20)

if __name__ == "__main__":

    print("load_net...")

    # Build the requested architecture from the CLI flag.
    if args.model == 'mAlexNet':
        net = mAlexNet()
    elif args.model == 'AlexNet':
        net = AlexNet()
    elif args.model == "carnet":
        net = carNet()
    elif args.model == 'stn_shuf':
        net = stn_shufflenet()
    elif args.model == 'stn_trans_shuf':
        net = stn_trans_shufflenet()
    elif args.model == 'shuf':
        net = torchvision.models.shufflenet_v2_x1_0(pretrained=False,
                                                    num_classes=2)
    elif args.model == 'trans_shuf':
        net = trans_shufflenet(shuff_type=args.shuf_type)
        # print(args.shuf_type)
    # net = net.cuda()
    # for name, parameters in net.named_parameters():
    #     print(name, ':', parameters.size())
Example #6
        # NOTE(review): Normalize is applied without a preceding ToTensor()
        # in this visible fragment -- confirm a tensor conversion happens
        # earlier in the (truncated) pipeline.
        transforms.Resize(224),
        transforms.RandomResizedCrop(224),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        # transforms.Normalize(mean=[0.485, 0.456, 0.406],
        #                          std=[0.229, 0.224, 0.225])
    ])



if __name__=="__main__":

    print("load_net...")
    # Build the requested architecture from the CLI flag.
    if args.model == 'mAlexNet':
        net = mAlexNet()
    elif args.model == 'AlexNet':
        net = AlexNet()
    elif args.model == "carnet":
        net=carNet()
    elif args.model=="googlenet":
        # Replace the final FC layer for 2-class output.
        net=models.googlenet()
        num_fc = net.fc.in_features
        net.fc = nn.Linear(num_fc, 2)
    elif args.model=="vgg16":
        # Pretrained VGG16 with a new 2-class head.
        net=models.vgg16(pretrained=True)
        num_fc = net.classifier[6].in_features
        net.classifier[6] = torch.nn.Linear(num_fc, 2)
        for param in net.parameters():
            param.requires_grad = False
        # All parameters are frozen above, so nothing could learn; unfreeze
        # only the last (fully-connected) layer.
        for param in net.classifier[6].parameters():
            param.requires_grad = True
Example #7
                                        shuffle=False,
                                        num_workers=8,
                                        pin_memory=True)
    # Expose dataset sizes on the loaders for downstream progress/metrics.
    setattr(dataloader['val'], 'total_item_len', len(val_set))
    dataloader['base'] = data.DataLoader(base_set,
                                         batch_size=config.batch_size,
                                         shuffle=False,
                                         num_workers=8,
                                         pin_memory=True)
    setattr(dataloader['base'], 'total_item_len', len(base_set))

    # ---------------------------- model ------------------------------------
    if config.model_name == 'resnet18':
        model = ResNet18(config.code_length, classes, config.class_mask)
    elif config.model_name == 'alexnet':
        model = AlexNet(config.code_length, classes, config.class_mask)
    else:
        # NOTE(review): falls through with `model` unbound -- the
        # nn.DataParallel call below would then raise NameError.
        print('undefined model ! ')

    model = nn.DataParallel(model)
    model.cuda()

    # ---------------------------- loss and opt ------------------------------------
    criterion = nn.CrossEntropyLoss()
    criterion_hash = nn.MSELoss()
    optimizer = optim.SGD(model.parameters(), lr=config.lr, momentum=0.9)
    # Decay the learning rate by 10x every 30 epochs.
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)

    # ---------------------------- log and train ------------------------------------
    # NOTE(review): the open(...) call is truncated in this snippet.
    log_file = open(
        config.model_name + '_' + config.dataset + '_' +
Example #8
            # Tail of an accuracy-evaluation loop whose header lies outside
            # this snippet. Move the batch to GPU when available.
            if torch.cuda.is_available():
                images = images.cuda()
                labels = labels.cuda()
            outputs = net(images)
            # Predicted class = argmax over the output logits.
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            item += 1
    return (correct / total)


if __name__ == "__main__":
    # Build the requested architecture from the CLI flag.
    if args.model == 'mAlexNet':
        net = mAlexNet()
    elif args.model == 'AlexNet':
        net = AlexNet()
    elif args.model == "carnet":
        net = carNet()
    elif args.model == 'stn_shuf':
        net = stn_shufflenet()
    elif args.model == 'stn_trans_shuf':
        net = stn_trans_shufflenet()
    elif args.model == 'shuf':
        net = torchvision.models.shufflenet_v2_x1_0(pretrained=False,
                                                    num_classes=2)
    elif args.model == 'trans_shuf':
        net = trans_shufflenet()
    torch.set_default_tensor_type('torch.FloatTensor')
    print("test net:carNet..")
    # print({k.replace('module.',''):v for k,v in torch.load(args.path,map_location="cpu").items()})
    # exit()
Example #9
def model_fn(features, labels, mode, params):
    """Model function for tf.estimator
    Args:
        features: input batch of images
        labels: labels of the images
        mode: can be one of tf.estimator.ModeKeys.{TRAIN, EVAL, PREDICT}
        params: dictionary of hyperparameters of the model (ex: `params.learning_rate`)
    Returns:
        model_spec: tf.estimator.EstimatorSpec object
    """

    # Unpack images
    # Force NHWC layout; -1 infers the batch dimension.
    images = features
    images = tf.reshape(images, [-1, params.image_size, params.image_size, 3])
    assert images.shape[1:] == [params.image_size, params.image_size,
                                3], "{}".format(images.shape)

    # -----------------------------------------------------------
    # MODEL: define the layers of the model
    # Only these layers are trained; the rest keep pretrained weights.
    train_layers = ['fc8', 'fclat', 'fc7', 'fc6']

    alexnet_model = AlexNet(images, train_layers, params, mode)

    # Link variable to model output
    score = alexnet_model.fc8
    # Binarize the latent layer by rounding to produce the hash bits.
    embeddings_bin = tf.cast(tf.round(alexnet_model.fclat), tf.bool)
    embeddings_float = alexnet_model.fc7

    # Creating a prediction dictionary
    predictions = {
        'bit_codes': embeddings_bin,
        'float_codes': embeddings_float
    }

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)

    # Cast labels
    labels = tf.cast(labels, tf.int32)

    # Op for calculating the loss
    with tf.name_scope("softmax_cross_ent"):
        # NOTE(review): _v2 expects one-hot labels; the tf.argmax(labels, 1)
        # calls below suggest labels are indeed one-hot -- confirm upstream.
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits_v2(logits=score,
                                                       labels=labels))

    # Model's predictions
    pred = tf.nn.softmax(score)

    eval_metric_ops = {
        "Evaluation_Accuracy":
        tf.metrics.accuracy(labels=tf.argmax(labels, 1),
                            predictions=tf.argmax(pred, 1))
    }

    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode,
                                          loss=loss,
                                          eval_metric_ops=eval_metric_ops)

    # List of trainable variables of the layers we want to train
    var_list = [
        v for v in tf.trainable_variables()
        if v.name.split('/')[0] in train_layers
    ]

    # Train op
    # Adapted from https://github.com/thulab/DeepHash
    with tf.name_scope("train"):

        gst = tf.train.get_or_create_global_step()

        optimiser = tf.train.GradientDescentOptimizer(params.learning_rate)

        # Get gradients of all trainable variables
        grads_and_vars = optimiser.compute_gradients(loss, var_list)

        # NOTE(review): the negative indices assume compute_gradients
        # returns the eight (weight, bias) pairs for fc6/fc7/fclat/fc8 in
        # exactly this order, matching var_list[0..7] below -- fragile if
        # layer naming or variable-creation order changes.
        fc6w_grad, _ = grads_and_vars[-8]
        fc6b_grad, _ = grads_and_vars[-7]
        fc7w_grad, _ = grads_and_vars[-6]
        fc7b_grad, _ = grads_and_vars[-5]
        fclatw_grad, _ = grads_and_vars[-4]
        fclatb_grad, _ = grads_and_vars[-3]
        fc8w_grad, _ = grads_and_vars[-2]
        fc8b_grad, _ = grads_and_vars[-1]

        # Apply gradient descent to the trainable variables
        train_op = optimiser.apply_gradients([(fc6w_grad, var_list[0]),
                                              (fc6b_grad, var_list[1]),
                                              (fc7w_grad, var_list[2]),
                                              (fc7b_grad, var_list[3]),
                                              (fclatw_grad, var_list[4]),
                                              (fclatb_grad, var_list[5]),
                                              (fc8w_grad, var_list[6]),
                                              (fc8b_grad, var_list[7])],
                                             global_step=gst)

    # Add gradients to summary
    # var.name ends with ':0'; [:-2] strips the output-index suffix.
    for gradient, var in grads_and_vars:
        tf.summary.histogram(var.name[:-2] + '/gradient', gradient)

    # Add the variables we train to the summary
    for var in var_list:
        tf.summary.histogram(var.name[:-2], var)

    # Evaluation op: Accuracy of the model
    with tf.name_scope("accuracy"):
        correctness = tf.equal(tf.argmax(pred, 1), tf.argmax(labels, 1))
        accuracy = tf.reduce_mean(tf.cast(correctness, tf.float32))

    # Add the accuracy to the summary
    tf.summary.scalar('Training_Accuracy', accuracy)

    # Assign pretrained weights to the AlexNet model
    init_fn = tf.contrib.framework.assign_from_values_fn(
        alexnet_model.get_map())

    # TRAIN spec: metrics are deliberately omitted here because the EVAL
    # path above already returned its own spec with eval_metric_ops.
    return tf.estimator.EstimatorSpec(mode=mode,
                                      predictions=None,
                                      loss=loss,
                                      train_op=train_op,
                                      eval_metric_ops=None,
                                      training_hooks=[RestoreHook(init_fn)])
Example #10
# Ops for initializing the two different iterators
training_init_op = iterator.make_initializer(tr_data.data)
validation_init_op = iterator.make_initializer(val_data.data)

# TF placeholder for graph input and output
# AlexNet/ResNet consume a raw NHWC placeholder; DenseNet uses a Keras
# Input layer instead.
if mode == 'alexnet' or mode == 'resnet':
    x = tf.placeholder(tf.float32, [None, img_size[0], img_size[1], 3])
elif mode == 'densenet':
    x = Input(shape=(img_size[0], img_size[1], 3), name='data')
y = tf.placeholder(tf.float32, [None, num_classes])  # one-hot labels
keep_prob = tf.placeholder(tf.float32)  # dropout keep probability
global_step = tf.Variable(0, trainable=False)

# Initialize model
# Each branch binds `score` to the model's pre-softmax output logits.
if mode == 'alexnet':
    model = AlexNet(x, keep_prob, num_classes, train_layers)
    score = model.fc8
elif mode == 'densenet':
    model_op = DenseNet(sub_mode, x, num_classes=num_classes)
    model = model_op.create()
    score = model_op.output
elif mode == 'resnet':
    model_op = ResNet(resnet_size=sub_mode, num_classes=num_classes, resnet_version=1) 
    score = model_op.create(x, True)

# List of trainable variables of the layers we want to train
# 'all' trains everything; otherwise filter by top-level scope name.
if 'all' in train_layers:
    var_list = tf.trainable_variables()
else:
    var_list = [v for v in tf.trainable_variables() if v.name.split('/')[0] in train_layers]