Example #1
    def __init__(self, input_shape, number_class, AttenMethod):
        super(Res_clstm_MN, self).__init__()
        self.in_channels = input_shape[1]
        self.OutChannel_Res3D = [64, 64, 128, 256]
        self.in_LSTM = self.OutChannel_Res3D[-1]
        self.hidden_LSTM = [256, 256]
        self.in_channel_MN = self.hidden_LSTM[-1]
        self.input_shape = input_shape
        self.step_lstm = int(input_shape[2] / 2)
        self.batch_size = input_shape[0]
        self.number_class = number_class

        self.r3D = Res3D(self.in_channels, output_channels=self.OutChannel_Res3D, \
                  init_method = 'kaiming_normal_')
        self.aclstm = AttenConvLSTM(input_channels=self.in_LSTM, hidden_channels = self.hidden_LSTM, \
                                 kernel_size=3, step = self.step_lstm, init_method = 'xavier_normal_',\
                                 AttenMethod = AttenMethod)
        self.MoNet = MobileNet(input_channels=self.in_channel_MN)
        self.avgpool3d = nn.AvgPool3d(kernel_size=(self.step_lstm, 4, 4))
        self.dense = nn.Linear(1024, self.number_class)
        self.softmax = nn.Softmax(dim=1)
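The constructor above only builds the sub-modules; the snippet ends before the forward pass. A minimal sketch of how the pieces could be chained (Res3D features, attention ConvLSTM over time, per-frame MobileNet, 3D average pooling, dense + softmax), assuming 5D (batch, channel, time, height, width) activations and the shapes implied by avgpool3d and the 1024-wide dense layer; the reshaping details are assumptions, not the original implementation:

    def forward(self, x):
        # assumed input layout: (batch, channels, frames, height, width)
        x = self.r3D(x)                      # 3D residual features
        x = self.aclstm(x)                   # attention ConvLSTM over the time steps
        b, c, t, h, w = x.shape
        # fold time into the batch dimension so MobileNet sees ordinary 2D frames
        x = x.transpose(1, 2).reshape(b * t, c, h, w)
        x = self.MoNet(x)                    # per-frame MobileNet features
        x = x.reshape(b, t, *x.shape[1:]).transpose(1, 2)
        x = self.avgpool3d(x)                # pool over time and the spatial map
        x = self.dense(x.view(b, -1))
        return self.softmax(x)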
Example #2
        image_generator = ImageDataGenerator(rescale=1. / 255)

    data_generator = image_generator.flow_from_directory(batch_size=BATCH_SIZE,
                                                         directory=path,
                                                         shuffle=train,
                                                         target_size=(IMG_SHAPE, IMG_SHAPE),
                                                         class_mode='sparse')

    return data_generator


if __name__ == '__main__':
    train_data_gen = get_generator(DATASET, True)
    val_data_gen = get_generator(DATASET, False)

    model = MobileNet()
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
    acc = tf.keras.metrics.SparseCategoricalAccuracy()
    EPOCHS = 1000
    train_batches = train_data_gen.n // BATCH_SIZE
    val_batches = val_data_gen.n // BATCH_SIZE

    for i in range(EPOCHS):
        print('-----EPOCH: {}-----'.format(i))
        for batch_index in range(train_batches):
            with tf.GradientTape() as tape:
                x_batch, y_true = train_data_gen[batch_index]  # fetch the batch once instead of indexing it twice
                y_pred = model(x_batch, training=True)
                loss = tf.keras.losses.sparse_categorical_crossentropy(y_true=y_true, y_pred=y_pred)
                loss = tf.reduce_mean(loss)
                acc.update_state(y_true=y_true, y_pred=y_pred)
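The loop above records the loss on a GradientTape but is cut off before the weights are updated. In TF2 eager style the update conventionally follows the tape block like this; a sketch using the snippet's own names, not the original file's code:

            # apply the recorded gradients once per batch (outside the tape context)
            grads = tape.gradient(loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))
        # per-epoch reporting; the format string is an assumption
        print('loss: {:.4f}  acc: {:.4f}'.format(float(loss), float(acc.result())))
        acc.reset_states()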
Example #3
import tensorflow as tf
import random
import numpy as np
from MobileNet import MobileNet
from time import clock  # note: time.clock was removed in Python 3.8; time.perf_counter is the modern equivalent

# use randomly generated data so that data loading does not affect the timing
images = tf.placeholder(tf.float32, shape=[None, 3, 224, 224])
labels = tf.placeholder(tf.float32, shape=[None, 1000])
preds = MobileNet(images, True)
loss = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=preds)
train_step = tf.train.MomentumOptimizer(0.01, 0.9).minimize(loss)

batch_size = 64
max_batches = 50

sess = tf.Session()
sess.run(tf.global_variables_initializer())

print('Started.')

tot_time = 0
for _ in range(max_batches + 1):
    images_ = 2. * (np.random.rand(batch_size, 3, 224, 224) - 0.5)
    index = [random.randrange(1000) for i in range(batch_size)]
    labels_ = np.zeros(shape=(batch_size, 1000))
    labels_[range(batch_size), index] = 1.
    t0 = clock()
    sess.run(train_step, feed_dict={images: images_, labels: labels_})
    t1 = clock()
    if _ > 0:  # skip the first (warm-up) iteration when accumulating time
        tot_time += t1 - t0
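The benchmark accumulates per-batch wall-clock time, but the snippet ends before reporting it; a one-line summary in the same spirit (the exact wording is an assumption):

print('Average time per training batch: {:.3f} s'.format(tot_time / max_batches))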

Example #4

    checkpoint = torch.load('./checkpoint/ckpt_318_99.730778.pth')
    net.load_state_dict(checkpoint['weight'])
    return net
    
"""

if __name__ == '__main__':
    input_size = 64
    number_classes = 2
    name = 'MobileNet{}x{}'.format(input_size, input_size)

    #hardware setting
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    device = 'cpu'  # currently this only works with the 'cpu' setting

    net = MobileNet(number_classes, input_size)

    #if(device == 'cuda'):
    net = net.to(device)

    # load a pre-trained PyTorch model
    checkpoint = torch.load("./best_ckpt_64x64_20190829.pth")
    net.load_state_dict(checkpoint['weight'])

    # to run on the CPU, move both the model and the input to 'cpu':
    # net = net.to('cpu')
    # input_ = input_.to('cpu')

    net.eval()

    input_ = torch.ones([1, 3, input_size, input_size])
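The script stops right after building the dummy input. With a network in eval mode and a fixed-size tensor like this, the usual next steps are a sanity-check forward pass and, optionally, tracing for export; a hedged sketch (the output handling and the ONNX file name are assumptions):

    # sanity-check forward pass with the dummy input
    with torch.no_grad():
        logits = net(input_)
        probs = torch.softmax(logits, dim=1)
        print('predicted class:', probs.argmax(dim=1).item())

    # the same dummy input is what torch.onnx.export would use for tracing:
    # torch.onnx.export(net, input_, 'mobilenet_{}x{}.onnx'.format(input_size, input_size))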
Example #5
File: train.py  Project: djdajing/NDSC
    # split the data: 80% for training, 20% for testing; random_state is the seed used by the random number generator
    split = train_test_split(data,
                             brandLabels,
                             colorLabels,
                             test_size=0.2,
                             random_state=42)
    (trainX, testX, trainBrandY, testBrandY, trainColorY, testColorY) = split

    train_labels = [trainBrandY, trainColorY]
    test_labels = [testBrandY, testColorY]
    '''END OF NEED MANUAL '''

    cat_classcount_match_dict = category_classcount_match(wanted_categories)
    model = MobileNet.build(IMG_DIM,
                            IMG_DIM,
                            catclasscountmatch=cat_classcount_match_dict,
                            finalAct="softmax")

    losses, lossWeights = gen_loss_dicts(wanted_categories_output)

    print("[INFO] compiling model...")
    opt = Adam(lr=LR, decay=LR / EPOCHS)
    model.compile(optimizer=opt,
                  loss=losses,
                  loss_weights=lossWeights,
                  metrics=["accuracy"])

    trainY = gen_y_parameter_for_fit(wanted_categories_output, train_labels)
    testY = gen_y_parameter_for_fit(wanted_categories_output, test_labels)

    # train the network to perform multi-output classification
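The snippet is cut off right after this comment. With the multi-output model compiled above, the training call would conventionally pass the per-output labels built by gen_y_parameter_for_fit; a sketch under that assumption (the batch size is a placeholder, not a value from the original project):

    H = model.fit(trainX, trainY,
                  validation_data=(testX, testY),
                  epochs=EPOCHS,
                  batch_size=32,
                  verbose=1)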

Example #6

from __future__ import print_function

import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from time import clock

from MobileNet import MobileNet

model = MobileNet()
model.cuda()

print('Model created.')

transform = transforms.Compose([
    transforms.RandomSizedCrop(224),  # deprecated alias of RandomResizedCrop in newer torchvision
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

traindir = '/home/zheng/Datasets/ILSVRC/ILSVRC2012_images_train'
train = datasets.ImageFolder(traindir, transform)

print('Dataset created.')

train_loader = torch.utils.data.DataLoader(train, batch_size=64, shuffle=True, num_workers=4)
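The file ends after constructing the DataLoader. A minimal sketch of the timed training loop such a benchmark script typically runs next, mirroring the TensorFlow example above; the loss, optimizer, and number of measured batches are assumptions, not taken from the original:

criterion = nn.CrossEntropyLoss().cuda()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

model.train()
tot_time, max_batches = 0.0, 50
for i, (images, target) in enumerate(train_loader):
    if i > max_batches:
        break
    images, target = images.cuda(), target.cuda()
    t0 = clock()
    output = model(images)
    loss = criterion(output, target)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    t1 = clock()
    if i > 0:  # skip the first (warm-up) batch
        tot_time += t1 - t0
print('Average time per batch: {:.3f} s'.format(tot_time / max_batches))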