Example #1
def Pretrained_HO_RCNN(cfg):
    K.set_image_dim_ordering('th')
    weights =  False
    modelPrs = AlexNet((3, 227, 227), weights, cfg.nb_classes, include='fc')
    modelObj = AlexNet((3, 227, 227), weights, cfg.nb_classes, include='fc')
    modelPar = PairWiseStream(input_shape=(2,64,64), nb_classes = cfg.nb_classes, include='fc')             
    
    my_actual_weights_path = cfg.my_weights_path
    
    cfg.my_weights_path = cfg.prs_weights_path
    cfg.my_weights     = cfg.prs_weights
    modelPrs = _final_stop(modelPrs.input, modelPrs.output, cfg)
    cfg.my_weights_path = cfg.obj_weights_path
    cfg.my_weights     = cfg.obj_weights
    modelObj = _final_stop(modelObj.input, modelObj.output, cfg)
    cfg.my_weights_path = cfg.par_weights_path
    cfg.my_weights     = cfg.par_weights
    modelPar = _final_stop(modelPar.input, modelPar.output, cfg)
    
    cfg.my_weights_path = my_actual_weights_path
    cfg.my_weights = None
    
    models = [modelPrs, modelObj, modelPar]
    outputs = [model.layers[-2].output for model in models]
    outputs = Add()(outputs)
    inputs = [model.input for model in models]
    
    final_model = _final_stop(inputs, outputs, cfg)
    return final_model
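
Note: the HO-RCNN builders in this listing all read attributes off a cfg object and hand the assembled graph to the helper _final_stop, which is not shown here. A hypothetical, minimal config for calling the function above; the attribute names come from the accesses in the code, the values are illustrative only, and _final_stop may require additional fields:

from types import SimpleNamespace

cfg = SimpleNamespace(
    nb_classes=600,                                         # illustrative value
    my_weights_path='weights/final/', my_weights=None,      # hypothetical paths
    prs_weights_path='weights/prs/', prs_weights='prs.h5',
    obj_weights_path='weights/obj/', obj_weights='obj.h5',
    par_weights_path='weights/par/', par_weights='par.h5',
)
# final_model = Pretrained_HO_RCNN(cfg)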
Example #2
def alexnet():
    if not valid_request(request, UPLOAD_FOLDER): return redirect(request.url)
    # hard coded image as orig.png
    model = AlexNet(pretrained=True)
    model.eval()

    image = Image.open(os.path.join(UPLOAD_FOLDER, 'orig.png'))
    image = image.convert('RGB')

    out = topk_to_rank_string(*model(image))

    return render_template('/pages/alexnet.html', out=Markup(out[:-2]))
Example #3
def restore_model(file_path: str):
    info = torch.load(file_path)
    arch: str = info['hparams'].arch
    batch_norm = info['hparams'].batch_norm
    dataset: str = info['hparams'].dataset
    hidden_layers: int = info['hparams'].hidden_layers
    hidden_size: int = info['hparams'].hidden_size
    if arch == 'mlp' and dataset == 'mnist':
        model: nn.Module = MLP(input_size=784,
                               hidden_size=hidden_size,
                               num_hidden_layers=hidden_layers,
                               batch_norm=batch_norm)
    elif arch == 'alexnet':
        model: nn.Module = AlexNet()
    else:
        model: nn.Module = MLP(hidden_size=hidden_size,
                               num_hidden_layers=hidden_layers,
                               batch_norm=batch_norm)

    model.load_state_dict(info['model_state_dict'])
    lr = info['hparams'].lr
    momentum = info['hparams'].momentum
    weight_decay = info['hparams'].weight_decay
    optimizer = optim.SGD(model.parameters(),
                          lr=lr,
                          momentum=momentum,
                          weight_decay=weight_decay)
    optimizer.load_state_dict(info['optimizer_state_dict'])
    # model.eval()
    return model, optimizer, info['hparams']
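
For reference, a minimal sketch of the checkpoint layout this loader assumes. The keys come from the accesses above; the model, optimizer, and hparams used here are simple stand-ins, not the project's own classes:

import argparse
import torch
import torch.nn as nn
import torch.optim as optim

model = nn.Linear(784, 10)                    # stand-in for MLP / AlexNet
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0)
hparams = argparse.Namespace(arch='mlp', dataset='mnist', batch_norm=False,
                             hidden_layers=2, hidden_size=128,
                             lr=0.01, momentum=0.9, weight_decay=0.0)
torch.save({'hparams': hparams,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict()},
           'checkpoint.pt')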
Example #4
    def configure_model(self):
        """

        :return:
        """
        arch: str = self.hparams.arch
        batch_norm = self.hparams.batch_norm
        dataset: str = self.hparams.dataset
        hidden_layers: int = self.hparams.hidden_layers
        hidden_size: int = self.hparams.hidden_size
        if arch == 'mlp':
            if dataset == 'mnist':
                return MLP(input_size=784,
                           hidden_size=hidden_size,
                           num_hidden_layers=hidden_layers,
                           batch_norm=batch_norm)
            elif dataset == 'cifar10':
                return MLP(hidden_size=hidden_size,
                           num_hidden_layers=hidden_layers,
                           batch_norm=batch_norm)
            else:
                raise ValueError('invalid dataset specification!')
        elif arch == 'alexnet':
            return AlexNet()
        elif arch == 'vgg11':
            return VGG(vgg_name='VGG11')
        elif arch == 'vgg13':
            return VGG(vgg_name='VGG13')
        elif arch == 'resnet18':
            return ResNet18()
        elif arch == 'resnet34':
            return ResNet34()
        else:
            raise ValueError('Unsupported model!')
Example #5
def Fast_HO_RCNN(cfg):

    K.set_image_dim_ordering('tf')
    if cfg.backbone == 'vgg':
        print('Use VGG16 backbone')
        weights = VGG16_Weights_notop(
            cfg) if cfg.pretrained_weights == True else False
        modelShr = VGG16((None, None, cfg.cdim),
                         weights,
                         cfg.nb_classes,
                         include='basic')
    else:
        print('Use Alexnet backbone')
        weights = AlexNet_Weights_notop(
            cfg) if cfg.pretrained_weights == True else False
        modelShr = AlexNet((None, None, cfg.cdim),
                           weights,
                           cfg.nb_classes,
                           include='basic')
    prsRoI = input_rois()
    objRoI = input_rois()
    modelPrs = fastClassifier(modelShr.output,
                              prsRoI,
                              cfg,
                              nb_classes=cfg.nb_classes)
    modelObj = fastClassifier(modelShr.output,
                              objRoI,
                              cfg,
                              nb_classes=cfg.nb_classes)
    modelPar = fastPairWiseStream(input_shape=(None, 64, 64, 2),
                                  nb_classes=cfg.nb_classes,
                                  include='fc')

    if cfg.backbone == 'vgg':
        # Only train from conv3_1
        for i, layer in enumerate(modelShr.layers):
            layer.trainable = False
            if i > 6:
                break

    outputs = [modelPrs, modelObj, modelPar.output]
    outputs = [outputs[i] for i in range(len(outputs)) if cfg.inputs[i]]

    if sum(cfg.inputs) == 1:
        outputs = outputs[0]
    else:
        outputs = Add()(outputs)

    inputs = [prsRoI, objRoI, modelPar.input]
    inputs = [inputs[i] for i in range(len(inputs)) if cfg.inputs[i]]

    if cfg.inputs[0] or cfg.inputs[1]:
        inputs = [modelShr.input] + inputs

    final_model = _final_stop(inputs, outputs, cfg)

    return final_model
Example #6
def main():
    train_images = tf.data.TextLineDataset(
        '/home/caique/datasets/caltech101/caltech101_train.txt'
    )
    train_labels = tf.data.TextLineDataset(
        '/home/caique/datasets/caltech101/caltech101_train_labels.txt'
    )
    valid_images = tf.data.TextLineDataset(
        '/home/caique/datasets/caltech101/caltech101_test.txt'
    )
    valid_labels = tf.data.TextLineDataset(
        '/home/caique/datasets/caltech101/caltech101_test_labels.txt'
    )

    drop_data = tf.data.Dataset.zip((train_images, train_labels))
    drop_data = drop_data.map(get_parser(False)).batch(1)
    valid_data = tf.data.Dataset.zip((valid_images, valid_labels))
    valid_data = valid_data.map(get_parser(False)).batch(120)
    train_data = tf.data.Dataset.zip((train_images, train_labels))
    train_data = train_data.map(get_parser(True)).shuffle(3030).batch(101)

    session = tf.Session()
    model = AlexNet(101)

    saver = tf.train.Saver()
    saver.restore(session, './variables/alexnet-caltech101-finetunned-2-2')

    # dropped_filters = drop_filters(
    #     session, model, drop_data, valid_data, drop_total=500,
    #     drop_n=20
    # )

    # model.train(session, train_data, valid_data,
    #             epochs=40,
    #             lr=0.00001,
    #             # dropped_filters=dropped_filters,
    #             # train_layers=['fc8'],
    #             # weights_path='alexnet_weights.npy',
    #             variables_path='./variables/alexnet-caltech101-finetunned-33',
    #             model_name='alexnet-caltech101-finetunned-2')
    session.run(model.iterator.make_initializer(valid_data))
    print('final eval: {}'.format(model.eval(session)))
    session.close()
Example #7
def HO_RCNN(cfg):
    K.set_image_dim_ordering('th')
    weights = AlexNet_Weights_th(cfg) if cfg.pretrained_weights == True else False
    modelPrs = AlexNet((3, 227, 227), weights, cfg.nb_classes, include='fc')
    modelObj = AlexNet((3, 227, 227), weights, cfg.nb_classes, include='fc')
    modelPar = PairWiseStream(input_shape=(2,64,64), nb_classes = cfg.nb_classes, include='fc')             
    
    models = [modelPrs, modelObj, modelPar]
    models = [models[i] for i in range(len(models)) if cfg.inputs[i]]
    
    assert len(models)>0, 'minimum one model must be included in method'
    if len(models) == 1:
        outputs = models[0].output
    else:
        outputs = Add()([model.output for model in models])
    inputs = [model.input for model in models]
    
    final_model = _final_stop(inputs, outputs, cfg)
    return final_model
Example #8
def init_server(args):
    model = AlexNet()
    gradient_warehouse = GradientWarehouse(worker_num=args.world_size,
                                           lock=threading.Lock())
    threads_num = 2
    threads = []
    for i in range(threads_num):
        th = GradientServer(model=model,
                            gradient_warehouse=gradient_warehouse,
                            rank=i)
        threads.append(th)
        th.start()
    for t in threads:
        t.join()
Example #9
    def construct_model(self, model_name='r1', lr=1e-6):
        self.model_name = model_name
        tf.summary.image('image_angle_0', self.images, 1)

        with open(self.save_path + '/setup.txt', 'a') as self.out:
            self.out.write('Architecture: ' + str(model_name)+ '\n')
            self.out.write('number of channels: ' + str(self.n_channels) + '\n')
            self.out.write('img dimensionality: ' + str(self.img_dimens) + '\n')

        if model_name == 'r3':
            self.model = Regressor_3(self.images,
                                     self.counts,
                                     lr=lr)
        if model_name == 'alexnet':
            self.model = AlexNet(self.images,
                                 self.counts,
                                 lr=0.003)
        if model_name == 'lstm':
            self.model = CRNN(self.images,
                              self.counts,
                              lr=0.003)


        self.loss = self.model.loss()

        tf.summary.scalar("loss", self.loss)

        with tf.name_scope('train'):
            self.train_step = tf.train.AdamOptimizer(lr).minimize(self.loss)

        tf.add_to_collection(name='saved', value=self.loss)
        tf.add_to_collection(name='saved', value=self.model.pred_counts)
        if self.model_name == 'lstm':
            tf.add_to_collection(name='saved', value=self.reconstruction)

        tf.add_to_collection(name='placeholder', value=self.x)
        tf.add_to_collection(name='placeholder', value=self.y)
        tf.add_to_collection(name='placeholder', value=self.images)
        tf.add_to_collection(name='placeholder', value=self.counts)
        tf.add_to_collection(name='placeholder', value=self.model.keep_prob)
        tf.add_to_collection(name='placeholder', value=self.model.is_training)
        tf.add_to_collection(name='placeholder', value=self.iterator.initializer)


        self.summaries   = tf.summary.merge_all()
        self.saver       = tf.train.Saver()

        self.writer      = tf.summary.FileWriter(self.save_path+'/logs/train')
        self.writer_test = tf.summary.FileWriter(self.save_path+'/logs/test')
Example #10
def get_model(model_name, num_classes):
    if model_name == "create_new_model":
        return create_new_model(num_classes)
    elif model_name == "AlexNet":
        return AlexNet(num_classes)
    elif model_name == "LeNet5":
        return LeNet5(num_classes)
    elif model_name == "VGG16":
        return VGG16(num_classes)
    elif model_name == "ResNet50":
        return ResNet50(num_classes)
    elif model_name == "InceptionV3":
        return InceptionV3(num_classes)
    elif model_name == "DeepFace":
        return DeepFace(num_classes)
Example #11
def get_model(name, device):
    """
    Returns required classifier and autoencoder
    :param name:
    :return: Autoencoder, Classifier
    """
    if name == 'lenet':
        model = LeNet(in_channels=channels).to(device)
    elif name == 'alexnet':
        model = AlexNet(channels=channels, num_classes=10).to(device)
    elif name == 'vgg':
        model = VGG(in_channels=channels, num_classes=10).to(device)

    autoencoder = CAE(in_channels=channels).to(device)
    return model, autoencoder
Example #12
    def model_fn(input_images, num_classes, is_training, keep_prob):
        if model_name == 'alexnet':
            model = AlexNet(input_images,
                            keep_prob,
                            num_classes,
                            weight_decay=0.0005)

        elif model_name == 'binary_connect_mlp':
            model = binaryconnect.MLP(input_images, is_training, 1.0,
                                      num_classes, binary, stochastic)

        elif model_name == 'binary_connect_cnn':
            # Paper settings: 500 epochs, batch size 50
            model = binaryconnect.CNN(input_images, is_training, num_classes,
                                      binary, stochastic)

        return model
Example #13
    def __init__(self, model,dataset_index=0,video_target = None):

        if args.video == None:
            
            self.video_target = video_target
            customset_train = CustomDataset(path = args.dataset_path,subset_type="training",dataset_index=dataset_index,video_target = video_target)
            customset_test = CustomDataset(path = args.dataset_path,subset_type="testing",dataset_index=dataset_index, video_target = video_target)
        
            self.trainloader = torch.utils.data.DataLoader(dataset=customset_train,batch_size=args.batch_size,shuffle=True,num_workers=args.num_workers)
            self.testloader = torch.utils.data.DataLoader(dataset=customset_test,batch_size=args.batch_size,shuffle=False,num_workers=args.num_workers)    
        else:
            video_dataset = VideoDataset(video=args.video, batch_size=args.batch_size,
                                        frame_skip=int(args.frame_skip),image_folder=args.extract_frames_path, use_existing=args.use_existing_frames)
            
            self.videoloader = torch.utils.data.DataLoader(dataset=video_dataset, batch_size=1,shuffle=False,num_workers=args.num_workers)

   
        if (model == "alex"):
            self.model = AlexNet()
        elif (model == "vgg"):
            self.model = VGG()
        elif (model == "resnet"):
            self.model = ResNet()

        if args.pretrained_model != None:
            if args.pretrained_finetuning == False:
                self.model.load_state_dict(torch.load(args.pretrained_model))
            else:
                print "DEBUG : Make it load only part of the resnet model"
                #print(self.model)
                #self.model.load_state_dict(torch.load(args.pretrained_model))
                #for param in self.model.parameters():
                #    param.requires_grad = False
                self.model.fc = nn.Linear(512, 1000)
                #print(self.model)
                self.model.load_state_dict(torch.load(args.pretrained_model))
                self.model.fc = nn.Linear(512,3)
                #print(self.model)
                
        self.model.cuda()        
        print "Using weight decay: ",args.weight_decay
        self.optimizer = optim.SGD(self.model.parameters(), weight_decay=float(args.weight_decay),lr=0.01, momentum=0.9,nesterov=True)
        self.criterion = nn.CrossEntropyLoss().cuda()
Example #14
    def __init__(self, model,dataset_index=0, path = None):

        self.sampler = self.weighted_sampling(dataset_index=dataset_index,path=path)

        customset_train = CustomDatasetViewpoint(path = path,subset_type="training",dataset_index=dataset_index)
        customset_test = CustomDatasetViewpoint(path = path,subset_type="testing",dataset_index=dataset_index)

        self.trainloader = torch.utils.data.DataLoader(pin_memory=True,dataset=customset_train,sampler=self.sampler,batch_size=args.batch_size,num_workers=args.num_workers)  # shuffle is omitted: DataLoader forbids combining a sampler with shuffle=True
        self.trainloader_acc = torch.utils.data.DataLoader(dataset=customset_train,batch_size=args.batch_size,shuffle=True,num_workers=args.num_workers)
        self.testloader_acc = torch.utils.data.DataLoader(dataset=customset_test,batch_size=args.batch_size,shuffle=True,num_workers=args.num_workers)

        if (model == "alex"):
            self.model = AlexNet()
        elif (model == "vgg"):
            self.model = VGG(num_classes=2)
        elif (model == "resnet"):
            self.model = ResNet()

        if args.pretrained_model != None:
            if args.pretrained_same_architecture:
                self.model.load_state_dict(torch.load(args.pretrained_model))
            else:
                if args.arch == "vgg":
                    self.model.soft = None
                    classifier = list(self.model.classifier.children())
                    classifier.pop()
                    classifier.append(torch.nn.Linear(4096,1000))
                    new_classifier = torch.nn.Sequential(*classifier)
                    self.model.classifier = new_classifier
                    self.model.load_state_dict(torch.load(args.pretrained_model))
                    classifier = list(self.model.classifier.children())
                    classifier.pop()
                    classifier.append(torch.nn.Linear(4096,2))
                    new_classifier = torch.nn.Sequential(*classifier)
                    self.model.classifier = new_classifier
                    self.model.soft = nn.LogSoftmax()
                else:
                    self.model.fc = nn.Linear(512, 1000)
                    self.model.load_state_dict(torch.load(args.pretrained_model))
                    self.model.fc = nn.Linear(512,2)     
   
        self.optimizer = optim.Adam(self.model.parameters(), weight_decay=float(args.weight_decay), lr=0.0001)
Example #15
def main(datasets_path='/home/caique/datasets/caltech101',
         variables_path='./variables/alexnet-caltech101-78'):
    train_images = tf.data.TextLineDataset(datasets_path +
                                           '/caltech101_train.txt')
    train_labels = tf.data.TextLineDataset(datasets_path +
                                           '/caltech101_train_labels.txt')
    valid_images = tf.data.TextLineDataset(datasets_path +
                                           '/caltech101_valid.txt')
    valid_labels = tf.data.TextLineDataset(datasets_path +
                                           '/caltech101_valid_labels.txt')

    # drop_data = tf.data.Dataset.zip((train_images, train_labels))
    # drop_data = drop_data.map(get_parser(False)).batch(1)
    valid_data = tf.data.Dataset.zip((valid_images, valid_labels))
    valid_data = valid_data.map(get_parser(False)).batch(120)
    train_data = tf.data.Dataset.zip((train_images, train_labels))
    train_data = train_data.map(get_parser(True)).shuffle(3030).batch(101)

    model = AlexNet(101, keep_prob=1.0)
    drop_filters(model, train_data, valid_data, variables_path)
Example #16
    def __init__(self, model, dataset_index=0, path=None, viewpoints=3):

        if (model == "alex"):
            self.model = AlexNet()
        elif (model == "vgg"):
            self.model = VGG(num_classes=2)
        elif (model == "resnet"):
            self.model = ResNet()
        elif (model == "ED"):
            self.model_ED = EncoderDecoderViewpoints()

        self.model_vgg = VGG_viewpoints(num_classes=3).cuda()
        self.model_ed = EncoderDecoderViewpoints().cuda()

        self.model_vgg = nn.DataParallel(self.model_vgg,
                                         device_ids=[0, 1, 2, 3]).cuda()
        self.model_vgg.load_state_dict(
            torch.load(
                "./results/viewpoint_models/vgg_viewpoint_ED_prepared/model_epoch_2.pth"
            ))
        mod = list(self.model_vgg.module.classifier.children())
        mod.pop()
        mod.pop()
        mod.pop()
        new_classifier = torch.nn.Sequential(*mod)
        self.model_vgg.module.new_classifier = new_classifier
        print(self.model_vgg)

        # Trained ED loading, comment to disable

        self.model_ed.load_state_dict(
            torch.load(
                "./results/viewpoint_models/vgg_viewpoint_ED_disjointed/model_ed_epoch_20.pth"
            ))

        print(self.model_ed)
Example #17
# Dataloaders
batch_size = 128
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=batch_size,
                                           sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(val_dataset,
                                         batch_size=batch_size,
                                         sampler=valid_sampler)
test_loader = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=batch_size,
                                          sampler=test_sample)

# GPU setup
device = torch.device('cuda')

net = AlexNet(in_channel=2, classes=10).to(device=device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.001)  #, momentum=0.9)

# Train loop
num_epochs = 25
for epoch in range(num_epochs):

    print("Epoch: {} - Train".format(epoch))
    net.train()
    running_loss = 0.
    # Train:
    for batch_index, (signals, labels) in enumerate(tqdm(train_loader)):

        signals, labels = signals.to(device=device), labels.to(device=device)
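
        # Assumed continuation (the snippet is cut off here): the usual
        # zero-grad / forward / loss / backward / step sequence using the
        # criterion and optimizer defined above.
        optimizer.zero_grad()
        outputs = net(signals)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()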
Example #18
    # fix the random seed
    # 0 999 333 111 123
    SEED = 0

    torch.manual_seed(SEED)
    np.random.seed(SEED)

    # hyperparameters
    PATHS = stock_data_paths()
    MODEL_PATH = "weights/cnn/try"
    BATCH_SIZE = 100
    N_EPOCHS = 1
    N_LAGS = 25
    Y_DAYS = 1
    NUM_FEA = 1
    LR = 0.01

    # load the dataset
    X_train, y_train, X_test, y_test = create_input_data(PATHS, N_LAGS, Y_DAYS)
    train_dataset = StockDataset(X_train, y_train)
    train_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE)
    test_dataset = StockDataset(X_test, y_test)
    test_loader = DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE)

    net = AlexNet(N_LAGS, Y_DAYS).to(device)
    train(net, N_EPOCHS, train_loader, LR, MODEL_PATH)
    eval(net, MODEL_PATH, test_loader=test_loader)
    #plot_one_stock(X_test, y_test, net, MODEL_PATH, length=1000)

    #The MSE is  0.002010555131915416
    #The MSE is  0.004173629942736138
Example #19
from models import LeNet, AlexNet, VGG13, ResNet34, TestNet
from keras.models import load_model
from keras import optimizers
from prepare_data import load_data
from utils import CLASS_NUM, IMG_SIZE
import os

train_X, test_X, train_y, test_y = load_data(class_num=CLASS_NUM,
                                             img_size=IMG_SIZE)

if not os.path.exists('autogta.h5'):
    autogta = AlexNet(train_X[0].shape)
    autogta.compile(optimizer='adam',
                    loss='categorical_crossentropy',
                    metrics=['accuracy'])
    autogta.fit(x=train_X, y=train_y, epochs=10, batch_size=16)
    autogta.save('autogta.h5')

else:
    autogta = load_model('autogta.h5')

loss, accu = autogta.evaluate(x=test_X, y=test_y)

print('loss\t{}\naccuracy\t{}'.format(loss, accu))
Example #20
def init_server(args):
    model = AlexNet()
    server = ParameterServer(model=model)
    server.run()
Example #21
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import torch
from models import AlexNet
from torch.autograd import Variable

import warnings
warnings.simplefilter("ignore")

# Detect all faces in an image
# load a Haar cascade classifier for detecting frontal faces
face_cascade = cv2.CascadeClassifier(
    './detectors/haarcascade_frontalface_default.xml')

net = AlexNet()
# loading the best saved model parameters
net.load_state_dict(
    torch.load('./saved_models/keypoints_model_AlexNet_50epochs.pth'))

# prepare the net for evaluation mode
net.eval()

image = cv2.imread('imgs/10.jpg')
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

faces = face_cascade.detectMultiScale(image, 1.2, 2)

# make a copy of the original image to plot detections on
image_with_detections = image.copy()
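
# Assumed continuation (not part of the original snippet): draw each detected
# face on the copy; faces holds (x, y, w, h) boxes from detectMultiScale above.
for (x, y, w, h) in faces:
    cv2.rectangle(image_with_detections, (x, y), (x + w, y + h), (255, 0, 0), 3)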
Example #22
def main():
    global args, best_prec1
    args = parser.parse_args()
    print(args)


    # Set # classes
    if args.data == 'UCF101':
        num_classes = 101
    else:
        num_classes = 0
        print('Specify the dataset to use ')


    # Create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()

    if args.arch.startswith('alexnet'):
        model = AlexNet(num_classes=num_classes)
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()

    else:
        model = torch.nn.DataParallel(model).cuda()


    # Modify last layer of the model
    model_ft = models.resnet18(pretrained=True)
    num_ftrs = model_ft.fc.in_features
    model_ft.fc = nn.Linear(num_ftrs, 101)
    model = model_ft.cuda()
    model = torch.nn.DataParallel(model).cuda() # Using one GPU (device_ids = 1)
    # print(model)


    # Define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # Optionally resume from a checkpoint
    if args.resume:
        checkpoint = torch.load(args.resume)
        args.start_epoch = checkpoint['epoch']
        best_prec1 = checkpoint['best_prec1']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("=> loaded checkpoint '{}' (epoch {})"
              .format(args.resume, checkpoint['epoch']))
    else:
        print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    testdir = os.path.join(args.data, 'test')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(traindir, transforms.Compose([
            transforms.RandomCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size = args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True
    )

    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(testdir, transforms.Compose([
            transforms.Scale(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size = args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True
    )

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # Train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # # Evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        # Remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'optimizer': optimizer.state_dict(),
        }, is_best)
Example #23
if __name__ == '__main__':

    writer = SummaryWriter('runs/histo_run_AlexNet_lr_1e-4')

    df = pd.read_csv(data.train_csv)
    train_df, val_df = train_test_split(df, test_size=0.15)
    train_dataset = HistoDataset(train_df, data.train_dir)
    val_dataset = HistoDataset(val_df, data.train_dir)

    test_df = pd.read_csv(data.test_csv)
    test_dataset = HistoDataset(test_df, data.test_dir, flag=HistoDataset.TEST_SET)

    train_loader = DataLoader(train_dataset, batch_size=4)
    val_loader = DataLoader(val_dataset, batch_size=4)

    model = AlexNet.AlexNet()

    img, label = next(iter(train_loader))
    writer.add_graph(model, img)

    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=2, verbose=True)
    loss_fn = torch.nn.BCEWithLogitsLoss()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    model = train_model(model, train_loader, val_loader, optimizer, scheduler, 20, loss_fn, device, writer)

    torch.save(model.state_dict(), 'models/AlexNet.pt')

    predictions = predict(model, test_dataset, device)
Example #24
                        help='feature extraction model')
    parser.add_argument('--feature_name',
                        default='conv_3',
                        type=str,
                        help='feature extraction layer')
    parser.add_argument('--allsubj',
                        action='store_true',
                        help='whether or not to use all subjects')
    parser.add_argument('--l2',
                        default=0,
                        type=float,
                        help='L2 regularization weight')
    args = parser.parse_args()

    if args.feature_extractor == 'alexnet':
        feat_extractor = AlexNet(args.feature_name)
    elif args.feature_extractor == 'vgg16':
        feat_extractor = VGG16(args.feature_name)
    else:
        raise ValueError('unimplemented feature extractor: {}'.format(
            args.feature_extractor))
    if torch.cuda.is_available():
        feat_extractor.cuda()

    subj_file = 'subjall.npy' if args.allsubj else 'subj1.npy'
    voxels, stimuli = voxel_data(os.path.join(args.bold5000_folder, subj_file),
                                 args.roi)
    voxel_pcs = PCA(n_components=voxels.shape[1]).fit_transform(voxels)

    stimuli = [
        os.path.join(args.bold5000_folder, 'stimuli', s) for s in stimuli
Example #25
import os
import numpy as np
import torch
from tqdm import tqdm
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
from sklearn.model_selection import KFold
from models import CCAEncoder, PCAEncoder, AlexNet
import utils

n_components = 10
roi = 'PPA'
bold5000_folder = '/home/eelmozn1/datasets/adversarial_tms/bold5000'
feat_extractor = AlexNet('conv_3')


def voxel_data(subj_file, roi):
    voxels = np.load(subj_file, allow_pickle=True).item()
    voxels = {c: v[roi] for c, v in voxels.items()}
    return voxels


def condition_features(stimuli_folder, model):
    print('Extracting features')
    stimuli = os.listdir(stimuli_folder)
    condition_features = {}
    batch_size = 32
    for i in tqdm(range(0, len(stimuli), batch_size)):
        batch_names = stimuli[i:i + batch_size]
        batch = [
            utils.image_to_tensor(os.path.join(stimuli_folder, n),
Example #26
worker_init_fn (callable, optional) --- If not None, this will be called on each worker subprocess with the worker id (an int in [0, num_workers - 1]) as input, after seeding and before data loading. (default: None)
'''
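
# Illustrative sketch (not part of the original snippet): the docstring above
# describes worker_init_fn; a minimal per-worker seeding function could be
# passed to DataLoader like this (the names below are assumptions):
import numpy as np
import torch

def seed_worker(worker_id):
    # derive a distinct NumPy seed for each worker from the base torch seed
    np.random.seed((torch.initial_seed() + worker_id) % 2**32)

# e.g. data.DataLoader(dataset, num_workers=2, worker_init_fn=seed_worker)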

# Fetch the test data
# First, set the path to the test data
test_path = 'D:/dataset/dogs-vs-cats/test1'
# Load the test dataset from its storage path
test_dataset = GetData(test_path, test=True)
# Wrap the test data into mini-batch form
loader_test = data.DataLoader(test_dataset, batch_size=3, shuffle=True, num_workers=1)

# --------------------------- 1. Load data: end ----------------------------------------------------------------

# --------------------------- 2. Build the CNN model: start ----------------------------------------------------------------
# Instantiate our ready-made AlexNet() model
cnn = AlexNet()
# Print the model to inspect it
print(cnn)

# --------------------------- 2. Build the CNN model: end ------------------------------------------------------------------

# --------------------------- 3. Set up the optimizer and loss function: start ------------------------------------------------------------------

'''
1. torch.optim is a package implementing various optimization algorithms.
For example, the method we use here is Adam().
class torch.optim.Adam(params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False) implements the Adam algorithm.
params (iterable) --- iterable of parameters to optimize, or dicts defining parameter groups
lr (float, optional) --- learning rate (default: 1e-3)
betas (Tuple[float, float], optional) --- coefficients used for computing running averages of the gradient and its square (default: (0.9, 0.999))
eps (float, optional) --- term added to the denominator to improve numerical stability (default: 1e-8)
Example #27
traindir = '/opt/data/kaggle/playground/dogs-vs-cats/sample_train'
train_dataset = DogCat(traindir, train=True)
loader_train = data.DataLoader(train_dataset,
                               batch_size=20,
                               shuffle=True,
                               num_workers=1)

testdir = '/opt/data/kaggle/playground/dogs-vs-cats/sample_test'
test_dataset = DogCat(testdir, train=True)
loader_test = data.DataLoader(test_dataset,
                              batch_size=3,
                              shuffle=True,
                              num_workers=1)

# 2. Build the CNN model
cnn = AlexNet()
print(cnn)
# 3. Set up the optimizer and loss function
optimizer = torch.optim.Adam(cnn.parameters(), lr=0.005,
                             betas=(0.9, 0.99))  # optimize all cnn parameters
loss_func = nn.CrossEntropyLoss()  # the target label is not one-hotted

# 4. Train the model
EPOCH = 10  # number of full passes over the training data
# training and testing
for epoch in range(EPOCH):
    num = 0
    # yields one mini-batch at a time; x is normalized while iterating over loader_train
    for step, (x, y) in enumerate(loader_train):
        b_x = Variable(x)  # batch x
        b_y = Variable(y)  # batch y
Example #28
            writer.add_scalar('train/acc', acc, n_iter)
            writer.add_scalar('val/loss', val_loss, n_iter)
            writer.add_scalar('val/acc', val_acc, n_iter)


    # dataloader
    train_loader, test_loader = getDataloader(root=dataset, batchsize=batchsize, worker=worker_num)


    # model select
    # If you want to change the details of the model, edit it in the models folder.
    if modelName == 'AlexNet':
        from models.AlexNet import *
        model = AlexNet()
    elif modelName == "SqueezeNet":
        from models.SqueezeNet import *
        model = SqueezeNet()
    elif modelName == "VGG16":
        from models.VGG16 import *
        model = VGG16()
    elif modelName == "GoogLeNet":
        from models.GoogLeNet import *
        model = GoogLeNet()
    elif modelName == "ResNet18":
        from models.ResNet18 import *
        model = ResNet18()
    elif modelName == "DenseNet121":
        from models.DenseNet import *
        model = DenseNet121()
Example #29
def main(args):

    logs = []

    transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
            ])

    trainloader, testloader = get_dataset(args, transform)
    net = AlexNet()

    if args.no_distributed:
        optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.0)
    else:
        optimizer = DownpourSGD(net.parameters(), lr=args.lr, n_push=args.num_push, n_pull=args.num_pull, model=net)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=1, verbose=True, min_lr=1e-3)

    # train
    net.train()
    if args.cuda:
        net = net.cuda()

    for epoch in range(args.epochs):  # loop over the dataset multiple times
        print("Training for epoch {}".format(epoch))
        for i, data in enumerate(trainloader, 0):
            # get the inputs
            inputs, labels = data

            if args.cuda:
                inputs, labels = inputs.cuda(), labels.cuda()

            # zero the parameter gradients
            optimizer.zero_grad()
            # forward + backward + optimize
            outputs = net(inputs)
            loss = F.cross_entropy(outputs, labels)
            loss.backward()
            optimizer.step()

            _, predicted = torch.max(outputs, 1)
            accuracy = accuracy_score(predicted, labels)

            log_obj = {
                'timestamp': datetime.now(),
                'iteration': i,
                'training_loss': loss.item(),
                'training_accuracy': accuracy,
            }

            if i % args.log_interval == 0 and i > 0:    # print every n mini-batches
                log_obj['test_loss'], log_obj['test_accuracy'] = evaluate(net, testloader, args)
                print("Timestamp: {timestamp} | "
                      "Iteration: {iteration:6} | "
                      "Loss: {training_loss:6.4f} | "
                      "Accuracy : {training_accuracy:6.4f} | "
                      "Test Loss: {test_loss:6.4f} | "
                      "Test Accuracy: {test_accuracy:6.4f}".format(**log_obj))

            logs.append(log_obj)
                
        val_loss, val_accuracy = evaluate(net, testloader, args, verbose=True)
        scheduler.step(val_loss)

    df = pd.DataFrame(logs)
    print(df)
    if args.no_distributed:
        if args.cuda:
            df.to_csv('log/gpu.csv', index_label='index')
        else:
            df.to_csv('log/single.csv', index_label='index')
    else:
        df.to_csv('log/node{}.csv'.format(dist.get_rank()), index_label='index')

    print('Finished Training')
Example #30
loader_train = torch.utils.data.DataLoader(
    train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
    num_workers=args.workers, pin_memory=True, sampler=train_sampler)

loader_test = torch.utils.data.DataLoader(
    datasets.ImageFolder(valdir, transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])),
    batch_size=args.batch_size, shuffle=False,
    num_workers=args.workers, pin_memory=True)

# Load the pretrained model
net = AlexNet()
net.load_state_dict(torch.load('/home/choong/.torch/models/alexnet-owt-4df8aa71.pth'), strict=False)
if torch.cuda.is_available():
    print('CUDA enabled.')
    net.cuda()
print("--- Pretrained network loaded ---")
# test(net, loader_test)

# prune the weights
masks = weight_prune(net, param['pruning_perc'])
net.set_masks(masks)
net = nn.DataParallel(net)
print("--- {}% parameters pruned ---".format(param['pruning_perc']))
test(net, loader_test)
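
Note: weight_prune and set_masks are project-specific helpers that are not shown in this listing. Purely as an illustration of what a magnitude-based pruning mask computation can look like (an assumption, not the project's own implementation):

import torch

def magnitude_prune_masks(model, pruning_perc):
    # Collect all weight tensors (skip biases) and compute a global threshold.
    weights = [p.data for p in model.parameters() if p.dim() > 1]
    all_weights = torch.cat([w.abs().flatten() for w in weights])
    threshold = torch.quantile(all_weights, pruning_perc / 100.0)
    # One binary mask per weight tensor: 1 keeps the weight, 0 prunes it.
    return [(w.abs() > threshold).float() for w in weights]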

Example #31
filewriter_path = 'cifar100_history/'
checkpoint_path = 'checkpoints/'

IMAGE_SIZE = 24
OUTPUT_FILE_NAME = 'train_output.txt'

decay_steps = int(len(target_train_data) / batch_size)
learning_rate_decay_factor = 0.95

if not os.path.isdir(filewriter_path): os.mkdir(filewriter_path)
if not os.path.isdir(checkpoint_path): os.mkdir(checkpoint_path)

x = tf.placeholder(tf.float32, [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3])
y = tf.placeholder(tf.float32, [None, num_classes])

model = AlexNet(x, num_classes)
score = model.fc5

var_list = [
    v for v in tf.trainable_variables() if v.name.split('/')[0] in train_layers
]

initial_x_batch = tf.placeholder(tf.float32,
                                 [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3])
dist_x_batch = distorted_batch(initial_x_batch, IMAGE_SIZE)

with tf.name_scope("cross_ent"):
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=score, labels=y))

with tf.name_scope('train'):
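    # Assumed continuation (the snippet is cut off here): an exponentially
    # decayed learning rate and a gradient-descent step restricted to var_list.
    # The base rate of 0.01 is illustrative only.
    global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.train.exponential_decay(0.01, global_step, decay_steps,
                                               learning_rate_decay_factor,
                                               staircase=True)
    train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step, var_list=var_list)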
Example #32
def main():

    use_cuda = torch.cuda.is_available() and not args.no_cuda
    device = torch.device('cuda' if use_cuda else 'cpu')
    print(device)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if use_cuda:
        torch.cuda.manual_seed(args.seed)
        torch.backends.cudnn.deterministic = True

    rgb = False
    if args.mode == 'rgb':
        rgb = True

    if args.gray_scale:
        rgb = False

    if args.tracking_data_mod is True:
        args.input_size = 192

    # DATALOADER

    train_dataset = GesturesDataset(model=args.model, csv_path='csv_dataset', train=True, mode=args.mode, rgb=rgb,
                                    normalization_type=1,
                                    n_frames=args.n_frames, resize_dim=args.input_size,
                                    transform_train=args.train_transforms, tracking_data_mod=args.tracking_data_mod)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.n_workers)

    validation_dataset = GesturesDataset(model=args.model, csv_path='csv_dataset', train=False, mode=args.mode, rgb=rgb, normalization_type=1,
                                   n_frames=args.n_frames, resize_dim=args.input_size, tracking_data_mod=args.tracking_data_mod)
    validation_loader = DataLoader(validation_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.n_workers)

    # network parameters

    in_channels = args.n_frames if not rgb else args.n_frames * 3
    n_classes = args.n_classes

    if args.model == 'LeNet':
        model = LeNet(input_channels=in_channels, input_size=args.input_size, n_classes=n_classes).to(device)

    elif args.model == 'AlexNet':
        model = AlexNet(input_channels=in_channels, input_size=args.input_size, n_classes=n_classes).to(device)

    elif args.model == 'AlexNetBN':
        model = AlexNetBN(input_channels=in_channels, input_size=args.input_size, n_classes=n_classes).to(device)

    elif args.model == "Vgg16":
        model = Vgg16(input_channels=in_channels, input_size=args.input_size, n_classes=n_classes).to(device)

    elif args.model == "Vgg16P":
        model = models.vgg16(pretrained=args.pretrained)
        for params in model.parameters():
            params.requires_grad = False
        model.features._modules['0'] = nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=(3, 3), stride=1, padding=1)
        model.classifier._modules['6'] = nn.Linear(4096, n_classes)
        # model.fc = torch.nn.Linear(model.fc.in_features, n_classes)
        model = model.to(device)

    elif args.model == "ResNet18P":
        model = models.resnet18(pretrained=args.pretrained)
        for params in model.parameters():
            params.requires_grad = False
        model._modules['conv1'] = nn.Conv2d(in_channels, 64, 7, stride=2, padding=3)
        model.fc = torch.nn.Linear(model.fc.in_features, n_classes)
        model = model.to(device)

    elif args.model == "ResNet34P":
        model = models.resnet34(pretrained=args.pretrained)
        for params in model.parameters():
            params.requires_grad = False
        model._modules['conv1'] = nn.Conv2d(in_channels, 64, 7, stride=2, padding=3)
        model.fc = torch.nn.Linear(model.fc.in_features, n_classes)
        model = model.to(device)

    elif args.model == "DenseNet121P":
        model = models.densenet121(pretrained=args.pretrained)
        for params in model.parameters():
            params.requires_grad = False
        model.features._modules['conv0'] = nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=(7, 7),
                                                     stride=(2, 2), padding=(3, 3))
        model.classifier = nn.Linear(in_features=1024, out_features=n_classes, bias=True)
        model = model.to(device)

    elif args.model == "DenseNet161P":
        model = models.densenet161(pretrained=args.pretrained)
        # for params in model.parameters():
        #     params.requires_grad = False
        model.features._modules['conv0'] = nn.Conv2d(in_channels=in_channels, out_channels=96, kernel_size=(7, 7),
                                                     stride=(2, 2), padding=(3, 3))
        model.classifier = nn.Linear(in_features=2208, out_features=n_classes, bias=True)
        model = model.to(device)

    elif args.model == "DenseNet169P":
        model = models.densenet169(pretrained=args.pretrained)
        for params in model.parameters():
            params.requires_grad = False
        model.features._modules['conv0'] = nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=(7, 7),
                                                     stride=(2, 2), padding=(3, 3))
        model.classifier = nn.Linear(in_features=1664, out_features=n_classes, bias=True)
        model = model.to(device)

    elif args.model == "DenseNet201P":
        model = models.densenet201(pretrained=args.pretrained)
        for params in model.parameters():
            params.requires_grad = False
        model.features._modules['conv0'] = nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=(7, 7),
                                                     stride=(2, 2), padding=(3, 3))
        model.classifier = nn.Linear(in_features=1920, out_features=n_classes, bias=True)
        model = model.to(device)
    # RNN
    elif args.model == 'LSTM' or args.model == 'GRU':
        model = Rnn(rnn_type=args.model, input_size=args.input_size, hidden_size=args.hidden_size,
                    batch_size=args.batch_size,
                    num_classes=args.n_classes, num_layers=args.n_layers,
                    final_layer=args.final_layer).to(device)
    # C3D

    elif args.model == 'C3D':
        if args.pretrained:
            model = C3D(rgb=rgb, num_classes=args.n_classes)


            # modify the parameters
            print('ok')

            model.load_state_dict(torch.load('c3d_weights/c3d.pickle', map_location=device), strict=False)
            # # for params in model.parameters():
            #     # params.requires_grad = False

            model.conv1 = nn.Conv3d(1 if not rgb else 3, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1))
            # replace fc6 because of the 30-frame input
            model.fc6 = nn.Linear(16384, 4096)  # num classes 28672 (112*200)
            model.fc7 = nn.Linear(4096, 4096)  # num classes
            model.fc8 = nn.Linear(4096, n_classes)  # num classes

            model = model.to(device)


    # Conv-lstm
    elif args.model == 'Conv-lstm':
        model = ConvLSTM(input_size=(args.input_size, args.input_size),
                         input_dim=1 if not rgb else 3,
                         hidden_dim=[64, 64, 128],
                         kernel_size=(3, 3),
                         num_layers=args.n_layers,
                         batch_first=True,
                         ).to(device)
    elif args.model == 'DeepConvLstm':
        model = DeepConvLstm(input_channels_conv=1 if not rgb else 3, input_size_conv=args.input_size, n_classes=12,
                             n_frames=args.n_frames, batch_size=args.batch_size).to(device)

    elif args.model == 'ConvGRU':
        model = ConvGRU(input_size=40, hidden_sizes=[64, 128],
                        kernel_sizes=[3, 3], n_layers=2).to(device)

    else:
        raise NotImplementedError

    if args.opt == 'SGD':
        optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
        # optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, momentum=args.momentum)

    elif args.opt == 'Adam':
        optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)

    loss_function = nn.CrossEntropyLoss().to(device)

    start_epoch = 0
    if args.resume:
        checkpoint = torch.load("/projects/fabio/weights/gesture_recog_weights/checkpoint{}.pth.tar".format(args.model))
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        start_epoch = checkpoint['epoch']

        print("Resuming state:\n-epoch: {}\n{}".format(start_epoch, model))

    # experiment name
    personal_name = "{}_{}_{}".format(args.model, args.mode, args.exp_name)
    info_experiment = "{}".format(personal_name)
    log_dir = "/projects/fabio/logs/gesture_recog_logs/exps"
    weight_dir = personal_name
    log_file = open("{}/{}.txt".format("/projects/fabio/logs/gesture_recog_logs/txt_logs", personal_name), 'w')
    log_file.write(personal_name + "\n\n")
    if personal_name:
        exp_name = (("exp_{}_{}".format(time.strftime("%c"), personal_name)).replace(" ", "_")).replace(":", "-")
    else:
        exp_name = (("exp_{}".format(time.strftime("%c"), personal_name)).replace(" ", "_")).replace(":", "-")
    writer = SummaryWriter("{}".format(os.path.join(log_dir, exp_name)))

    # add info experiment
    writer.add_text('Info experiment',
                    "model:{}"
                    "\n\npretrained:{}"
                    "\n\nbatch_size:{}"
                    "\n\nepochs:{}"
                    "\n\noptimizer:{}"
                    "\n\nlr:{}"
                    "\n\ndn_lr:{}"
                    "\n\nmomentum:{}"
                    "\n\nweight_decay:{}"
                    "\n\nn_frames:{}"
                    "\n\ninput_size:{}"
                    "\n\nhidden_size:{}"
                    "\n\ntracking_data_mode:{}"
                    "\n\nn_classes:{}"
                    "\n\nmode:{}"
                    "\n\nn_workers:{}"
                    "\n\nseed:{}"
                    "\n\ninfo:{}"
                    "".format(args.model, args.pretrained, args.batch_size, args.epochs, args.opt, args.lr, args.dn_lr, args.momentum,
                              args.weight_decay, args.n_frames, args.input_size, args.hidden_size, args.tracking_data_mod,
                              args.n_classes, args.mode, args.n_workers, args.seed, info_experiment))

    trainer = Trainer(model=model, loss_function=loss_function, optimizer=optimizer, train_loader=train_loader,
                      validation_loader=validation_loader,
                      batch_size=args.batch_size, initial_lr=args.lr,  device=device, writer=writer, personal_name=personal_name, log_file=log_file,
                      weight_dir=weight_dir, dynamic_lr=args.dn_lr)


    print("experiment: {}".format(personal_name))
    start = time.time()
    for ep in range(start_epoch, args.epochs):
        trainer.train(ep)
        trainer.val(ep)

    # display classes results
    classes = ['g0', 'g1', 'g2', 'g3', 'g4', 'g5', 'g6', 'g7', 'g8', 'g9', 'g10', 'g11']
    for i in range(args.n_classes):
        print('Accuracy of {} : {:.3f}%'.format(
            classes[i], 100 * trainer.class_correct[i] / trainer.class_total[i]))

    end = time.time()
    h, rem = divmod(end - start, 3600)
    m, s = divmod(rem, 60)
    print("\nelapsed time (ep.{}):{:0>2}:{:0>2}:{:05.2f}".format(args.epochs, int(h), int(m), s))


    # writing accuracy on file

    log_file.write("\n\n")
    for i in range(args.n_classes):
        log_file.write('Accuracy of {} : {:.3f}%\n'.format(
            classes[i], 100 * trainer.class_correct[i] / trainer.class_total[i]))
    log_file.close()