def train_top_model(class_weight=None):
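    # Train the small "top model" classifier on VGG16 bottleneck features that
    # were precomputed and saved to disk beforehand.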
    train_data = np.load(open(config.bf_train_path, 'rb'))
    validation_data = np.load(open(config.bf_valid_path, 'rb'))

    train_labels = []
    validation_labels = []
    k = 0
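    # Rebuild integer labels from the per-class image counts; this assumes the
    # bottleneck features were written class by class in this same order.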
    for i in config.classes:
        train_labels += [k] * util.get_dir_imgs_number(os.path.join(config.train_dir, i))
        validation_labels += [k] * util.get_dir_imgs_number(os.path.join(config.validation_dir, i))
        k += 1

    model = get_top_model_for_VGG16(shape=train_data.shape[1:], nb_class=len(config.classes), W_regularizer=True)
    rms = RMSprop(lr=5e-4, rho=0.9, epsilon=1e-08, decay=0.01)
    model.compile(optimizer=rms, loss='sparse_categorical_crossentropy', metrics=['accuracy'])

    early_stopping = EarlyStopping(verbose=1, patience=20, monitor='val_loss')
    model_checkpoint = ModelCheckpoint(
        config.get_top_model_weights_path(),
        save_best_only=True,
        save_weights_only=True,
        monitor='val_loss')
    callbacks_list = [early_stopping, model_checkpoint]

    history = model.fit(
        train_data,
        train_labels,
        nb_epoch=top_model_nb_epoch,
        validation_data=(validation_data, validation_labels),
        callbacks=callbacks_list,
        class_weight=class_weight)

    util.save_history(history=history, prefix='bottleneck')
Example #2
def tune(X_train, X_test, y_train, y_test):
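    # Train AlexNet from scratch (weights_path=None) on this fold and report
    # softmax accuracy; `fold_count` is assumed to be set by the surrounding
    # cross-validation loop.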
    Y_train = np_utils.to_categorical(y_train, config.nb_class)
    Y_test = np_utils.to_categorical(y_test, config.nb_class)

    model = None
    model = util.load_alexnet_model(weights_path=None, nb_class=config.nb_class)

    model.compile(
        loss='categorical_crossentropy',
        optimizer=SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True),
        metrics=['accuracy'])

    print("Training CNN from scratch..")

    hist = model.fit(X_train, Y_train,
              nb_epoch=200, batch_size=32,verbose=1,
              validation_data=(X_test, Y_test))

    util.save_history(hist,"alex_scratch_fold"+ str(fold_count),fold_count)

    scores = model.evaluate(X_test, Y_test, verbose=0)
    print("Softmax %s: %.2f%%" % (model.metrics_names[1], scores[1]*100))

    model.save_weights("models/alex_scratch_weights"+ str(fold_count) +".h5")

    # Clear memory
    X_train = None
    Y_train = None
    X_test = None
    Y_test = None

    return scores[1]
Example #3
def train_top_model(X_train, X_test, y_train, y_test):
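    # Continue fine-tuning: load AlexNet together with the top-model weights
    # produced by the earlier "finetune56" stage, then train for a few epochs
    # with a very small SGD learning rate.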

    model = util.load_alex_finetune56_finetune567(
        nb_class=config.nb_class,
        weights_path=config.alexnet_weights_path,
        top_model_weight_path="models/alex_finetune56_weights" + str(fold_count) + ".h5")

    print("\nTraining CNN..")
    Y_train = np_utils.to_categorical(y_train, config.nb_class)
    Y_test = np_utils.to_categorical(y_test, config.nb_class)

    shape=X_train.shape[1:]

    model.compile(
        loss='categorical_crossentropy',
        #optimizer=SGD(lr=0.00001, decay=1e-6, momentum=0.9, nesterov=True),
        optimizer=SGD(lr=0.00001, momentum=0.9),
        metrics=['accuracy'])

    hist = model.fit(X_train, Y_train,
              nb_epoch=2, batch_size=32,verbose=1,
              validation_data=(X_test, Y_test))

    util.save_history(hist,"finetune56_finetune567_fold"+ str(fold_count),fold_count)

    scores = model.evaluate(X_test, Y_test, verbose=0)
    model.save_weights("models/alex_finetune56_finetune567" + str(fold_count) + ".h5")
    #model.save_weights("model/alex_topmodel" + str(fold_count) + ".h5")
    print("Softmax %s: %.2f%%" % (model.metrics_names[1], scores[1]*100))

    return scores[1]
def train_top_model(y_train, y_test):
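    # Train only the top classifier on AlexNet bottleneck features previously
    # saved to .npy files for this fold.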
    X_train = np.load(open('alex_bottleneck_features_train'+ str(fold_count) + '.npy' , 'rb'))
    X_test = np.load(open('alex_bottleneck_features_validation'+ str(fold_count) + '.npy', 'rb'))

    print("\nTraining CNN..")
    Y_train = np_utils.to_categorical(y_train, config.nb_class)
    Y_test = np_utils.to_categorical(y_test, config.nb_class)

    shape=X_train.shape[1:]

    model = None # Clear Model

    model = util.get_top_model_for_alexnet(
        shape=shape,
        nb_class=config.nb_class)

    model.compile(
        loss='categorical_crossentropy',
        optimizer=SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True),
        metrics=['accuracy'])

    hist = model.fit(X_train, Y_train,
              nb_epoch=30, batch_size=32,verbose=1,
              validation_data=(X_test, Y_test))

    util.save_history(hist, "deepfeatures+finetune56_fold" + str(fold_count), fold_count)

    scores = model.evaluate(X_test, Y_test, verbose=0)
    model.save_weights("models/alex_topmodel_deepfeatures+finetune56" + str(fold_count) + ".h5")
    print("Softmax %s: %.2f%%" % (model.metrics_names[1], scores[1]*100))

    return scores[1]
def tune(lr=0.0001, class_weight=None):
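    # Fine-tune the full network from directory generators with light
    # augmentation; the best weights (by val_loss) are checkpointed, and the
    # history and class list are saved at the end.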
    model = load_model(nb_class=len(config.classes), weights_path=config.get_top_model_weights_path())

    model.compile(
        loss='categorical_crossentropy',
        optimizer=SGD(lr=lr, decay=1e-6, momentum=0.9, nesterov=True),
        metrics=['accuracy'])

    # prepare data augmentation configuration
    train_datagen = ImageDataGenerator(
        rotation_range=30.,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)
    util.apply_mean(train_datagen)

    train_generator = train_datagen.flow_from_directory(
        config.train_dir,
        target_size=config.img_size,
        classes=config.classes)

    test_datagen = ImageDataGenerator()
    util.apply_mean(test_datagen)

    validation_generator = test_datagen.flow_from_directory(
        config.validation_dir,
        target_size=config.img_size,
        classes=config.classes)

    early_stopping = EarlyStopping(verbose=1, patience=30, monitor='val_loss')
    model_checkpoint = ModelCheckpoint(config.get_fine_tuned_weights_path(checkpoint=True),
                                       save_best_only=True,
                                       save_weights_only=True,
                                       monitor='val_loss')
    history = model.fit_generator(
        train_generator,
        samples_per_epoch=config.nb_train_samples,
        nb_epoch=fine_tuning_nb_epoch,
        validation_data=validation_generator,
        nb_val_samples=config.nb_validation_samples,
        callbacks=[early_stopping, model_checkpoint],
        class_weight=class_weight)

    util.save_history(history=history, prefix='fine-tuning')
    util.save_classes(config.classes)

    _cleanup()
Example #6
def tune(X_train, X_test, y_train, y_test):
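    # Fine-tune the later AlexNet layers (the "567" variant) with real-time
    # Keras augmentation; the generator is fed the integer labels y_train,
    # matching the sparse_categorical_crossentropy loss.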
    print(y_train)
    Y_train = np_utils.to_categorical(y_train, config.nb_class)
    Y_test = np_utils.to_categorical(y_test, config.nb_class)

    model = None
    model = util.load_alexnet_model_finetune567(weights_path=config.alexnet_weights_path, nb_class=config.nb_class)

    model.compile(

        loss='sparse_categorical_crossentropy',
        optimizer=SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True),
        metrics=['accuracy'])

    print("Fine-tuning CNN..")

    # Real-time data augmentation using Keras' built-in ImageDataGenerator
    datagen = ImageDataGenerator(rotation_range=40,
                                 width_shift_range=0.3,
                                 height_shift_range=0.3,
                                 horizontal_flip=True,
                                 zoom_range = 0.25,
                                 shear_range = 0.25,
                                 fill_mode='nearest')
    datagen.fit(X_train)
    hist = model.fit_generator(datagen.flow(X_train, y_train, batch_size=32), nb_epoch=400,
                        samples_per_epoch=X_train.shape[0], validation_data = (X_test,y_test))

    #hist = model.fit(X_train, Y_train,
    #          nb_epoch=400, batch_size=32,verbose=1,
    #          validation_data=(X_test, Y_test))

    util.save_history(hist,"alex_finetune567_aug_fold"+ str(fold_count),fold_count)

    model.save_weights("models/alex_finetune567_aug_weights"+ str(fold_count) +".h5")

    #scores = model.evaluate(X_test, y_test, verbose=0)
    #print("Softmax %s: %.2f%%" % (model.metrics_names[1], scores[1]*100))

    # Clear memory
    model= None
    X_train = None
    Y_train = None
    X_test = None
    Y_test = None
Example #7
    def _fine_tuning(self):
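        # Freeze the layers selected by freeze_top_layers(), recompile with a
        # small Adam learning rate and binary cross-entropy, and fine-tune with
        # heavy augmentation; the two branches below call fit_generator with
        # the Keras 2 or Keras 1 argument names.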
        self.freeze_top_layers()

        self.model.compile(
            loss='binary_crossentropy',
            optimizer=Adam(lr=1e-5),
            #optimizer=SGD(lr=5e-6, momentum=0.9),
            metrics=['binary_accuracy'])

        train_data = self.get_train_datagen(
            rescale=1. / 255,
            rotation_range=60.,
            #shear_range=0.2,
            #zoom_range=0.2,
            width_shift_range=0.2,
            height_shift_range=0.2,
            horizontal_flip=True,
            vertical_flip=True)
        callbacks = self.get_callbacks(config.get_fine_tuned_weights_path(),
                                       patience=self.fine_tuning_patience)

        if util.is_keras2():
            hist = self.model.fit_generator(
                train_data,
                steps_per_epoch=config.nb_train_samples /
                float(self.batch_size),
                epochs=self.nb_epoch,
                validation_data=self.get_validation_datagen(rescale=1. / 255),
                #validation_data=self.get_validation_datagen(),
                validation_steps=config.nb_validation_samples /
                float(self.batch_size),
                callbacks=callbacks,
                class_weight=self.class_weight)
        else:
            hist = self.model.fit_generator(
                train_data,
                samples_per_epoch=config.nb_train_samples,
                nb_epoch=self.nb_epoch,
                validation_data=self.get_validation_datagen(),
                nb_val_samples=config.nb_validation_samples,
                callbacks=callbacks,
                class_weight=self.class_weight)
        print(hist.history)
        util.save_history(history=hist, prefix=time.time())
        self.model.save(config.get_model_path())
Example #8
        max_epochs=max_epochs,
        patience=patience)


    loss, epoch, history, output_layer = main(dataset, verbose=verbose,
                                              **config)

    out_params = dict(
        batch_size=batch_size,
        hidden_pre=[nunits]*(nlayers-1),
        dropout=dropout,
        hidden_f='rectify',
        bottleneck_size=bottleneck_size,
        bottleneck_f='linear',
        hidden_post=[nunits],
        output_f='softmax')
    save_model(output_layer, out_params, output)

    save_history(history, output + 'history')

    with open(output, 'wb') as fout:
        result = dict(
            descr="""Trained network.""",
            config=config,
            loss=loss,
            epoch=epoch,
            history=history,
            network=output_layer)
        pickle.dump(result, fout, -1)
Example #9
def tune(X_train, X_test, y_train, y_test):
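    # Two-stage training for this fold: first fine-tune AlexNet end to end with
    # a tiny learning rate, then reuse it as a fixed feature extractor and
    # train a linear SVM on the extracted deep features.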
    Y_train = np_utils.to_categorical(y_train, config.nb_class)
    Y_test = np_utils.to_categorical(y_test, config.nb_class)

    model = util.load_alexnet_model(weights_path=config.alexnet_weights_path, nb_class=config.nb_class)

    model.compile(
        loss='categorical_crossentropy',
        optimizer=SGD(lr=1e-6, momentum=0.9),
        metrics=['accuracy'])

    print("Fine-tuning CNN..")

    hist = model.fit(X_train, Y_train,
              nb_epoch=4000, batch_size=32,verbose=1,
              validation_data=(X_test, Y_test))

    util.save_history(hist,"finetune567_fold"+ str(fold_count),fold_count)

    scores = model.evaluate(X_test, Y_test, verbose=0)
    print("Softmax %s: %.2f%%" % (model.metrics_names[1], scores[1]*100))

    model.save_weights("models/finetune567_weights"+ str(fold_count) +".h5")

    model = None
    model = util.load_svm_alex_model(weights_path=config.alexnet_weights_path, nb_class=config.nb_class)
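    # Extract deep features with the reloaded network and train a linear SVM on
    # them, so the SVM accuracy can be compared with the softmax result above.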
    print("Generating train features for SVM..")
    svm_train = model.predict(X_train)
    print(svm_train.shape)
    print("Generating test features for SVM..")
    svm_test = model.predict(X_test)
    print(svm_test.shape)

    print("\nTraining SVM..")
    clf = svm.SVC(kernel='linear', gamma=0.7, C=1.0)

    clf.fit(svm_train, y_train.ravel())
    #y_pred = clf.predict(test_data)
    score = clf.score(svm_test, y_test.ravel())
    print("SVM accuracy: %.2f%%" % (score * 100))

    y_pred = clf.predict(svm_test)

    target_names = ['AisKacang' , 'AngKuKueh' , 'ApamBalik' , 'Asamlaksa' , 'Bahulu' , 'Bakkukteh',
     'BananaLeafRice' , 'Bazhang' , 'BeefRendang' , 'BingkaUbi' , 'Buburchacha',
     'Buburpedas' , 'Capati' , 'Cendol' , 'ChaiTowKuay' , 'CharKuehTiao' , 'CharSiu',
     'CheeCheongFun' , 'ChiliCrab' , 'Chweekueh' , 'ClayPotRice' , 'CucurUdang',
     'CurryLaksa' , 'CurryPuff' , 'Dodol' , 'Durian' , 'DurianCrepe' , 'FishHeadCurry',
     'Guava' , 'HainaneseChickenRice' , 'HokkienMee' , 'Huatkuih' , 'IkanBakar',
     'Kangkung' , 'KayaToast' , 'Keklapis' , 'Ketupat' , 'KuihDadar' , 'KuihLapis',
     'KuihSeriMuka' , 'Langsat' , 'Lekor' , 'Lemang' , 'LepatPisang' , 'LorMee',
     'Maggi goreng' , 'Mangosteen' , 'MeeGoreng' , 'MeeHoonKueh' , 'MeeHoonSoup',
     'MeeJawa' , 'MeeRebus' , 'MeeRojak' , 'MeeSiam' , 'Murtabak' , 'Murukku',
     'NasiGorengKampung' , 'NasiImpit' , 'Nasikandar' , 'Nasilemak' , 'Nasipattaya',
     'Ondehondeh' , 'Otakotak' , 'OysterOmelette' , 'PanMee' , 'PineappleTart',
     'PisangGoreng' , 'Popiah' , 'PrawnMee' , 'Prawnsambal' , 'Puri' , 'PutuMayam',
     'PutuPiring' , 'Rambutan' , 'Rojak' , 'RotiCanai' , 'RotiJala' , 'RotiJohn',
     'RotiNaan' , 'RotiTissue' , 'SambalPetai' , 'SambalUdang' , 'Satay' , 'Sataycelup',
     'SeriMuka' , 'SotoAyam' , 'TandooriChicken' , 'TangYuan' , 'TauFooFah',
     'TauhuSumbat' , 'Thosai' , 'TomYumSoup' , 'Wajik' , 'WanTanMee' , 'WaTanHo' , 'Wonton',
     'YamCake' , 'YongTauFu' , 'Youtiao' , 'Yusheng']
    cm = confusion_matrix(y_test, y_pred)
    print(classification_report(y_test, y_pred,target_names=target_names))
    print(cm)

    # Visualization of confusion matrix
    #np.set_printoptions(precision=2)
    #plt.figure()
    #plot_confusion_matrix(cm)
    #plt.show()

    # Clear memory
    X_train = None
    Y_train = None
    svm_train = None
    svm_test = None

    return score
Example #10
# -*- coding: utf-8 -*-
"""
Created on Tue Jun  4 12:31:17 2019

@author: emile
"""
from model import unet
import segmentation_5_classes_gray
from util import save_history
from train_with_Generator import train_model

x_train, y_train, x_val, y_val = segmentation_5_classes_gray.loadSegm5ClassesGrayData(
)
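# Continue training the existing U-Net (create_new_model=False) on the 5-class
# grayscale segmentation data for 10 more epochs, then persist the history.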
history = train_model(x_train,
                      y_train, (x_val, y_val),
                      nb_epochs=10,
                      create_new_model=False)

save_history(history)
Example #11
def train(current_gpu, args):
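    # Per-GPU training entry point: build a pretrained model from the PyTorch
    # model zoo, optionally set up distributed / Apex mixed-precision training,
    # and run a CutMix-augmented loop while tracking top-1/top-5 accuracy.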
    best_acc1 = -1
    model_history = {}
    model_history = util.init_modelhistory(model_history)
    train_start = time.time()

    ## choose model from pytorch model_zoo
    model = util.torch_model(args.model_name, pretrained=True)
    loss_fn = nn.CrossEntropyLoss().cuda()

    ## distributed_setting
    model, args = dis_util.dist_setting(current_gpu, model, loss_fn, args)

    ## Let cuDNN benchmark several algorithms and pick the fastest one
    cudnn.benchmark = False if args.seed else True

    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    if args.apex:
        model, optimizer = dis_util.apex_init(model, optimizer, args)


#     args.collate_fn = partial(dis_util.fast_collate, memory_format=args.memory_format)

    args = _get_images(args, data_type='train')
    train_loader, train_sampler = _get_train_data_loader(args, **args.kwargs)
    test_loader = _get_test_data_loader(args, **args.kwargs)

    logger.info("Processes {}/{} ({:.0f}%) of train data".format(
        len(train_loader.sampler), len(train_loader.dataset),
        100. * len(train_loader.sampler) / len(train_loader.dataset)))

    logger.info("Processes {}/{} ({:.0f}%) of test data".format(
        len(test_loader.sampler), len(test_loader.dataset),
        100. * len(test_loader.sampler) / len(test_loader.dataset)))

    for epoch in range(1, args.num_epochs + 1):
        ##
        batch_time = util.AverageMeter('Time', ':6.3f')
        data_time = util.AverageMeter('Data', ':6.3f')
        losses = util.AverageMeter('Loss', ':.4e')
        top1 = util.AverageMeter('Acc@1', ':6.2f')
        top5 = util.AverageMeter('Acc@5', ':6.2f')
        progress = util.ProgressMeter(
            len(train_loader), [batch_time, data_time, losses, top1, top5],
            prefix="Epoch: [{}]".format(epoch))

        trn_loss = []
        model.train()
        end = time.time()
        running_loss = 0.0
        ## Set epoch count for DistributedSampler
        if args.multigpus_distributed:
            train_sampler.set_epoch(epoch)

        prefetcher = util.data_prefetcher(train_loader)
        input, target = prefetcher.next()
        batch_idx = 0
        while input is not None:

            batch_idx += 1

            if args.prof >= 0 and batch_idx == args.prof:
                print("Profiling begun at iteration {}".format(batch_idx))
                torch.cuda.cudart().cudaProfilerStart()

            if args.prof >= 0:
                torch.cuda.nvtx.range_push(
                    "Body of iteration {}".format(batch_idx))

            util.adjust_learning_rate(optimizer, epoch, batch_idx,
                                      len(train_loader), args)

            ##### DATA Processing #####
            targets_gra = target[:, 0]
            targets_vow = target[:, 1]
            targets_con = target[:, 2]
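            # The three targets match the three output heads split from the
            # logits below (grapheme root / vowel diacritic / consonant
            # diacritic, sized 168 / 11 / remainder).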

            # With 50% probability, use the original batch as-is
            if np.random.rand() < 0.5:
                logits = model(input)
                grapheme = logits[:, :168]
                vowel = logits[:, 168:179]
                cons = logits[:, 179:]

                loss1 = loss_fn(grapheme, targets_gra)
                loss2 = loss_fn(vowel, targets_vow)
                loss3 = loss_fn(cons, targets_con)

            else:
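                # CutMix: paste a random rectangle from a shuffled copy of the
                # batch into the input, then mix the three losses with the
                # area-based weight lam.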

                lam = np.random.beta(1.0, 1.0)
                rand_index = torch.randperm(input.size()[0])
                shuffled_targets_gra = targets_gra[rand_index]
                shuffled_targets_vow = targets_vow[rand_index]
                shuffled_targets_con = targets_con[rand_index]

                bbx1, bby1, bbx2, bby2 = _rand_bbox(input.size(), lam)
                input[:, :, bbx1:bbx2, bby1:bby2] = input[rand_index, :,
                                                          bbx1:bbx2, bby1:bby2]
                # Adjust lambda so it exactly matches the pixel ratio of the pasted region
                lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) /
                           (input.size()[-1] * input.size()[-2]))

                logits = model(input)
                grapheme = logits[:, :168]
                vowel = logits[:, 168:179]
                cons = logits[:, 179:]

                loss1 = loss_fn(grapheme, targets_gra) * lam + loss_fn(
                    grapheme, shuffled_targets_gra) * (1. - lam)
                loss2 = loss_fn(vowel, targets_vow) * lam + loss_fn(
                    vowel, shuffled_targets_vow) * (1. - lam)
                loss3 = loss_fn(cons, targets_con) * lam + loss_fn(
                    cons, shuffled_targets_con) * (1. - lam)

            loss = 0.5 * loss1 + 0.25 * loss2 + 0.25 * loss3
            trn_loss.append(loss.item())
            running_loss += loss.item()

            #########################################################

            # compute gradient and do SGD step
            optimizer.zero_grad()

            if args.apex:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()

            optimizer.step()
            # Printing vital information
            if (batch_idx + 1) % (args.log_interval) == 0:
                s = f'[Epoch {epoch} Batch {batch_idx+1}/{len(train_loader)}] ' \
                f'loss: {running_loss / args.log_interval:.4f}'
                print(s)
                running_loss = 0

            if True or batch_idx % args.log_interval == 0:
                # Every log_interval iterations, check the loss, accuracy, and speed.
                # For best performance, it doesn't make sense to print these metrics every
                # iteration, since they incur an allreduce and some host<->device syncs.

                # Measure accuracy
                prec1, prec5 = util.accuracy(logits, target, topk=(1, 5))

                # Average loss and accuracy across processes for logging
                if args.multigpus_distributed:
                    reduced_loss = dis_util.reduce_tensor(loss.data, args)
                    prec1 = dis_util.reduce_tensor(prec1, args)
                    prec5 = dis_util.reduce_tensor(prec5, args)
                else:
                    reduced_loss = loss.data

                # to_python_float incurs a host<->device sync
                losses.update(to_python_float(reduced_loss), input.size(0))
                top1.update(to_python_float(prec1), input.size(0))
                top5.update(to_python_float(prec5), input.size(0))

                ## Wait for pending GPU operations to finish (PyTorch launches kernels asynchronously)
                torch.cuda.synchronize()
                batch_time.update((time.time() - end) / args.log_interval)
                end = time.time()

                if current_gpu == 0:
                    print(
                        'Epoch: [{0}][{1}/{2}]  '
                        'Time {batch_time.val:.3f} ({batch_time.avg:.3f})  '
                        'Speed {3:.3f} ({4:.3f})  '
                        'Loss {loss.val:.10f} ({loss.avg:.4f})  '
                        'Prec@1 {top1.val:.3f} ({top1.avg:.3f})  '
                        'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                            epoch,
                            batch_idx,
                            len(train_loader),
                            args.world_size * args.batch_size / batch_time.val,
                            args.world_size * args.batch_size / batch_time.avg,
                            batch_time=batch_time,
                            loss=losses,
                            top1=top1,
                            top5=top5))
                    model_history['epoch'].append(epoch)
                    model_history['batch_idx'].append(batch_idx)
                    model_history['batch_time'].append(batch_time.val)
                    model_history['losses'].append(losses.val)
                    model_history['top1'].append(top1.val)
                    model_history['top5'].append(top5.val)

            input, target = prefetcher.next()

        acc1 = validate(test_loader, model, loss_fn, epoch, model_history,
                        trn_loss, args)

        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)

        if not args.multigpus_distributed or (args.multigpus_distributed and
                                              args.rank % args.num_gpus == 0):
            util.save_history(
                os.path.join(args.output_data_dir, 'model_history.p'),
                model_history)

            util.save_model(
                {
                    'epoch': epoch + 1,
                    'model_name': args.model_name,
                    'state_dict': model.state_dict(),
                    'best_acc1': best_acc1,
                    'optimizer': optimizer.state_dict(),
                    #                 'class_to_idx' : train_loader.dataset.class_to_idx,
                },
                is_best,
                args.model_dir)
Example #12
    def _fine_tuning(self):
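        # Fine-tune one of three variants selected by config flags: a
        # center-loss model, a triplet-loss model, or the plain softmax model.
        # Each branch reloads the best Inception-v3 weights, recompiles, trains
        # with fit_generator, and finally saves the model and its history.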
        self.freeze_top_layers1()
        train_data = self.get_train_datagen(
            rotation_range=30.,
            shear_range=0.2,
            zoom_range=0.2,
            horizontal_flip=True,
            preprocessing_function=self.preprocess_input)
        checkpoint_dir = os.path.join(os.path.abspath('.'), 'checkpoint')
        callbacks = self.get_callbacks(config.get_fine_tuned_weights_path(),
                                       checkpoint_dir,
                                       patience=self.fine_tuning_patience)

        if util.is_keras2():
            if config.isCenterLoss:
                self.center_model.load_weights(
                    '/home/yuzhg/Inception-v3/trained/fine-tuned-best-inception-weights.h5',
                    by_name=True)
                self.center_model.compile(loss=[
                    'categorical_crossentropy', lambda y_true, y_pred: y_pred
                ],
                                          loss_weights=[1, 0.2],
                                          metrics=['accuracy'],
                                          optimizer=Adam(lr=1e-5))
                self.center_model.summary()
                self.history = self.center_model.fit_generator(
                    util.clone_y_generator(train_data),
                    steps_per_epoch=config.nb_train_samples /
                    float(self.batch_size),
                    epochs=self.nb_epoch,
                    validation_data=util.clone_y_generator(
                        self.get_validation_datagen()),
                    validation_steps=config.nb_validation_samples /
                    float(self.batch_size),
                    callbacks=callbacks,
                    class_weight=self.class_weight)
            elif config.isTripletLoss:
                self.triplet_model.load_weights(
                    '/home/yuzhg/Inception-v3/trained/fine-tuned-best-inception-weights.h5',
                    by_name=True)
                #self.triplet_model.compile(loss=self.hard_triplet_loss, optimizer=Adam(lr=1e-5), metrics=['accuracy'])
                self.triplet_model.compile(
                    optimizer=Adam(lr=1e-5),
                    loss=['categorical_crossentropy', self.hard_triplet_loss],
                    loss_weights=[1.0, 1.0],
                    metrics=['accuracy'])
                self.triplet_model.summary()
                valid_data = self.get_validation_datagen(
                    rotation_range=30.,
                    shear_range=0.2,
                    zoom_range=0.2,
                    horizontal_flip=True,
                    preprocessing_function=self.preprocess_input)

                # util.clone_y_generator1(train_data),
                self.history = self.triplet_model.fit_generator(
                    #util.triplet_transformed_generator(train_data, 4096),
                    util.clone_y_generator1(train_data),
                    steps_per_epoch=config.nb_train_samples /
                    float(self.batch_size),
                    epochs=self.nb_epoch,
                    #validation_data=util.triplet_transformed_generator(valid_data, 4096),
                    validation_data=util.clone_y_generator1(valid_data),
                    validation_steps=config.nb_validation_samples /
                    float(self.batch_size),
                    callbacks=callbacks,
                    class_weight=self.class_weight)
            else:
                self.model.load_weights(
                    '/home/yuzhg/Inception-v3/trained/fine-tuned-best-inception-weights.h5',
                    by_name=True)
                self.model.compile(loss='categorical_crossentropy',
                                   optimizer=Adam(lr=1e-5),
                                   metrics=['accuracy'])

                self.model.summary()
                self.history = self.model.fit_generator(
                    train_data,
                    steps_per_epoch=config.nb_train_samples /
                    float(self.batch_size),
                    epochs=self.nb_epoch,
                    validation_data=self.get_validation_datagen(
                        rotation_range=30.,
                        shear_range=0.2,
                        zoom_range=0.2,
                        horizontal_flip=True,
                        preprocessing_function=self.preprocess_input),
                    validation_steps=config.nb_validation_samples /
                    float(self.batch_size),
                    callbacks=callbacks,
                    class_weight=self.class_weight)

        # else:
        #     if config.isCenterLoss:
        #         self.center_model.compile(loss=['categorical_crossentropy', lambda y_true, y_pred:y_pred],
        #                            loss_weights=[1, 0.2], metrics=['accuracy'],
        #                            optimizer=Adam(lr=1e-5))
        #         self.center_model.summary()
        #         self.history = self.center_model.fit_generator(
        #             util.clone_y_generator(train_data),
        #             samples_per_epoch=config.nb_train_samples,
        #             nb_epoch=self.nb_epoch,
        #             validation_data=util.clone_y_generator(self.get_validation_datagen()),
        #             nb_val_samples=config.nb_validation_samples,
        #             callbacks=callbacks,
        #             class_weight=self.class_weight)
        #     elif config.isTripletLoss:
        #         self.triplet_model.compile(loss=triplet_loss, optimizer=Adam(lr=1e-5))
        #         self.triplet_model.summary()
        #         self.history = self.triplet_model.fit_generator(
        #             util.clone_y_generator(train_data),
        #             steps_per_epoch=config.nb_train_samples / float(self.batch_size),
        #             epochs=self.nb_epoch,
        #             validation_data=util.clone_y_generator(self.get_validation_datagen()),
        #             validation_steps=config.nb_validation_samples / float(self.batch_size),
        #             callbacks=callbacks,
        #             class_weight=self.class_weight
        #         )
        #     else:
        #         self.model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=1e-5), metrics=['accuracy'])
        #         self.model.summary()
        #         self.history = self.model.fit_generator(
        #             train_data,
        #             steps_per_epoch=config.nb_train_samples / float(self.batch_size),
        #             epochs=self.nb_epoch,
        #             validation_data=self.get_validation_datagen(),
        #             validation_steps=config.nb_validation_samples / float(self.batch_size),
        #             callbacks=callbacks,
        #             class_weight=self.class_weight
        #         )

        if config.isCenterLoss:
            #self.center_model.save_weights('vgg16-model-weights.h5')
            self.center_model.save(config.get_model_path())
            util.save_history(self.history, self.center_model)
        elif config.isTripletLoss:
            self.triplet_model.save(config.get_model_path())
            util.save_history(self.history, self.triplet_model)
        else:
            self.model.save(config.get_model_path())
            util.save_history(self.history, self.model)
Example #13
                model_file_prev.format(model_name=model_name, type="weights"))
        else:
            callbacks = dqn.fit(env,
                                nb_steps=nb_steps,
                                visualize=False,
                                verbose=2,
                                callbacks=[Logger2048(verbose=verbose)])
            dqn.save_weights(model_file.format(model_name=model_name,
                                               type="weights"),
                             overwrite=True)
            model.save(
                model_file.format(model_name=model_name, type="topology"))
            my_logger = get_callback(callbacks.callbacks, Logger2048)
            logger_rl = get_callback(callbacks.callbacks, TrainEpisodeLogger)
            save_history(
                my_logger,
                logger_file.format(type="train", model_name=model_name))
            save_history(
                logger_rl,
                logger_rl_file.format(type="train", model_name=model_name))
            print("Training done!")

        print("Testing...")

        dqn_tests = []
        random_tests = []
        for n in range(n_test_episodes):
            print("TEST EPISODE:", n + 1)
            env = GymBoard()
            callbacks = dqn.test(env,
                                 nb_episodes=1,
Example #14
def train(local_rank, args):
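    # Distributed training entry point: dis_util selects between Apex AMP,
    # model-parallel (smp_*), and data-parallel (sdp_*) setups, and the leading
    # rank saves the per-epoch history and checkpoints.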
    best_acc1 = -1
    model_history = {}
    model_history = util.init_modelhistory(model_history)
    train_start = time.time()

    if local_rank is not None:
        args.local_rank = local_rank
        
    # distributed_setting
    if args.multigpus_distributed:
        args = dis_util.dist_setting(args)



    # choose model from pytorch model_zoo
    model = util.torch_model(
        args.model_name,
        num_classes=args.num_classes,
        pretrained=True,
        local_rank=args.local_rank,
        model_parallel=args.model_parallel)  # 1000 resnext101_32x8d
    criterion = nn.CrossEntropyLoss().cuda()

    model, args = dis_util.dist_model(model, args)

    # Let cuDNN benchmark several algorithms and pick the fastest one
    cudnn.benchmark = False if args.seed else True

    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    if args.apex:
        model, optimizer, args = dis_util.apex_init(model, optimizer, args)
    elif args.model_parallel:
        model, optimizer, args = dis_util.smp_init(model, optimizer, args)
    elif args.data_parallel:
        model, optimizer, args = dis_util.sdp_init(model, optimizer, args)

    train_loader, train_sampler = _get_train_data_loader(args, **args.kwargs)

    logger.info("Processes {}/{} ({:.0f}%) of train data".format(
        len(train_loader.sampler), len(train_loader.dataset),
        100. * len(train_loader.sampler) / len(train_loader.dataset)))

    test_loader = _get_test_data_loader(args, **args.kwargs)

    #     if args.rank == 0:
    logger.info("Processes {}/{} ({:.0f}%) of test data".format(
        len(test_loader.sampler), len(test_loader.dataset),
        100. * len(test_loader.sampler) / len(test_loader.dataset)))

    print(" local_rank : {}, local_batch_size : {}".format(
        local_rank, args.batch_size))

    for epoch in range(1, args.num_epochs + 1):
        ##
        batch_time = util.AverageMeter('Time', ':6.3f')
        data_time = util.AverageMeter('Data', ':6.3f')
        losses = util.AverageMeter('Loss', ':.4e')
        top1 = util.AverageMeter('Acc@1', ':6.2f')
        top5 = util.AverageMeter('Acc@5', ':6.2f')
        progress = util.ProgressMeter(
            len(train_loader), [batch_time, data_time, losses, top1, top5],
            prefix="Epoch: [{}]".format(epoch))

        model.train()
        end = time.time()

        # Set epoch count for DistributedSampler
        if args.multigpus_distributed and not args.model_parallel:
            train_sampler.set_epoch(epoch)

        for batch_idx, (input, target) in enumerate(train_loader):
            input = input.to(args.device)
            target = target.to(args.device)
            batch_idx += 1

            if args.model_parallel:
                print("** smp_train_step **")
                output, loss = dis_util.train_step(model, criterion, input,
                                                   target, args.scaler, args)
                # Rubik: Average the loss across microbatches.
                loss = loss.reduce_mean()

                print("reduce_mean : {}".format(loss))
            else:
                #                 print("** not model_parallel")
                output = model(input)
                loss = criterion(output, target)

            # compute gradient and do SGD step
            optimizer.zero_grad()

            if args.apex:
                dis_util.apex_loss(loss, optimizer)
            elif not args.model_parallel:
                loss.backward()

            optimizer.step()

            if args.rank == 0:
                #             if args.rank == 0 and batch_idx % args.log_interval == 1:
                # Every print_freq iterations, check the loss, accuracy, and speed.
                # For best performance, it doesn't make sense to print these metrics every
                # iteration, since they incur an allreduce and some host<->device syncs.

                if args.model_parallel:
                    output = torch.cat(output.outputs)

                # Measure accuracy
                prec1, prec5 = util.accuracy(output, target, topk=(1, 5))

                # to_python_float incurs a host<->device sync
                losses.update(util.to_python_float(loss), input.size(0))
                top1.update(util.to_python_float(prec1), input.size(0))
                top5.update(util.to_python_float(prec5), input.size(0))

                # Wait for pending GPU operations to finish (PyTorch launches kernels asynchronously)
                torch.cuda.synchronize()
                batch_time.update((time.time() - end) / args.log_interval)
                end = time.time()

                #                 if args.rank == 0:
                print('Epoch: [{0}][{1}/{2}] '
                      'Train_Time={batch_time.val:.3f}: avg-{batch_time.avg:.3f}, '
                      'Train_Speed={3:.3f} ({4:.3f}), '
                      'Train_Loss={loss.val:.10f}:({loss.avg:.4f}), '
                      'Train_Prec@1={top1.val:.3f}:({top1.avg:.3f}), '
                      'Train_Prec@5={top5.val:.3f}:({top5.avg:.3f})'.format(
                          epoch,
                          batch_idx,
                          len(train_loader),
                          args.world_size * args.batch_size / batch_time.val,
                          args.world_size * args.batch_size / batch_time.avg,
                          batch_time=batch_time,
                          loss=losses,
                          top1=top1,
                          top5=top5))

        acc1 = validate(test_loader, model, criterion, epoch, model_history,
                        args)

        is_best = False

        if args.rank == 0:
            is_best = acc1 > best_acc1
            best_acc1 = max(acc1, best_acc1)

        if not args.multigpus_distributed or (args.multigpus_distributed
                                              and not args.model_parallel
                                              and args.rank == 0):
            model_history['epoch'].append(epoch)
            model_history['batch_idx'].append(batch_idx)
            model_history['batch_time'].append(batch_time.val)
            model_history['losses'].append(losses.val)
            model_history['top1'].append(top1.val)
            model_history['top5'].append(top5.val)

            util.save_history(
                os.path.join(args.output_data_dir, 'model_history.p'),
                model_history)
            util.save_model(
                {
                    'epoch': epoch + 1,
                    'model_name': args.model_name,
                    'state_dict': model.state_dict(),
                    'best_acc1': best_acc1,
                    'optimizer': optimizer.state_dict(),
                    'class_to_idx': train_loader.dataset.class_to_idx,
                }, is_best, args)
        elif args.model_parallel:
            if args.rank == 0:
                util.save_history(
                    os.path.join(args.output_data_dir, 'model_history.p'),
                    model_history)
            dis_util.smp_savemodel(model, optimizer, is_best, args)