Example #1
def main(command, opt):
    img_shape = (3, 64, 64)

    acgan = command == 'acgan'
    dcgan = command == 'dcgan'

    if opt.fix_randomseed:
        np.random.seed(47)
        torch.manual_seed(47)
        torch.cuda.manual_seed_all(47)

    if acgan:
        generator = utils.loadModel(opt.model, ACGAN_Generator())

        feature = utils.faceFeatures[7]
        print("Using Feature: {}".format(feature))

        z = torch.from_numpy(np.random.normal(0, 1, size=(10, opt.latent_dim))).type(torch.float)
        z = torch.cat((z, z), dim=0)
        labels = torch.from_numpy(np.array([[1 - num, num] for num in range(2) for _ in range(10)])).type(torch.float)
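        # The same 10 latent vectors are duplicated, and the first 10 label
        # rows are [1, 0] while the last 10 are [0, 1], so the two rows of the
        # saved image grid differ only in the conditioned attribute.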
        
        gen_imgs = generator(z, labels)
        save_image(gen_imgs.data, os.path.join(opt.output, "fig2_2.jpg"), nrow=10, normalize=True)


    if dcgan:
        generator = utils.loadModel(opt.model, DCGAN_Generator(img_shape))
    
        z = torch.from_numpy(np.random.normal(0, 1, size=(32, opt.latent_dim, 1, 1))).type(torch.float)
        gen_imgs = generator(z)

        save_image(gen_imgs.data, os.path.join(opt.output, "fig1_2.jpg"), nrow=8, normalize=True)
Example #2
def main(opt):
    """ Main process of test.py """
    # Load Model
    model = utils.loadModel(opt.checkpoint,
                            ImproveNet(opt.rb),
                            dataparallel=True)
    device = utils.selectDevice() if opt.cuda and torch.cuda.is_available() else 'cpu'

    if opt.normalize:
        model = nn.Sequential(
            model,
            InverseMeanShift(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])).to(device)
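        # (Presumably InverseMeanShift undoes the ImageNet-style input
        # normalization, mapping the network output back to the plain image
        # value range before the results are saved.)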

    # Run inference on the images in the test set
    predict(opt, model, opt.hazy, opt.dehazy, device)

    # Measure the performance on the validation set
    # predict(opt, net, opt.hazy, opt.dehazy, device)

    # Measure the performance on the test set
    gts = sorted(utils.load_all_image(opt.gt))
    dehazes = sorted(utils.load_all_image(opt.dehazy))

    if opt.record is None:
        validate(dehazes, gts)
    else:
        validate(dehazes, gts, os.path.join(opt.dehazy, opt.record))

    return
Example #3
def main():
    if not os.path.exists(opt.output):
        os.makedirs(opt.output, exist_ok=True)

    opt.output = os.path.join(opt.output, 'p1_valid.txt')

    extractor = resnet50(pretrained=True)
    classifier = utils.loadModel(opt.resume, Classifier(8192, [2048], 11))
    extractor, classifier = extractor.to(DEVICE), classifier.to(DEVICE)

    predict_set = dataset.TrimmedVideos(opt.video,
                                        opt.label,
                                        None,
                                        sample=4,
                                        transform=transforms.Compose([
                                            transforms.ToTensor(),
                                            transforms.Normalize(
                                                mean=[0.485, 0.456, 0.406],
                                                std=[0.229, 0.224, 0.225]),
                                        ]))
    print("Dataset: {}".format(len(predict_set)))
    predict_loader = DataLoader(predict_set,
                                batch_size=opt.batch_size,
                                shuffle=False,
                                num_workers=opt.threads)

    # Predict
    results = predict(extractor, classifier, predict_loader)
    np.savetxt(opt.output, results, fmt='%d')
    print("Output File have been written to {}".format(opt.output))
Example #4
    def prepare(self):

        print('** Preparing Image Retriever **')
        self.tree = utils.loadModel(self.savedModelDataPath)

        if self.tree is not None:
            print('Model loaded from', self.savedModelDataPath)
        else:
            print('! Could not load model.')
            return

        # Search object for looking up matches.
        self.search = Vocabulary_Tree_Searcher(
            self.tree.meanDescriptorClusters, self.tree.indexedTree)

        # Entropies for the leaves.
        self.clusterEntropies = self.tree.leafClusterEntropies

        # Max number of matches limit.
        self.maxNumOfMatches = np.minimum(self.maxNumOfMatches,
                                          len(self.tree.sourceToLeafIndex))

        print("MATCHER READY -> maxNumOfMatches: %d" % self.maxNumOfMatches)

        self.prepared = True
Example #5
    def run(self):
        starttime = time.time()
        rootName = self.rootDir

        if os.path.exists(rootName):
            root = loadModel(rootName)
        else:
            dictName = self.dictDir
            word_freq = loadWords(dictName)
            root = TrieNode('*', word_freq)
            saveModel(root, rootName)

        # Load the new document
        fileName = self.demoDir
        data = self.loadData(fileName, self.stopwords)
        # Insert the new document into the trie root
        self.loadData2Root(root, data)

        # Take the top N (5) new words
        N = 5
        result, add_word = root.wordFind(N)
        # To debug or pick a different threshold, print `result` and adjust
        print("\n----\n", 'Added %d new words; the words and their scores are: \n' % len(add_word))
        print('#############################')
        for word, score in add_word.items():
            print(word + ' ---->  ', score)
        print('#############################\n')

        for word, score in add_word.items():
            jieba.add_word(word)

        print("互信息、信息熵:")
        print("".join([(x + '/ ') for x in jieba.cut(self.test_text, cut_all=False) if x not in self.stopwords]))
        endtime = time.time()
        print('time cost:' + str(round((endtime - starttime), 4)) + ' seconds.\n')
Example #6
def main():
    extractor = resnet50(pretrained=True).to(DEVICE)
    recurrent = utils.loadModel(
        opt.model,
        LSTM_Net(2048,
                 opt.hidden_dim,
                 opt.output_dim,
                 num_layers=opt.layers,
                 bias=True,
                 dropout=opt.dropout,
                 bidirectional=opt.bidirectional,
                 seq_predict=False)).to(DEVICE)

    predict_set = dataset.TrimmedVideos(opt.video,
                                        opt.label,
                                        None,
                                        transform=transforms.Compose([
                                            transforms.ToTensor(),
                                            transforms.Normalize(
                                                mean=[0.485, 0.456, 0.406],
                                                std=[0.229, 0.224, 0.225]),
                                        ]))

    print("Dataset: {}".format(len(predict_set)))
    predict_loader = DataLoader(predict_set,
                                batch_size=opt.batch_size,
                                shuffle=False,
                                num_workers=opt.threads)

    # Predict
    predict(extractor, recurrent, predict_loader)
Example #7
def runInference(model_path, input_paths, data_type, args=None):
    model_name = path.basename(model_path)
    target, dim_size, image_size = selectTarget(model_path)
    dataset_iterator, labels_dict = utils.loadImageDataset(
        input_paths, data_type, image_size)

    with tf.Session() as sess:
        # Load model
        utils.loadModel(model_path,
                        path.basename(model_path),
                        print_layers=False)
        # Get layer outputs
        features_node = sess.graph.get_tensor_by_name(target)
        # Init storage arrays
        features = np.empty((0, dim_size), np.float32)
        labels = np.empty((0, ), np.int32)
        n_processed = 0
        # Eval loop
        while True:
            try:
                # Evaluate batch
                try:
                    X, y = sess.run(dataset_iterator.get_next())
                    X /= 255.0
                except tf.errors.InternalError:
                    continue
                feed_dict = {INPUT_LAYER.format(model_name): X}
                target_output = sess.run(features_node, feed_dict=feed_dict)
                target_output = np.squeeze(target_output)
                # Stack outputs
                features = np.vstack((features, target_output))
                labels = np.hstack((labels, y))
                # Logging
                n_processed += constants.BATCH_SIZE
                if n_processed % (10 * constants.BATCH_SIZE) == 0:  # log every 10 batches
                    print("Processed {} records.".format(n_processed))
            except tf.errors.OutOfRangeError:
                break
        print("Completed extracting features. \n")
        return features, labels, labels_dict
Example #8
def load(self, inputFile):
    print 'OPENING', inputFile
    if inputFile.find('*') != -1:
        self.classifiers = []
        i = 0
        file = inputFile.replace('*', str(i))
        while os.path.isfile(file):
            print 'Loading model', i
            self.classifiers.append(cu.loadModel(file))
            i += 1
            file = inputFile.replace('*', str(i))
        self.subcategories = i
    else:
        print 'WRONG FILE PATTERN FOR SUBCATEGORIES MODEL:', inputFile
Example #9
def main():
    if not os.path.exists(args.output): 
        os.mkdir(args.output)

    torch.set_default_dtype(torch.float)
    device = utils.selectDevice()

    # Initialize model

    if args.command == "basic":
        model = models.Yolov1_vgg16bn(pretrained=True).to(device)
    elif args.command == "improve":
        model = models.Yolov1_vgg16bn_Improve(pretrained=True).to(device)

    model = utils.loadModel(args.model, model)
    model.eval()
    
    # Initialize dataset

    transform = transforms.Compose([
        transforms.Resize((448, 448)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    # TODO: Reset as MyDataset
    testset = dataset.Testset(img_root=args.images, transform=transform)
    test_loader = DataLoader(testset, batch_size=1, shuffle=False, num_workers=4)

    # Iterative inference

    for batch_idx, (data, imgName) in enumerate(test_loader, 1):
        data = data.to(device)
        output = model(data)

        if args.command == "basic":
            boxes, classIndexs, probs = decode(output, nms=args.nms, prob_min=args.prob, iou_threshold=args.iou)
        if args.command == "improve":
            boxes, classIndexs, probs = decode(output, nms=args.nms, prob_min=args.prob, iou_threshold=args.iou, grid_num=14)

        classNames = labelEncoder.inverse_transform(classIndexs.type(torch.long).to("cpu"))
        
        export(boxes, classNames, probs, imgName[0] + ".txt", args.output)
        
        print("Predicted: [{}/{} ({:.2%})]\r".format(
            batch_idx, len(test_loader.dataset), batch_idx / len(test_loader.dataset)), end=""
        )
Example #10
def main():
    valids_p1 = TrimmedVideos(None, opt.label, opt.feature, sample=4, transform=transforms.ToTensor())
    loader_p1 = DataLoader(valids_p1, batch_size=opt.plot_num, shuffle=False)

    valids_p2 = TrimmedVideos(None, opt.label, opt.feature, downsample=12, transform=transforms.ToTensor())
    loader_p2 = DataLoader(valids_p2, batch_size=1, shuffle=False)

    recurrent = utils.loadModel(opt.resume, 
                    LSTM_Net(2048, 128, 11, num_layers=2, bias=True, 
                    dropout=0.2, bidirectional=False, seq_predict=False)
                ).to(DEVICE)

    graph_1 = os.path.join(opt.graph, 'p1_tsne.png')
    graph_2 = os.path.join(opt.graph, 'p2_tsne.png')

    dimension_reduction_cnn(graph_1, loader_p1)
    dimension_reduction_rnn(graph_2, loader_p2, recurrent)
Example #11
def main():
    
    # load first version of the dataset 
    df = pd.read_csv(processedDataset_path + "finalData.csv", index_col=0)
    labels = df[['churn']]
    df = df.drop(columns=['churn'])
    df = df.drop(columns=['id'])

    # split on training and testing data
    train_X, test_X, train_Y, test_Y = train_test_split(df, labels, test_size=0.33, random_state=42)
    train_Y = train_Y.values.ravel()
    test_Y = test_Y.values.ravel()

    # load all models (version 1)
    logRModel = ut.loadModel(modelsPath+"logR")
    dtreeModel = ut.loadModel(modelsPath+"dtree")
    xgbModel = ut.loadModel(modelsPath+"xgc")

    logR = ut.model_report(logRModel, train_X, test_X, train_Y, test_Y, "LogisticRegression")
    dtree = ut.model_report(dtreeModel, train_X, test_X, train_Y, test_Y, "DecisionTree")
    xgb = ut.model_report(xgbModel, train_X, test_X, train_Y, test_Y, "XGBoost")

    # load second version of the dataset
    df = pd.read_csv(processedDataset_path + "fDataWithoutLastMonth.csv", index_col=0)
    labels = df[['churn']]
    df = df.drop(columns=['churn'])
    df = df.drop(columns=['id'])

    # split on training and testing data
    train_X, test_X, train_Y, test_Y = train_test_split(df, labels, test_size=0.33, random_state=42)
    train_Y = train_Y.values.ravel()
    test_Y = test_Y.values.ravel()

    # load all models (version 2)
    logRWLModel = ut.loadModel(modelsPath + "logRWLM")
    dtreeWLModel = ut.loadModel(modelsPath + "dtreeWLM")
    xgbWLModel = ut.loadModel(modelsPath + "xgcWLM")

    logRWLM = ut.model_report(logRWLModel, train_X, test_X, train_Y, test_Y, "LogisticRegressionWLM")
    dtreeWLM = ut.model_report(dtreeWLModel, train_X, test_X, train_Y, test_Y, "DecisionTreeWLM")
    xgbWLM = ut.model_report(xgbWLModel, train_X, test_X, train_Y, test_Y, "XGBoostWLM")


    model_performances = pd.concat([logR, dtree, xgb, logRWLM, dtreeWLM, xgbWLM], axis=0).reset_index()

    model_performances = model_performances.drop(columns="index", axis=1)

    table = ff.create_table(np.round(model_performances, 4))
    # save html file in iframe_figures
    py.iplot(table)
Example #12
def colorize_images():
    folder = "static/Test/"
    model_name='model'
    from utils import save_the_images, loadModel, prepare_accuracy_visualisation_images
    # Firstly, load in the model from previous training sessions
    model = loadModel(model_name)
    # Secondly, load in images to colorise (they only have the lightness channel)
    color_me = []
    color_me = prepare_accuracy_visualisation_images(color_me, folder)
    # Thirdly, Colorize the loaded images
    output = model.predict(color_me)
    output = output * 128 # Turn the -1 to 1 values into proper Lab values.
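    # (The a/b channels of the Lab colour space span roughly -128..127, which
    # is why the network's -1..1 output is rescaled by 128 here.)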

    # Finally, Save the colorized images
    save_the_images(output, color_me)

    ## Now load those saved images back up so they can be displayed:
    images = []
    # Load in all the images.
    folder = os.listdir('static/Result/')
    for image in folder:
        images.append(image)
    return render_template("show_predicted_images.html", images=images)
Example #13
def main():
    opt.output = os.path.join(opt.output, 'p2_result.txt')

    extractor = resnet50(pretrained=True).to(DEVICE)
    recurrent = utils.loadModel(
        opt.resume,
        LSTM_Net(2048,
                 opt.hidden_dim,
                 opt.output_dim,
                 num_layers=opt.layers,
                 bias=True,
                 dropout=opt.dropout,
                 bidirectional=opt.bidirectional,
                 seq_predict=False)).to(DEVICE)

    predict_set = dataset.TrimmedVideos(opt.video,
                                        opt.label,
                                        None,
                                        downsample=opt.downsample,
                                        transform=transforms.Compose([
                                            transforms.ToTensor(),
                                            transforms.Normalize(
                                                mean=[0.485, 0.456, 0.406],
                                                std=[0.229, 0.224, 0.225]),
                                        ]))

    print("Dataset: {}".format(len(predict_set)))
    predict_loader = DataLoader(predict_set,
                                batch_size=opt.batch_size,
                                shuffle=False,
                                collate_fn=utils.collate_fn_valid,
                                num_workers=opt.threads)

    # Predict
    results = predict(extractor, recurrent, predict_loader)
    np.savetxt(opt.output, results, fmt='%d')
    print("Output File have been written to {}".format(opt.output))
Example #14
        # tmp holds the n-grams (all free combinations) generated from each line
        # tmp: [['它'], ['是'], ['小'], ['狗'], ['它', '是'], ['是', '小'], ['小', '狗'], ['它', '是', '小'], ['是', '小', '狗']]
        tmp = generate_ngram(i, 3)
        # print(tmp)
        for d in tmp:
            root.add(d)
    print('------> Insertion finished')
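
# A minimal sketch of what generate_ngram presumably does here (hypothetical
# reference implementation, not this project's actual code): collect every
# contiguous word n-gram of a tokenized line, up to length n.
def generate_ngram_sketch(words, n):
    # e.g. ['它', '是', '小', '狗'] with n=3 yields the unigrams, bigrams and
    # trigrams listed in the comment above.
    return [words[s:s + l] for l in range(1, n + 1)
            for s in range(len(words) - l + 1)]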


if __name__ == "__main__":
    # Load stopwords
    stopwords = getStopwords()

    rootName = ("data/root.pkl")
    if os.path.exists(rootName):
        root = loadModel(rootName)
    else:
        dictName = 'data/dict.txt'
        word_freq = loadWords(dictName)
        root = TrieNode('*', word_freq)
        saveModel(root, rootName)

    # Load the new document
    fileName = 'data/demo.txt'
    data = loadDate(fileName, stopwords)
    # Insert the new document into the trie root
    loadDate2Root(data)

    # Take the top N (5) new words
    N = 5
    result, add_word = root.wordFind(N)
Example #15
    https://arxiv.org/abs/1406.1078
'''
from __future__ import print_function

import json
from orderedattrdict import AttrDict
from keras.models import Model
from keras.layers import Input, LSTM, Dense

from utils import loadModel
from inputs import get_inputs

import numpy as np

# Load models
trainer_model = loadModel('t_s2s.json')

# Load training args.
with open("modelArgs.json", "r") as fp:
    modelArgs = AttrDict(json.load(fp))

    # Reverse-lookup token index to decode sequences back to
    # something readable.
    modelArgs.reverse_input_char_index = dict(
        (i, char) for char, i in modelArgs.input_token_index.items())
    modelArgs.reverse_target_char_index = dict(
        (i, char) for char, i in modelArgs.target_token_index.items())

    modelArgs.num_encoder_tokens = len(modelArgs.input_token_index)
    modelArgs.num_decoder_tokens = len(modelArgs.target_token_index)
Example #16
                        type=str,
                        default="",
                        help="http://localhost:51000/invocations")
    parser.add_argument("--mail",
                        type=str,
                        default="",
                        help="mail file path as txt -UTF8")
    if parser.parse_args().type == 'http':
        MODEL_REST_CALL(parser.parse_args().uri, )
    elif parser.parse_args().mail == '':
        #show predictions and accuracy of entire test set
        prediction, evaluation = sess.run([activation_OP, accuracy_OP],
                                          feed_dict={
                                              X: testX,
                                              yGold: testY
                                          })

        for i in range(len(testX)):
            print("predicts email %s as %s actually: %s -- %s" %
                  (str(i + 1), labelToString(prediction[i]),
                   labelToString(testY[i]), labelToString(prediction[i])
                   == labelToString(testY[i])))
        print("overall accuracy of dataset: %s percent" % str(evaluation))
    else:
        featurevect = covertMAT(parser.parse_args().mail,
                                loadModel("./data/features.pkl"))
        prediction = sess.run([activation_OP], feed_dict={X: featurevect})
        print(prediction[0])
        print("predict mail(%s) as %s" %
              (parser.parse_args().mail, labelToString(prediction[0])))
Example #17
    # net = VGG('VGG19')
    net = ResNet50()
    # net = PreActResNet18()
    # net = GoogLeNet()
    # net = DenseNet121()
    # net = ResNeXt29_2x64d()
    # net = MobileNet()
    # net = MobileNetV2()
    # net = DPN92()
    # net = ShuffleNetG2()
    #net = SENet18()
    net = MobileNetV2_Maxout()

    if args.resume:
        # Load checkpoint.
        net, best_acc, curEpoch = loadModel(modelPath, net)
    else:
        best_acc = 0
        curEpoch = 0

    print("current epoch:", curEpoch)
    print("current best acc:", best_acc)

    use_cuda = torch.cuda.is_available()
    #net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
    #cudnn.benchmark = True

    train(model=net,
          batchSize=128,
          epoch=30,
          checkPoint=10,
Example #18
def load(self, inputFile):
    svm_ = cu.loadModel(inputFile)
    self.C = svm_.C
    self.clf = svm_.clf
Example #19
File: LM.py  Project: lovygit/LMRC
                                                          train=True, batchSize=batch_size, data_pool_root=None)
        testLoader, test_classes = load_ImageNet200_online([test_root],
                                                        category_indexs=class_index[:i + CLASS_NUM_IN_BATCH],
                                                        train=False, batchSize=batch_size, shuffle=False)

        print("train classes:", train_classes)
        print("test classes:", test_classes)

        # assign label vector
        label_set, label_dict, new_label = label_allotter(label_set, train_classes, label_dict, output_dim)
        print("label set:", label_set)
        print("label dict keys:", label_dict.keys())
        print("new classes:", new_label)

        # # add head
        head_index = net.add_head_layer()

        # train and save model
        saveModelPath = "./model/imagenet200_online/resnet18_LM_NO_RC" + str(i)

        train(model=net, head_index=head_index, epoch=epoch, lr=lr, output_dim=output_dim, train_loader=trainLoader,
              test_loader=testLoader, label_dict=label_dict, modelPath=saveModelPath, checkPoint=10,
              useCuda=use_cuda, adjustLR=True, earlyStop=False, tolearnce=4)

        # net.freeze_weight(head_index)

        net, best_acc, best_epoch = loadModel(saveModelPath, net)



Example #20
    target_loader, _ = loading_data(target_data_path,
                                    mode='train',
                                    batch_size=batch_size)
    val_loader, _ = loading_data(target_data_path, mode='val')

    net = CSRNet().to(device)
    net_D = Discriminator(1).to(device)

    optimizer = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()),
                          lr=lr,
                          momentum=0.9)
    optimizer_D = optim.Adam(net_D.parameters(), lr=lr_D, betas=(0.9, 0.99))

    #load model
    if pre_trained_path['density'] != '':
        net = loadModel(net, pre_trained_path['density'])
    if pre_trained_path['discriminator'] != '':
        net_D = loadModel(net_D, pre_trained_path['discriminator'])

    criterion_dens = nn.MSELoss(size_average=False)
    #self.criterion_count = nn.L1Loss(size_average=False)
    criterion_disc = nn.BCEWithLogitsLoss()
    power = 0.9
    source_label = 0
    target_label = 1
    lambda_adv = 0.001

    # Training phase
    trainloader_iter = enumerate(train_loader)
    targetloader_iter = enumerate(target_loader)
    best_mae = sys.maxsize
Example #21
def main(opt):
    """ 
    Main process of train.py 

    Parameters
    ----------
    opt : namespace
        The options (hyperparameters) of the model
    """
    if opt.fixrandomseed:
        seed = 1334
        torch.manual_seed(seed)
        
        if opt.cuda: 
            torch.cuda.manual_seed(seed)

    print("==========> Loading datasets")
    img_transform = Compose([ToTensor(), Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) if opt.normalize else ToTensor()

    # Dataset 
    train_loader, val_loader = getDataset(opt, img_transform)

    # TODO: Parameters Selection
    # TODO: Mean shift Layer Handling
    # Load Model
    print("==========> Building model")
    model = ImproveNet(opt.rb)
    
    # ----------------------------------------------- #
    # Loss: L1 Norm / L2 Norm                         #
    #   Perceptual Model (Optional)                   # 
    #   TODO Append Layer (Optional)                  #
    # ----------------------------------------------- #
    criterion  = nn.MSELoss(reduction='mean')
    perceptual = None if (opt.perceptual is None) else getPerceptualModel(opt.perceptual).eval()

    # ----------------------------------------------- #
    # Optimizer and learning rate scheduler           #
    # ----------------------------------------------- #
    print("==========> Setting Optimizer: {}".format(opt.optimizer))
    optimizer = getOptimizer(model, opt)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=opt.milestones, gamma=opt.gamma)

    # ----------------------------------------------- #
    # Option: resume training process from checkpoint #
    # ----------------------------------------------- #
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            model, optimizer, _, _, scheduler = utils.loadCheckpoint(opt.resume, model, optimizer, scheduler)
        else:
            raise Exception("=> no checkpoint found at '{}'".format(opt.resume))

    # ----------------------------------------------- #
    # Option: load weights from a pretrain network    #
    # ----------------------------------------------- #
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading pretrained model '{}'".format(opt.pretrained))
            model = utils.loadModel(opt.pretrained, model, True)
        else:
            raise Exception("=> no pretrained model found at '{}'".format(opt.pretrained))

    # Select training device
    if opt.cuda:
        print("==========> Setting GPU")

        model = nn.DataParallel(model, device_ids=[i for i in range(opt.gpus)]).cuda()
        criterion = criterion.cuda()

        if perceptual is not None:
            perceptual = perceptual.cuda()
    else:
        print("==========> Setting CPU")
        
        model = model.cpu()
        criterion = criterion.cpu()

        if perceptual is not None:
            perceptual = perceptual.cpu()

    # Create container
    length     = opt.epochs * len(train_loader) // opt.val_interval
    loss_iter  = np.empty(length, dtype=float)
    perc_iter  = np.empty(length, dtype=float)
    psnr_iter  = np.empty(length, dtype=float)
    ssim_iter  = np.empty(length, dtype=float)
    mse_iter   = np.empty(length, dtype=float)
    lr_iter    = np.empty(length, dtype=float)
    iterations = np.empty(length, dtype=float)

    loss_iter[:]  = np.nan
    perc_iter[:]  = np.nan
    psnr_iter[:]  = np.nan
    ssim_iter[:]  = np.nan
    mse_iter[:]   = np.nan
    lr_iter[:]    = np.nan
    iterations[:] = np.nan

    # Set plotter to plot the loss curves 
    twinx = (opt.perceptual is not None)
    fig, axis = getFigureSpec(len(train_loader), twinx)

    # Set Model Saving Function
    if opt.save_item == "model":
        print("==========> Save Function: saveModel()")
        saveCheckpoint = utils.saveModel
    elif opt.save_item == "checkpoint":
        print("==========> Save Function: saveCheckpoint()")
        saveCheckpoint = utils.saveCheckpoint
    else:
        raise ValueError("Save Checkpoint Function Error")

    # Start Training
    print("==========> Training")
    for epoch in range(opt.starts, opt.epochs + 1):
        loss_iter, perc_iter, mse_iter, psnr_iter, ssim_iter, lr_iter, iterations, _, _ = train(
            model, optimizer, criterion, perceptual, train_loader, val_loader, scheduler, epoch, 
            loss_iter, perc_iter, mse_iter, psnr_iter, ssim_iter, lr_iter, iterations, 
            opt, name, fig, axis, saveCheckpoint
        )

        scheduler.step()

    # Save the last checkpoint for resume training
    utils.saveCheckpoint(os.path.join(opt.checkpoints, name, "final.pth"), model, optimizer, scheduler, epoch, len(train_loader))

    # TODO: Fine tuning

    return
"""
Execute this file to be prompted to choose whether to train a new network or
retrain an existing one
"""

import utils
import create_network

choice = input(
    "Type \"n\" to create a new network or \"t\" to continue training an existing one: "
)
if choice == "n":
    create_network.createNetwork()

elif choice == "t":
    while True:
        try:
            epochs = int(input("How many epochs? Recommended 5-30 "))
            if epochs > 0:
                break
            else:
                print("Please give a positive whole number.")
        except ValueError:
            print("Please give a positive whole number.")

    model = utils.loadModel()  # load the model with utils, apply weights
    create_network.runNetwork(model, 64, epochs)

else:
    print("Please read the instruction message.")
Example #23
def load(self, inputFile):
    svm_ = cu.loadModel(inputFile)
    self.C = svm_.C
    self.clf = svm_.clf
Example #24
    test_size = 0.3
    selection_metric = 'eval_F1score'  # metric by which the best model will be selected
    # -- To store results
    Results = {}
    # for gridsearchCV
    Results_grid = {}

    # -- Dataframe where results will be saved
    df_results = pd.DataFrame(columns=[
        'Model', 'Pipeline', 'CV_train_score', 'eval_score', 'eval_precision',
        'eval_recall', 'eval_F1score', 'eval_AUC', 'train_score', 'test_score',
        'precision', 'recall', 'F1score', 'AUC', 'AUC_proba'
    ])

    # -- Load Model initial from disk
    pipeline_0 = loadModel(initial_model_path)
    # - Uncomment to get pipeline parameters
    #print(pipeline_0.get_params())

    # -- Initialize Figure for ROC curves
    plt.plot(figsize=(32, 32))

    # -----------------#
    #       DATA       #
    # -----------------#

    # -- Load Data from disk
    df = loadData(data_path)
    # -- Label distribution
    print(f"[DATA] distribution: \n {df.groupby('sentiment').size()} \n")
    # -- Label encoding (1: positive, 0: negative); returns a dataframe
Example #25
def temporal_action_segmentation():
    """ Using RNN network to segmentation the action. """
    start_epoch = 1

    #------------------------------------------------------
    # Create Model, optimizer, scheduler, and loss function
    #------------------------------------------------------
    recurrent = LSTM_Net(2048,
                         opt.hidden_dim,
                         opt.output_dim,
                         num_layers=opt.layers,
                         bias=True,
                         batch_first=False,
                         dropout=opt.dropout,
                         bidirectional=opt.bidirection,
                         seq_predict=True).to(DEVICE)

    # Weight_init
    if "orthogonal" in opt.weight_init:
        for layer, param in recurrent.recurrent.named_parameters():
            print("{} {}".format(layer, param.shape))
            if len(param.shape) >= 2:
                nn.init.orthogonal_(param)

    # Bias_init
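    # (In a PyTorch LSTM the bias vector is laid out gate-wise as
    # [input, forget, cell, output], so the slice [0.25*size : 0.5*size]
    # below addresses the forget-gate bias.)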
    if "forget_bias_0" in opt.bias_init:
        for layer, param in recurrent.recurrent.named_parameters():
            if layer.startswith("bias"):
                start = int(param.shape[0] * 0.25)
                end = int(param.shape[0] * 0.5)
                param[start:end].data.fill_(0)

    if "forget_bias_1" in opt.bias_init:
        for layer, param in recurrent.recurrent.named_parameters():
            if layer.startswith("bias"):
                start = int(param.shape[0] * 0.25)
                end = int(param.shape[0] * 0.5)
                param[start:end].data.fill_(1)

    # Set optimizer
    if opt.optimizer == "Adam":
        optimizer = optim.Adam(recurrent.parameters(),
                               lr=opt.lr,
                               betas=(opt.b1, opt.b2),
                               weight_decay=opt.weight_decay)
    elif opt.optimizer == "SGD":
        optimizer = optim.SGD(recurrent.parameters(),
                              lr=opt.lr,
                              momentum=opt.momentum,
                              weight_decay=opt.weight_decay)
    elif opt.optimizer == "ASGD":
        optimizer = optim.ASGD(recurrent.parameters(),
                               lr=opt.lr,
                               lambd=1e-4,
                               alpha=0.75,
                               t0=1000000.0,
                               weight_decay=opt.weight_decay)
    elif opt.optimizer == "Adadelta":
        optimizer = optim.Adadelta(recurrent.parameters(),
                                   lr=opt.lr,
                                   rho=0.9,
                                   eps=1e-06,
                                   weight_decay=opt.weight_decay)
    elif opt.optimizer == "Adagrad":
        optimizer = optim.Adagrad(recurrent.parameters(),
                                  lr=opt.lr,
                                  lr_decay=0,
                                  weight_decay=opt.weight_decay,
                                  initial_accumulator_value=0)
    elif opt.optimizer == "SparseAdam":
        optimizer = optim.SparseAdam(recurrent.parameters(),
                                     lr=opt.lr,
                                     betas=(opt.b1, opt.b2),
                                     eps=1e-08)
    elif opt.optimizer == "Adamax":
        optimizer = optim.Adamax(recurrent.parameters(),
                                 lr=opt.lr,
                                 betas=(opt.b1, opt.b2),
                                 eps=1e-08,
                                 weight_decay=opt.weight_decay)
    else:
        raise argparse.ArgumentError

    scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                               milestones=opt.milestones,
                                               gamma=opt.gamma)

    # Load parameter
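    # Note: loadModel() restores the network weights only, whereas
    # loadCheckpoint() below also restores the optimizer, the scheduler state
    # and the epoch to resume from.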
    if opt.pretrain:
        recurrent = utils.loadModel(opt.pretrain, recurrent)
        print("Loaded pretrain model: {}".format(opt.pretrain))
    if opt.resume:
        recurrent, optimizer, start_epoch, scheduler = utils.loadCheckpoint(
            opt.resume, recurrent, optimizer, scheduler)
        print("Resume training: {}".format(opt.resume))

    # Set criterion
    criterion = nn.CrossEntropyLoss().to(DEVICE)

    # Set dataloader
    transform = transforms.ToTensor()

    trainlabel = os.path.join(opt.train, "labels", "train")
    trainfeature = os.path.join(opt.train, "feature", "train")
    vallabel = os.path.join(opt.val, "labels", "valid")
    valfeature = os.path.join(opt.val, "feature", "valid")

    train_set = dataset.FullLengthVideos(
        None,
        trainlabel,
        trainfeature,
        downsample=opt.train_downsample,
        transform=transform,
        summarize=opt.summarize,
        sampling=opt.sampling,
    )
    train_loader = DataLoader(train_set,
                              batch_size=opt.batch_size,
                              shuffle=True,
                              collate_fn=utils.collate_fn_seq,
                              num_workers=opt.threads)
    val_set = dataset.FullLengthVideos(
        None,
        vallabel,
        valfeature,
        downsample=opt.val_downsample,
        transform=transform,
        summarize=None,
        sampling=0,
    )
    val_loader = DataLoader(val_set,
                            batch_size=1,
                            shuffle=False,
                            collate_fn=utils.collate_fn_seq,
                            num_workers=opt.threads)
    val_set_2 = dataset.FullLengthVideos(None,
                                         vallabel,
                                         valfeature,
                                         downsample=opt.train_downsample,
                                         transform=transform,
                                         summarize=None,
                                         sampling=0)
    val_loader_2 = DataLoader(val_set_2,
                              batch_size=1,
                              shuffle=False,
                              collate_fn=utils.collate_fn_seq,
                              num_workers=opt.threads)

    # Show the memory used by neural network
    print("The neural network allocated GPU with {:.1f} MB".format(
        torch.cuda.memory_allocated() / 1024 / 1024))

    #------------------
    # Train the models
    #------------------
    trainloss, trainaccs, valloss, valaccs = [], [], [], []
    epochs = []
    categories = [name.split('.')[0] for name in os.listdir(valfeature)]

    # Pre-test of the pretrain model
    acc, loss = val(recurrent, val_loader, 0, criterion)
    valloss.append(loss)
    valaccs.append(acc)
    epochs.append(0)

    for epoch in range(start_epoch, opt.epochs + 1):
        scheduler.step()

        # Save the train loss and train accuracy
        max_trainaccs = max(trainaccs) if len(trainaccs) > 0 else 0
        min_trainloss = min(trainloss) if len(trainloss) > 0 else 0
        recurrent, acc, loss = train(recurrent, train_loader, optimizer, epoch,
                                     criterion, max_trainaccs, min_trainloss)
        trainloss.append(loss)
        trainaccs.append(acc)

        # validate the model with several downsample ratio
        acc, loss = val(recurrent, val_loader, epoch, criterion)
        valloss.append(loss)
        valaccs.append(acc)

        acc, loss = val(recurrent,
                        val_loader_2,
                        epoch,
                        criterion,
                        visual=False)

        # Save the epochs
        epochs.append(epoch)

        for x, y in ((trainloss, "trainloss.txt"),
                     (trainaccs, "trainaccs.txt"), (valloss, "valloss.txt"),
                     (valaccs, "valaccs.txt"), (epochs, "epochs.txt")):
            np.savetxt(os.path.join(opt.log, "problem_3", opt.tag, y),
                       np.array(x))

        if epoch % opt.save_interval == 0:
            savepath = os.path.join(opt.checkpoints, "problem_3", opt.tag,
                                    str(epoch) + '.pth')
            utils.saveCheckpoint(savepath, recurrent, optimizer, scheduler,
                                 epoch)

        # Draw the accuracy / loss curve
        draw_graphs(trainloss,
                    valloss,
                    trainaccs,
                    valaccs,
                    epochs,
                    label=categories)

    return recurrent
Example #26
from utils import save_the_images, loadModel, prepare_accuracy_visualisation_images

# Firstly, load in the model from previous training sessions
model = loadModel('new_model')

# Secondly, load in images to colorise (they only have the lightness channel)
color_me = []
location = "Test/"
color_me = prepare_accuracy_visualisation_images(color_me, location)

# Thirdly, Colorize the loaded images
output = model.predict(color_me)
output = output * 128  # Turn the -1 to 1 values into proper Lab values.

# Finally, Save the colorized images
save_the_images(output, color_me)
Example #27
from utils import save_the_images, loadModel, prepare_accuracy_visualisation_images

# Firstly, load in the model from previous training sessions
model = loadModel('model')

# Secondly, load in images to colorise (they only have the lightness channel)
color_me = []
location = "static/Test/"
color_me = prepare_accuracy_visualisation_images(color_me, location)

# Thirdly, Colorize the loaded images
output = model.predict(color_me)
output = output * 128  # Turn the -1 to 1 values into proper Lab values.

# Finally, Save the colorized images
save_the_images(output, color_me)
Example #28
File: demo.py  Project: DL-ML/My_CODA
    img = Image.open(img_path)
    if img.mode == 'L':
        img = img.convert('RGB')
    den = pd.read_csv(den_path).values
    den = den.astype(np.float32, copy=False)

    plt.imshow(img)
    plt.figure()
    plt.imshow(den,cmap=CM.jet)

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    mean_std = ([0.5, 0.5, 0.5],[0.25, 0.25, 0.25])
    img_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    img = img_transform(img)
    img = img.unsqueeze(dim=0)
    img = img.to(device)

    net = CSRNet().to(device)
    net = loadModel(net, pretrained_model_path)

    predDmap = net(img).detach().squeeze()
    plt.figure()
    plt.imshow(predDmap,cmap=CM.jet)
    

#%%
Example #29
from Models.hmm import HMM
import utils
import os
import metric
from Models.crf import CRFModel
from Models.bilstm import  BiLSTM
from torch.optim import Adamax

import torch
import torch.nn.functional as F
trainWordLists,trainTagLists,word2id,tag2id=utils.create('train.txt',make_vocab=True)
devWordLists,devTagList=utils.create('dev.txt',make_vocab=False)
# Train the Hidden Markov Model (HMM)
print('HMM************************')
if os.path.exists('ckpts/hmm.pkl'):
    hmm=utils.loadModel('ckpts/hmm.pkl')
    predictTags = hmm.test(devWordLists, word2id, tag2id)
else:
    hmm=HMM(len(tag2id),len(word2id))
    hmm.train(trainWordLists,trainTagLists,tag2id,word2id)
    utils.saveModel('ckpts/hmm.pkl',hmm)
    predictTags=hmm.test(devWordLists,word2id,tag2id)
accuracy=metric.accuracy(predictTags,devTagList)
print('accuracy: ',accuracy)
print('CRF****************************')
# Train the Conditional Random Field (CRF) model
if os.path.exists('ckpts/crf.pkl'):
    crf=utils.loadModel('ckpts/crf.pkl')
    print(crf)
    predictTags=crf.test(devWordLists)
else:
Example #30
def continuous_frame_recognition():
    """ Using RNN network to recognize the action. """
    start_epoch = 1

    # -----------------------------------------------------
    # Create Model, optimizer, scheduler, and loss function
    # -----------------------------------------------------
    # extractor = resnet50(pretrained=True).to(DEVICE)
    recurrent = LSTM_Net(2048,
                         opt.hidden_dim,
                         opt.output_dim,
                         num_layers=opt.layers,
                         bias=True,
                         batch_first=False,
                         dropout=opt.dropout,
                         bidirectional=opt.bidirection,
                         seq_predict=False).to(DEVICE)

    # ----------------------------------------------
    # For signal direction LSTM
    #   weight_ih_l0 torch.Size([512, 2048])
    #   weight_hh_l0 torch.Size([512, 128])
    #   bias_ih_l0 torch.Size([512])
    #   bias_hh_l0 torch.Size([512])
    #
    # For bidirectional LSTM, reverse layer is added.
    #   weight_ih_l0_reverse torch.Size([512, 2048])
    #   weight_hh_l0_reverse torch.Size([512, 128])
    #   bias_ih_l0_reverse torch.Size([512])
    #   bias_hh_l0_reverse torch.Size([512])
    # ----------------------------------------------

    # Weight_init
    if "orthogonal" in opt.weight_init:
        for layer, param in recurrent.recurrent.named_parameters():
            print("{} {}".format(layer, param.shape))
            if len(param.shape) >= 2:
                nn.init.orthogonal_(param)

    # Bias_init
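    # (In a PyTorch LSTM the bias vector is laid out gate-wise as
    # [input, forget, cell, output], so the slice [0.25*size : 0.5*size]
    # below addresses the forget-gate bias.)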
    if "forget_bias_0" in opt.bias_init:
        for layer, param in recurrent.recurrent.named_parameters():
            if layer.startswith("bias"):
                size = param.shape[0]
                start = int(size * 0.25)
                end = int(size * 0.5)
                param[start:end].data.fill_(0)

    if "forget_bias_1" in opt.bias_init:
        for layer, param in recurrent.recurrent.named_parameters():
            if layer.startswith("bias"):
                size = param.shape[0]
                start = int(size * 0.25)
                end = int(size * 0.5)
                param[start:end].data.fill_(1)

    # Set optimizer
    if opt.optimizer == "Adam":
        optimizer = optim.Adam(recurrent.parameters(),
                               lr=opt.lr,
                               betas=(opt.b1, opt.b2),
                               weight_decay=opt.weight_decay)
    elif opt.optimizer == "SGD":
        optimizer = optim.SGD(recurrent.parameters(),
                              lr=opt.lr,
                              momentum=opt.momentum,
                              weight_decay=opt.weight_decay)
    elif opt.optimizer == "ASGD":
        optimizer = optim.ASGD(recurrent.parameters(),
                               lr=opt.lr,
                               lambd=1e-4,
                               alpha=0.75,
                               t0=1000000.0,
                               weight_decay=opt.weight_decay)
    elif opt.optimizer == "Adadelta":
        optimizer = optim.Adadelta(recurrent.parameters(),
                                   lr=opt.lr,
                                   rho=0.9,
                                   eps=1e-06,
                                   weight_decay=opt.weight_decay)
    elif opt.optimizer == "Adagrad":
        optimizer = optim.Adagrad(recurrent.parameters(),
                                  lr=opt.lr,
                                  lr_decay=0,
                                  weight_decay=opt.weight_decay,
                                  initial_accumulator_value=0)
    elif opt.optimizer == "SparseAdam":
        optimizer = optim.SparseAdam(recurrent.parameters(),
                                     lr=opt.lr,
                                     betas=(opt.b1, opt.b2),
                                     eps=1e-08)
    elif opt.optimizer == "Adamax":
        optimizer = optim.Adamax(recurrent.parameters(),
                                 lr=opt.lr,
                                 betas=(opt.b1, opt.b2),
                                 eps=1e-08,
                                 weight_decay=opt.weight_decay)
    else:
        raise argparse.ArgumentError

    scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                               milestones=opt.milestones,
                                               gamma=opt.gamma)

    # Load parameter
    if opt.pretrain:
        recurrent = utils.loadModel(opt.pretrain, recurrent)
    if opt.resume:
        recurrent, optimizer, start_epoch, scheduler = utils.loadCheckpoint(
            opt.resume, recurrent, optimizer, scheduler)

    # Set criterion
    criterion = nn.CrossEntropyLoss().to(DEVICE)

    # Set dataloader
    transform = transforms.ToTensor()

    trainlabel = os.path.join(opt.train, "label", "gt_train.csv")
    trainfeature = os.path.join(opt.train, "feature", "train")
    vallabel = os.path.join(opt.val, "label", "gt_valid.csv")
    valfeature = os.path.join(opt.val, "feature", "valid")

    train_set = dataset.TrimmedVideos(None,
                                      trainlabel,
                                      trainfeature,
                                      downsample=opt.downsample,
                                      transform=transform)
    train_loader = DataLoader(train_set,
                              batch_size=opt.batch_size,
                              shuffle=True,
                              collate_fn=utils.collate_fn,
                              num_workers=opt.threads)

    # Show the memory used by neural network
    print("The neural network allocated GPU with {:.1f} MB".format(
        torch.cuda.memory_allocated() / 1024 / 1024))

    #------------------
    # Train the models
    #------------------
    trainloss = []
    trainaccs = []
    valloss = []
    valaccs = []
    epochs = []

    for epoch in range(start_epoch, opt.epochs + 1):
        scheduler.step()

        # Save the train loss and train accuracy
        max_trainaccs = max(trainaccs) if len(trainaccs) else 0
        min_trainloss = min(trainloss) if len(trainloss) else 0
        recurrent, loss, acc = train(recurrent, train_loader, optimizer, epoch,
                                     criterion, max_trainaccs, min_trainloss)
        trainloss.append(loss)
        trainaccs.append(acc)

        # validate the model with several downsample ratio
        loss_list, acc_list, label_list = [], [], []
        for downsample in [1, 2, 4, 6, 12]:
            val_set = dataset.TrimmedVideos(None,
                                            vallabel,
                                            valfeature,
                                            downsample=downsample,
                                            transform=transform)
            val_loader = DataLoader(val_set,
                                    batch_size=1,
                                    shuffle=True,
                                    collate_fn=utils.collate_fn,
                                    num_workers=opt.threads)
            print("[Epoch {}] [Validation] [Downsample: {:2d}]".format(
                epoch, downsample))
            acc, loss = val(recurrent, val_loader, epoch, criterion)

            loss_list.append(loss)
            acc_list.append(acc)
            label_list.append('val_{}'.format(downsample))

        valloss.append(loss_list)
        valaccs.append(acc_list)

        # Save the epochs
        epochs.append(epoch)

        # with open(os.path.join(opt.log, "problem_2", opt.tag, 'statistics.txt'), 'w') as textfile:
        #     textfile.write("\n".join(map(lambda x: str(x), (trainloss, trainaccs, valloss, valaccs, epochs))))

        records = list(
            map(lambda x: np.array(x),
                (trainloss, trainaccs, valloss, valaccs, epochs)))
        for record, name in zip(records,
                                ('trainloss.txt', 'trainaccs.txt',
                                 'valloss.txt', 'valaccs.txt', 'epochs.txt')):
            np.savetxt(os.path.join(opt.log, "problem_2", opt.tag, name),
                       record)

        if epoch % opt.save_interval == 0:
            savepath = os.path.join(opt.checkpoints, "problem_2", opt.tag,
                                    str(epoch) + '.pth')
            utils.saveCheckpoint(savepath, recurrent, optimizer, scheduler,
                                 epoch)

        # Draw the accuracy / loss curve
        draw_graphs(trainloss, valloss, trainaccs, valaccs, epochs,
                    "problem_2", label_list)

    return recurrent
Example #31
#!/usr/bin/python3

import utils
import numpy
import colors

imageWidth = 224

if __name__ == "__main__":

    # Load the model
    model = utils.loadModel("model/keras_model.h5")

    # Get an image from the webcam
    image = utils.getWebcam("/dev/video0")
    normalImage = utils.prepareImage(image, imageWidth)

    # Run the prediction
    prediction = model.predict(normalImage)

    # get the largest value
    largest = max(prediction[0])
    index = int(numpy.where(prediction[0] == largest)[0])
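    # numpy.argmax(prediction[0]) would be a more direct (and tie-safe) way to
    # obtain the index of the largest score.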

    # Get the labels
    labels = utils.openLabels("model/labels.txt")
    label = utils.getLabel(labels, index)

    # Run the user defined function
    colors.runColor(label, index, prediction)