Example #1
def crossValidateRMAE():
    """使用交叉验证验证模型精度"""
    logger.info("{}-crossValidateRMAE-{}".format('*' * 25, '*' * 25))
    X, y = getTrainData()

    _, trainModels = model.getModel()
    stackModel = model.stacking(trainModels, LinearRegression())
    rmae = tool.crossValueScore(stackModel, X, y, tool.computeRMAE)
    logger.info("stacking model, cross validate RMAE: {}".format(rmae))

    names, models = model.getModel()

    for n, m in zip(names, models):
        rmae = tool.crossValueScore(m, X, y, tool.computeRMAE)
        logger.info("model: {}, cross validate RMAE: {}".format(n, rmae))
Example #2
def setUpModel():
    tf.reset_default_graph()
    model = getModel(words_data['input_tensor_length'], words_data['output_tensor_length'])
    model = tflearn.DNN(model, tensorboard_dir='tflearn_logs')
    model.load(MODEL_FILE)

    return model
Example #3
def submit_file():
    if request.method == 'POST':
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        if file.filename == '':
            flash('No file selected for uploading')
            return redirect(request.url)
        if file:
            filename = secure_filename(file.filename)  # Security essential !!
            if os.path.exists(
                    os.path.join(app.config['UPLOAD_FOLDER'], filename)):
                imageType = magic.from_file('uploads/' + filename, mime=True)
                filename = get_random_string(3) + '_' + (filename.split(
                    '.', 1))[0] + '.' + (imageType.split('/'))[1]
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))

            model = getModel()
            result = getPrediction(filename, model)

            if result == '1.0':
                flash("Normal")
            elif result == '0.0':
                flash("Covid")
            else:
                flash("Undefined")

            flash(filename)

            send_async('prediction', filename=filename)

            return redirect('/')
Example #4
def trainBySingleModel():
    """训练单个模型并保存结果"""
    X, Y = getTrainData()
    names, models = model.getModel()

    for n, m in zip(names, models):
        singleModel = model.train_by_model(n, X, Y)
        io.saveData(singleModel, singleModelSaving.format(n))
Example #5
def old1():
    modl = model.getModel((150, 150, 3))
    train_path, val_path, TestPath = pth.getPaths()
    train_generator, val_generator, test_generator = data_aug.getGens(
        train_path, val_path, TestPath, (150, 150))
    hist = modl.fit_generator(train_generator,
                              steps_per_epoch=1500 // 20,
                              epochs=5,
                              validation_data=val_generator,
                              validation_steps=500 // 20).history
def main():
    now = datetime.datetime.now()
    logger = Logger(opt.saveDir + '/logs_{}'.format(now.isoformat()))
    model, optimizer = getModel(opt)

    criterion = torch.nn.MSELoss()

    if opt.GPU > -1:
        print('Using GPU', opt.GPU)
        model = model.cuda(opt.GPU)
        criterion = criterion.cuda(opt.GPU)

    val_loader = torch.utils.data.DataLoader(
        Dataset(opt, 'val'),
        batch_size=1,
        shuffle=True if opt.DEBUG > 1 else False,
        num_workers=1)

    if opt.test:
        _, preds = val(0, opt, val_loader, model, criterion)
        torch.save({
            'opt': opt,
            'preds': preds
        }, os.path.join(opt.saveDir, 'preds.pth'))
        return

    train_loader = torch.utils.data.DataLoader(Dataset(opt, 'train'),
                                               batch_size=opt.trainBatch,
                                               shuffle=True,
                                               num_workers=int(opt.nThreads))

    for epoch in range(1, opt.nEpochs + 1):
        mark = epoch if opt.saveAllModels else 'last'
        log_dict_train, _ = train(epoch, opt, train_loader, model, criterion,
                                  optimizer)
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        if epoch % opt.valIntervals == 0:
            log_dict_val, preds = val(epoch, opt, val_loader, model, criterion)
            for k, v in log_dict_val.items():
                logger.scalar_summary('val_{}'.format(k), v, epoch)
                logger.write('{} {:8f} | '.format(k, v))
            saveModel(
                os.path.join(opt.saveDir, 'model_{}.checkpoint'.format(mark)),
                model)  # optimizer
        logger.write('\n')
        if epoch % opt.dropLR == 0:
            lr = opt.LR * (0.1**(epoch // opt.dropLR))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
    logger.close()
    torch.save(model.cpu(), os.path.join(opt.saveDir, 'model_cpu.pth'))
Example #7
File: train.py Project: mmakos/HPC
def getModel():
    try:
        mod = tf.keras.models.load_model('../../data/models/' +
                                         args.model_name)
        print("Model " + args.model_name + " loaded.")
    except Exception:
        print("Creating new model.")
        mod = model.getModel("smallVGG")
    print()
    mod.summary()  # summary() prints the model itself and returns None
    return mod
Example #8
def testSingleModelRMAE():
    """使用所有数据进行训练,然后对训练数据进行预测"""
    logger.info("{}-testSingleModelRMAE-{}".format('*' * 25, '*' * 25))
    X, Y = getTrainData()
    names, models = model.getModel()

    for n in names:
        m = io.getData(singleModelSaving.format(n))
        pdtValue = m.predict(X)
        retMSE = tool.computeRMAE(Y, pdtValue)
        logger.info("model: {}, using all data, RMAE : {}".format(n, retMSE))
Example #9
def main():
    opt = opts().parse()
    now = datetime.datetime.now()
    logger = Logger(opt.saveDir, now.isoformat())
    model, optimizer = getModel(opt)
    criterion = torch.nn.MSELoss().cuda()

    # if opt.GPU > -1:
    #     print('Using GPU {}',format(opt.GPU))
    #     model = model.cuda(opt.GPU)
    #     criterion = criterion.cuda(opt.GPU)
    # dev = opt.device
    model = model.cuda()

    val_loader = torch.utils.data.DataLoader(
            MPII(opt, 'val'), 
            batch_size = 1, 
            shuffle = False,
            num_workers = int(ref.nThreads)
    )

    if opt.test:
        log_dict_train, preds = val(0, opt, val_loader, model, criterion)
        sio.savemat(os.path.join(opt.saveDir, 'preds.mat'), mdict = {'preds': preds})
        return
    # pretrain pyramidnet once; first define the training-data loader for gen
    train_loader = torch.utils.data.DataLoader(
            MPII(opt, 'train'), 
            batch_size = opt.trainBatch, 
            shuffle = True if opt.DEBUG == 0 else False,
            num_workers = int(ref.nThreads)
    )
    # call the train method
    for epoch in range(1, opt.nEpochs + 1):
        log_dict_train, _ = train(epoch, opt, train_loader, model, criterion, optimizer)
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        if epoch % opt.valIntervals == 0:
            log_dict_val, preds = val(epoch, opt, val_loader, model, criterion)
            for k, v in log_dict_val.items():
                logger.scalar_summary('val_{}'.format(k), v, epoch)
                logger.write('{} {:8f} | '.format(k, v))
            #saveModel(model, optimizer, os.path.join(opt.saveDir, 'model_{}.checkpoint'.format(epoch)))
            torch.save(model, os.path.join(opt.saveDir, 'model_{}.pth'.format(epoch)))
            sio.savemat(os.path.join(opt.saveDir, 'preds_{}.mat'.format(epoch)), mdict = {'preds': preds})
        logger.write('\n')
        if epoch % opt.dropLR == 0:
            lr = opt.LR * (0.1 ** (epoch // opt.dropLR))
            print('Drop LR to {}'.format(lr))
            adjust_learning_rate(optimizer, lr)
    logger.close()
    torch.save(model.cpu(), os.path.join(opt.saveDir, 'model_cpu.pth'))
def removePhone(phones,manufacturer,model):
    '''
    in - phones- list of all phones
        manufacturer - the manufacturer of the phone we want removed
        model - the model of the phone we want removed
    out - True if it removed said phone
         False if no such phone exists
    '''
    for index in range (0,len(phones)):
        if getManufacturer(phones[index]) == manufacturer and getModel(phones[index]) == model: 
            del phones[index]
            return True
    return False
def increaseAmount(phones,manufacturer,model,amount):
    '''
    in - phones- list of all phones
        manufacturer - the manufacturer of the phone we want to modify
        model - the model of the phone we want to modify
        amount - the price amount we want to add to the phone <manufacturer,model>
    out - True if the modification was made
          False otherwise, if no such phone exists
    '''
    for index in range (0,len(phones)):
        if getManufacturer(phones[index]) == manufacturer and getModel(phones[index]) == model:
            oldPrice = getPrice(phones[index])
            setPrice(phones[index],oldPrice+amount)
            return True
    return False
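A minimal usage sketch for the two helpers above; the dict-based phone record and the getManufacturer/getModel/getPrice/setPrice accessors below are assumptions, since their implementations are not part of this example:

def getManufacturer(phone): return phone['manufacturer']
def getModel(phone): return phone['model']
def getPrice(phone): return phone['price']
def setPrice(phone, price): phone['price'] = price

phones = [{'manufacturer': 'Nokia', 'model': '3310', 'price': 50}]
assert increaseAmount(phones, 'Nokia', '3310', 10) is True   # price becomes 60
assert getPrice(phones[0]) == 60
assert removePhone(phones, 'Nokia', '3310') is True          # phone list is now empty
assert phones == []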
Example #12
 def __init__(self, cfg):
     self.cfg = cfg
     os.makedirs(self.cfg.EXP.PATH, exist_ok=True)
     os.makedirs(self.cfg.EXP.PATH + '/valimg', exist_ok=True)
     # logger
     self.writer = SummaryWriter(self.cfg.EXP.PATH)
     # timers
     self.t = {'iter': Timer(), 'train': Timer(), 'val': Timer()}
     # save the experiment environment  # TODO: enable
     temp = os.path.join(self.cfg.EXP.PATH, 'code')
     utils.copy_cur_env('./', temp, exception='exp')
     # load the dataset
     self.meanImg, self.trainloader, self.valloader = datasets.getData(
         self.cfg)
     # define the network
     self.net = model.getModel(cfg)
     # loss function
     self.criterion = torch.nn.MSELoss()
     # optimizer
     self.optimizer = torch.optim.Adam(
         self.net.parameters(),
         lr=self.cfg.TRAIN.LR,
         weight_decay=self.cfg.TRAIN.WEIGHT_DECAY)
     # initialize some variables
     self.beginEpoch = 1
     self.batch = 1
     self.bestacc = 0
     # load a pretrained model
     if self.cfg.TRAIN.RESUME:
         print('Loading Model..........')
         saved_state = torch.load(self.cfg.TRAIN.RESUME_PATH)
         self.net.load_state_dict(saved_state['weights'])
         self.beginEpoch = saved_state['epoch']
         self.batch = saved_state['batch']
         self.bestacc = saved_state['bestacc']
     # GPU setup
     self.gpu = torch.cuda.is_available() and self.cfg.TRAIN.USE_GPU
     self.device = 'cuda' if self.gpu else 'cpu'
     if self.gpu:
         torch.cuda.set_device(self.cfg.TRAIN.GPU_ID[0])
         self.criterion.cuda()
         if len(self.cfg.TRAIN.GPU_ID) > 1:
             self.net = torch.nn.DataParallel(
                 self.net, device_ids=self.cfg.TRAIN.GPU_ID)
         self.net = self.net.cuda()
     else:
         self.net = self.net.cpu()
         self.criterion.cpu()
Example #13
def train(audio_path, plot_matrix=False):

    x_data, y_data = get_set(26, 9, audio_path)
    x_data = keras.preprocessing.sequence.pad_sequences(x_data, maxlen=100)

    x_train, x_test, Y_train, Y_test = train_test_split(x_data,
                                                        y_data,
                                                        test_size=0.1,
                                                        random_state=42)

    y_train = keras.utils.to_categorical(Y_train, 16)
    y_test = keras.utils.to_categorical(Y_test, 16)

    model = getModel((x_train.shape[1], x_train.shape[2]), y_train.shape[1])

    history = model.fit(x_train,
                        y_train,
                        batch_size=10,
                        epochs=137,
                        verbose=1,
                        validation_data=(x_test, y_test))
    # Plot training & validation accuracy values
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()

    # Plot training & validation loss values
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()
    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    model.save(model_name)

    if plot_matrix:
        plot(x_test, Y_test, model_name)
Example #14
def train(args):
    epochs = int(os.path.expanduser(args.epochs))
    batch_size = int(os.path.expanduser(args.batch_size))

    #TODO:
    # Add TensorBoard
    # Code Model
    # code the loss function

    checkPointPath = './pretrainedModel/'
    if not os.path.isdir(checkPointPath):
        os.makedirs(checkPointPath)
    modelPath = os.path.join(checkPointPath, 'LastModel.hdf5')
    checkPoint = ModelCheckpoint(
        modelPath,
        monitor="val_loss",
        verbose=0,
        save_best_only=True,
        save_weights_only=False,
        mode="auto",
        save_freq="epoch",
    )

    input_shape = (448, 448, 3)
    inputs = Input(input_shape)

    if os.path.exists(modelPath):
        model = load_model(modelPath)
    else:
        model = getModel(inputs)
        model.compile(loss=getloss, optimizer='adam')

    train_generator = YoloSequenceData('train', batch_size)
    validation_generator = YoloSequenceData('val', batch_size)

    model.fit_generator(train_generator,
                        steps_per_epoch=len(train_generator),
                        epochs=epochs,
                        validation_data=validation_generator,
                        validation_steps=len(validation_generator),
                        callbacks=[checkPoint],
                        verbose=0)
Example #15
def main():
    data = load_dataCSV()
    
    look_back = 28
    jump=4
    
    train_data, test_data = dp.rescale_data(data)
    trainX, trainY = dp.create_dataset(train_data, look_back)
    trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
    testX, testY = dp.create_dataset(test_data, look_back)  
    
    model = mod.getModel(look_back)
    model.fit(
        trainX,
        trainY,
        batch_size=128,
        nb_epoch=300,
        validation_split=0.10)
    
    pred,perfs=mod.testModel(model,testX,testY,jump,look_back)
    
    actual_test_data=test_data[len(test_data)-len(pred):]

    
    print("\n Average Covarance between predicted and actual prices on only predicted days:")
    print(np.mean(perfs))
    
    print("\n Covarance between predicted and actual prices on all days:")    
    print(np.cov(actual_test_data,pred)[1][0])
    
    plt.figure(3)
    plt.plot(actual_test_data)
    
    plt.figure(4)
    plt.plot(pred)
    
    mod.saveModel(model,'lstm3')
Example #16
def compileModel(epochs=5000, save_name='model'):
    (X, Y, urls) = getDataSet()
    model = getModel()
    trainModel(model, X, Y, epochs)
    y_pred = [x[0] for x in model.predict(X)]

    accuracy, false_like, false_dislike = calculateModelMetrics(y_pred, Y)
    correlation = getCorrelation(y_pred, Y)
    realTolerance = getMid(Y)
    binTolerance = getMid([int(x > 0.5) for x in y_pred])
    modelTolerance = getMid(y_pred)

    new_data = [{
        'prediction':
        (Mark.like.name if x > modelTolerance else Mark.dislike.name)
    } for x in y_pred]
    memo.upd_urls(dict(zip(urls, new_data)))

    metrics = {
        'real tolerance': realTolerance,
        'bin tolerance': binTolerance,
        'model tolerance': modelTolerance,
        'accuracy': accuracy,
        'correlation': correlation,
        'false_like': false_like,
        'false_dislike': false_dislike,
    }

    print("{:<16} {:<8}".format('METRIC', 'VALUE'))
    for key, val in metrics.items():
        print("{:<16} {:<8}".format(key, val))

    if not os.path.isdir(ROOT_PATH + '/models/'):
        os.mkdir(ROOT_PATH + '/models/')

    model.save('models/' + save_name + '.h5')
Example #17
                                noise_level=noise)
    train_dataloader = DataLoader(train_dataset,
                                  config.NNBATCHSIZE,
                                  shuffle=True,
                                  num_workers=16)

    valid_dataset = IronDataset(train[val_index],
                                train_tr[val_index],
                                seq_len=config.GROUP_BATCH_SIZE,
                                flip=False)
    valid_dataloader = DataLoader(valid_dataset,
                                  config.NNBATCHSIZE,
                                  shuffle=False)

    it = 0
    model = getModel(config)

    early_stopping = EarlyStopping(
        patience=10,
        is_maximize=True,
        checkpoint_path=os.path.join(
            config.outdir,
            "gru_clean_checkpoint_expid_{}_fold_{}_iter_{}.pt".format(
                config.expriment_id, index, it)))

    weight = None  # cal_weights()
    criterion = nn.CrossEntropyLoss(weight=weight)
    optimizer = torch.optim.Adam(model.parameters(), lr=config.LR)
    optimizer = torchcontrib.optim.SWA(optimizer,
                                       swa_start=10,
                                       swa_freq=2,
Example #18
if __name__ == '__main__':
    global C
    C = Conf()
    res = []
    df = getData(C.STRFRQ[C.DATAFREQ])
    # df = prepareData(df)
    ndf = normaliz(df)
    dfY = df[C.Y]
    for sn in SCENARIOS:
        #encoder.add(Dropout(0.5))
        #encoder.add(LSTM(output_dim=C.XOUT_DIM, return_sequences=True, stateful=True))
        C.overwrite(sn)
        print(" - " + C.SCENARIO)

        train_Xs, train_Y = getTrainData(C, ndf, dfY)
        val_Xs, val_Y = getTrainData(C, ndf, dfY, 'test')
        model = getModel(C)

        #cost = model.train_on_batch([train_Xs[i][0:C.BATCH_SIZE] for i in range(len(train_Xs))], train_Y[0:C.BATCH_SIZE])
        #print(cost)

        re = model.fit(train_Xs,
                       train_Y,
                       batch_size=C.BATCH_SIZE,
                       nb_epoch=20,
                       validation_data=(val_Xs, val_Y))
        res.append([C.SCENARIO, re.history])

    ResAnaysis(res)
Example #19
    # send the arrow key
    if bestDir == 0:
        bodyElem.send_keys(Keys.UP)
    elif bestDir == 1:
        bodyElem.send_keys(Keys.RIGHT)
    elif bestDir == 2:
        bodyElem.send_keys(Keys.DOWN)
    elif bestDir == 3:
        bodyElem.send_keys(Keys.LEFT)


# loop: keep sending arrow keys
for i in range(10000):
    # model the current board
    # if this returns False, the script ran too fast and the tile elements are no longer in the DOM, so skip this iteration
    if model.getModel(browser) == False:
        continue
    # send the arrow key
    sendDirKey()
    time.sleep(0.02)
    # check whether the game is lost
    try:
        if browser.find_element_by_class_name('game-over'):
            break
    except:
        None
    # check whether the game is won
    try:
        if browser.find_element_by_class_name('game-won'):
            break
    except:
Example #20
    # Get all the word trie nodes that are associated with our given prefix
    tries = [('', currentTrie)] if not prefix else list(
        currentTrie.getLetters(prefix[-1]))
    # Obtain every sentence from our data that matches the given prefix
    # and occurs more than frequency_threshold times
    results = dict( ((sentence, count) for _, tr in tries\
     for sentence, count in tr if count > frequency_threshold) )
    # Sort the resulting sentences by their frequencies and only keep the top max_results
    return sorted(results, key=results.__getitem__, reverse=True)[:max_results]


if __name__ == '__main__':
    # Console interface for autocomplete
    datapath = os.path.join(os.path.dirname(__file__),
                            'sample_conversations.json')
    trie = getModel(datapath)
    print("Type in a prefix to autocomplete",\
     "Use SET_MAX_RESULTS n to set the maximum number of results.",\
     "Use SET_FREQ_THRESHOLD n to set the frequency threshold.",\
     "Type QUIT to exit.")
    max_results, frequency_threshold = None, 0
    while True:
        inp = input("prefix> ").strip()
        if inp == "QUIT":
            break
        elif inp[:16] == 'SET_MAX_RESULTS ':
            try:
                max_results = max(0, int(inp[16:]))
            except:
                print("Error setting max_results to", "'" + inp[16:] + "'.",
                      'n must be a nonnegative integer.')
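The ranking step described in the comments above (filter by prefix and frequency threshold, then sort by frequency and keep the top max_results) can be illustrated without the project's trie; the plain dict of sentence counts below is an assumption used only for this sketch:

def top_completions(counts, prefix, frequency_threshold=0, max_results=None):
    # keep sentences that start with the prefix and clear the frequency threshold
    results = {s: c for s, c in counts.items()
               if s.startswith(prefix) and c > frequency_threshold}
    # sort by frequency, highest first, and keep only the top max_results
    return sorted(results, key=results.__getitem__, reverse=True)[:max_results]

counts = {"how are you": 5, "how can I help": 9, "hello there": 2}
print(top_completions(counts, "how"))  # ['how can I help', 'how are you']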
Example #21
def main(experiment, logging, augmentation, dataset, model, metric, training):

    if experiment["reproducible"]:
        print('fix seed on')
        seed = 0
        random.seed(seed)  # augmentation
        np.random.seed(seed)  # numpy
        ia.seed(seed)  #  imgaug library
        torch.manual_seed(seed)  # cpu
        torch.cuda.manual_seed(seed)  # gpu
        torch.cuda.manual_seed_all(seed)  # multi gpu
        torch.backends.cudnn.enabled = False  # cudnn library
        torch.backends.cudnn.deterministic = True

    ##################
    #     logging    #
    ##################
    LOG = Logger(**logging)
    LOG('print', name='config', values=json.dumps(config))

    ##################
    #     dataset    #
    ##################
    datasets = getDataset(**dataset, **augmentation)

    LOG('slack',
        name='dataset',
        values=[str(datasets['train']),
                str(datasets['val'])])

    ##################
    #      model     #
    ##################

    MODEL = getModel(**model)
    input_size = [augmentation['channel']] + augmentation['size']
    MODEL.modelSummary(input_size, LOG)

    ##################
    #   metric   #
    ##################

    metricParser = TypeParser(types={
        "IOU": M.IOU,
        "DICE": M.DICE,
        "accuracy": M.Accuracy,
        "f1": M.F1,
    })
    metrics = [metricParser(**m) for m in metric]

    ##################
    #    training    #
    ##################
    trainer = Trainer(model=MODEL, datasets=datasets, metrics=metrics, LOG=LOG)

    try:
        trainer.train(**config["training"])
    except Exception as e:
        LOG('slack', name='warning', values='abrupt end, {}'.format(e))
        LOG.finish()
        print('abrupt end, {}'.format(e))
        print(traceback.format_exc())

    infer = Inference(
        model=MODEL,
        datasets=datasets,
        LOG=LOG,
        metrics=metrics,
        visualizations=None,
    )

    infer()

    LOG.finish()
Example #22
import dataLoader
import model
from keras.models import model_from_json
from keras.callbacks import TensorBoard
import os 
#os.environ['CUDA_VISIBLE_DEVICES'] = '1'
processedData, unprocessedData = dataLoader.fetch_data('data/')


autoencoder = model.getModel(256, 256, 8)

x_train = processedData
x_test = processedData[1,:,:,:].reshape(1,256,256,8)
autoencoder.fit(x_train, x_train, epochs=100000, batch_size=4, shuffle=True, validation_data=(x_test, x_test),
                callbacks=[TensorBoard(log_dir='conv_autoencoder')], verbose=2)
                
autoencoder_json = autoencoder.to_json()
with open("model.json", "w") as json_file:
    json_file.write(autoencoder_json)
# serialize weights to HDF5
autoencoder.save_weights("model.h5")
print("Saved model to disk")
autoencoder.save('autoencoder.h5')
Example #23
def main():
  now = datetime.datetime.now()
  logger = Logger(args.save_path + '/logs_{}'.format(now.isoformat()))

  model = getModel(args)
  cudnn.benchmark = True
  optimizer = torch.optim.SGD(model.parameters(), args.LR,
                              momentum=args.momentum,
                              weight_decay=args.weight_decay)

  valSource_dataset = SourceDataset('test', ref.nValViews)
  valTarget_dataset = TargetDataset('test', ref.nValViews)
  
  valSource_loader = torch.utils.data.DataLoader(valSource_dataset, batch_size = 1, 
                        shuffle=False, num_workers=1, pin_memory=True, collate_fn=collate_fn_cat)
  valTarget_loader = torch.utils.data.DataLoader(valTarget_dataset, batch_size = 1, 
                        shuffle=False, num_workers=1, pin_memory=True, collate_fn=collate_fn_cat)
  
  if args.test:
    f = {}
    for split in splits:
      f['{}'.format(split)] = open('{}/{}.txt'.format(args.save_path, split), 'w')
    test(args, valSource_loader, model, None, f['valSource'], 'valSource')
    test(args, valTarget_loader, model, None, f['valTarget'], 'valTarget')
    return
  
  train_dataset = Fusion(SourceDataset, TargetDataset, nViews = args.nViews, targetRatio = args.targetRatio, totalTargetIm = args.totalTargetIm)
  trainTarget_dataset = train_dataset.targetDataset
  
  train_loader = torch.utils.data.DataLoader(
      train_dataset, batch_size=args.batchSize, shuffle=not args.test,
      num_workers=args.workers if not args.test else 1, pin_memory=True, collate_fn=collate_fn_cat)
  trainTarget_loader = torch.utils.data.DataLoader(
      trainTarget_dataset, batch_size=args.batchSize, shuffle=False,
      num_workers=args.workers if not args.test else 1, pin_memory=True, collate_fn=collate_fn_cat)

  M = None
  if args.shapeWeight > ref.eps:
    print 'getY...'
    Y = getY(train_dataset.sourceDataset)
    M = initLatent(trainTarget_loader, model, Y, nViews = args.nViews, S = args.sampleSource, AVG = args.AVG)
  
  print 'Start training...'
  for epoch in range(1, args.epochs + 1):
    adjust_learning_rate(optimizer, epoch, args.dropLR)
    train_mpjpe, train_loss, train_unSuploss = train(args, train_loader, model, optimizer, M, epoch)
    valSource_mpjpe, valSource_loss, valSource_unSuploss = validate(args, 'Source', valSource_loader, model, None, epoch)
    valTarget_mpjpe, valTarget_loss, valTarget_unSuploss = validate(args, 'Target', valTarget_loader, model, None, epoch)

    train_loader.dataset.targetDataset.shuffle()
    if args.shapeWeight > ref.eps and epoch % args.intervalUpdateM == 0:
      M = stepLatent(trainTarget_loader, model, M, Y, nViews = args.nViews, lamb = args.lamb, mu = args.mu, S = args.sampleSource)

    logger.write('{} {} {}\n'.format(train_mpjpe, valSource_mpjpe, valTarget_mpjpe))
    
    logger.scalar_summary('train_mpjpe', train_mpjpe, epoch)
    logger.scalar_summary('valSource_mpjpe', valSource_mpjpe, epoch)
    logger.scalar_summary('valTarget_mpjpe', valTarget_mpjpe, epoch)
    
    logger.scalar_summary('train_loss', train_loss, epoch)
    logger.scalar_summary('valSource_loss', valSource_loss, epoch)
    logger.scalar_summary('valTatget_loss', valTarget_loss, epoch)
    
    logger.scalar_summary('train_unSuploss', train_unSuploss, epoch)
    logger.scalar_summary('valSource_unSuploss', valSource_unSuploss, epoch)
    logger.scalar_summary('valTarget_unSuploss', valTarget_unSuploss, epoch)
    
    if epoch % 10 == 0:
      torch.save({
        'epoch': epoch + 1,
        'arch': args.arch,
        'state_dict': model.state_dict(),
        'optimizer' : optimizer.state_dict(),
      }, args.save_path + '/checkpoint_{}.pth.tar'.format(epoch))
  logger.close()
Example #24
        result_data.append([bag, class_row])
    # print 'tf data = {}'.format(result_data)
    return result_data


if __name__ == '__main__':
    classes, all_words, docs = parseData()
    training_data = prepareDataForTf(classes, all_words, docs)
    random.shuffle(training_data)
    training = np.array(training_data)
    train_x = list(training[:, 0])
    train_y = list(training[:, 1])
    # print ("shape = {}\nx = {}\ny = {}".format(training.shape, train_x, train_y))

    tf.reset_default_graph()
    net = getModel(len(train_x[0]), len(train_y[0]))
    # Define model and setup tensorboard
    model = tflearn.DNN(net, tensorboard_dir=DATA_PATH + 'tflearn_logs')
    # Start training (apply gradient descent algorithm)
    model.fit(train_x, train_y, n_epoch=1000, batch_size=8, show_metric=False)
    model.save(MODEL_FILE)

    with open(WORDS_FILE, 'w') as f:
        pickle.dump(
            {
                'words': all_words,
                'classes': classes,
                'input_tensor_length': len(train_x[0]),
                'output_tensor_length': len(train_y[0])
            }, f)
Example #25
from config import config

from soundProcessing import batchgen_and_numsamples

from model import getModel

from keras import callbacks
import os


bgen, numSamps = batchgen_and_numsamples()

model = getModel()


class SaveEpoch(callbacks.Callback):
    def on_epoch_end(self, epch, thrd = {}):
        model.save_weights(os.path.join(config["projectDir"], "savedModel_%d"%epch), 
                overwrite=True)


model.fit_generator(bgen,
        samples_per_epoch=numSamps,
        nb_epoch= int(config["epochs"]) ,
        max_q_size = 100,
        callbacks=[SaveEpoch()]
)
Example #26
def main():
    now = datetime.datetime.now()
    logger = Logger(args.save_path + '/logs_{}'.format(now.isoformat()))

    model = getModel(args)
    cudnn.benchmark = True
    optimizer = torch.optim.SGD(model.parameters(),
                                args.LR,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    valSource_dataset = SourceDataset('test', ref.nValViews)
    valTarget_dataset = TargetDataset('test', ref.nValViews)

    valSource_loader = torch.utils.data.DataLoader(valSource_dataset,
                                                   batch_size=1,
                                                   shuffle=False,
                                                   num_workers=1,
                                                   pin_memory=True,
                                                   collate_fn=collate_fn_cat)
    valTarget_loader = torch.utils.data.DataLoader(valTarget_dataset,
                                                   batch_size=1,
                                                   shuffle=False,
                                                   num_workers=1,
                                                   pin_memory=True,
                                                   collate_fn=collate_fn_cat)

    if args.test:
        f = {}
        for split in splits:
            f['{}'.format(split)] = open(
                '{}/{}.txt'.format(args.save_path, split), 'w')
        test(args, valSource_loader, model, None, f['valSource'], 'valSource')
        test(args, valTarget_loader, model, None, f['valTarget'], 'valTarget')
        return

    train_dataset = Fusion(SourceDataset,
                           TargetDataset,
                           nViews=args.nViews,
                           targetRatio=args.targetRatio,
                           totalTargetIm=args.totalTargetIm)
    trainTarget_dataset = train_dataset.targetDataset

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.batchSize,
        shuffle=not args.test,
        num_workers=args.workers if not args.test else 1,
        pin_memory=True,
        collate_fn=collate_fn_cat)
    trainTarget_loader = torch.utils.data.DataLoader(
        trainTarget_dataset,
        batch_size=args.batchSize,
        shuffle=False,
        num_workers=args.workers if not args.test else 1,
        pin_memory=True,
        collate_fn=collate_fn_cat)

    M = None
    if args.shapeWeight > ref.eps:
        print 'getY...'
        Y = getY(train_dataset.sourceDataset)
        M = initLatent(trainTarget_loader,
                       model,
                       Y,
                       nViews=args.nViews,
                       S=args.sampleSource,
                       AVG=args.AVG)

    print 'Start training...'
    for epoch in range(1, args.epochs + 1):
        adjust_learning_rate(optimizer, epoch, args.dropLR)
        train_mpjpe, train_loss, train_unSuploss = train(
            args, train_loader, model, optimizer, M, epoch)
        valSource_mpjpe, valSource_loss, valSource_unSuploss = validate(
            args, 'Source', valSource_loader, model, None, epoch)
        valTarget_mpjpe, valTarget_loss, valTarget_unSuploss = validate(
            args, 'Target', valTarget_loader, model, None, epoch)

        train_loader.dataset.targetDataset.shuffle()
        if args.shapeWeight > ref.eps and epoch % args.intervalUpdateM == 0:
            M = stepLatent(trainTarget_loader,
                           model,
                           M,
                           Y,
                           nViews=args.nViews,
                           lamb=args.lamb,
                           mu=args.mu,
                           S=args.sampleSource)

        logger.write('{} {} {}\n'.format(train_mpjpe, valSource_mpjpe,
                                         valTarget_mpjpe))

        logger.scalar_summary('train_mpjpe', train_mpjpe, epoch)
        logger.scalar_summary('valSource_mpjpe', valSource_mpjpe, epoch)
        logger.scalar_summary('valTarget_mpjpe', valTarget_mpjpe, epoch)

        logger.scalar_summary('train_loss', train_loss, epoch)
        logger.scalar_summary('valSource_loss', valSource_loss, epoch)
        logger.scalar_summary('valTatget_loss', valTarget_loss, epoch)

        logger.scalar_summary('train_unSuploss', train_unSuploss, epoch)
        logger.scalar_summary('valSource_unSuploss', valSource_unSuploss,
                              epoch)
        logger.scalar_summary('valTarget_unSuploss', valTarget_unSuploss,
                              epoch)

        if epoch % 10 == 0:
            torch.save(
                {
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                }, args.save_path + '/checkpoint_{}.pth.tar'.format(epoch))
    logger.close()
Example #27
def train(train_batch, validate_batch, test_data, mask):
    # nb_train = len(train_data)
    x = tf.placeholder(tf.float32, shape=(None, 256, 256, 24), name='x_input')
    y_ = tf.placeholder(tf.float32, shape=(None, 256, 256, 24), name='y_label')
    x_k = tf.placeholder(tf.complex64,
                         shape=(None, 256, 256, 12),
                         name='x_kspace')
    mask_k = tf.placeholder(tf.complex64,
                            shape=(None, 256, 256, 12),
                            name='mask')

    features, labels, kx_mask, mask_c = setup_inputs(train_batch, mask,
                                                     BATCH_SIZE)
    f_val, l_val, kx_val, m_val = setup_inputs(validate_batch, mask,
                                               BATCH_SIZE)
    y = getModel(x, x_k, mask_k)
    global_step = tf.Variable(0., trainable=False)
    with tf.name_scope('mse_loss'):
        total_loss = mae(y_, y)
    lr = tf.train.exponential_decay(
        lr_base,
        global_step=global_step,
        decay_steps=(nb_train_samples + BATCH_SIZE - 1) // BATCH_SIZE,
        decay_rate=lr_decay_rate,
        staircase=False)
    with tf.name_scope("train"):
        train_step = tf.train.AdamOptimizer(lr).minimize(
            total_loss, global_step=global_step)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())
        sess.run(init_op)

        for i in range(EPOCHS):
            loss_sum = 0.0
            count_batch = 0
            ave_loss = 0.
            nb_batches = int(np.ceil(nb_train_samples // BATCH_SIZE))
            for n_batch in range(nb_batches):
                features_trian, labels_train, kx_mask_train, mask_c_train = sess.run(
                    [features, labels, kx_mask, mask_c])
                _, loss_value, step = sess.run(
                    [train_step, total_loss, global_step],
                    feed_dict={
                        x: features_trian,
                        y_: labels_train,
                        x_k: kx_mask_train,
                        mask_k: mask_c_train
                    })

                loss_sum += loss_value
                count_batch += 1
                ave_loss = loss_sum / count_batch
                loss['batch'].append(ave_loss)
                print('Epoch %3d-batch %3d/%3d  training loss: %8f' %
                      (i + 1, count_batch, nb_batches, ave_loss))

                # evaluate
                if count_batch % eval_every == 0:
                    count_batch_val = 0
                    loss_sum_val = 0.
                    ave_loss_val = 0.
                    loss['count'].append(ave_loss)
                    nb_batches_val = int(np.ceil(nb_val // BATCH_SIZE))
                    for n_batch_val in range(nb_batches_val):
                        features_val, labels_val, kx_mask_val, mask_c_val = sess.run(
                            [f_val, l_val, kx_val, m_val])
                        loss_value_val, pred_val = sess.run(
                            [total_loss, y],
                            feed_dict={
                                x: features_val,
                                y_: labels_val,
                                x_k: kx_mask_val,
                                mask_k: mask_c_val
                            })

                        loss_sum_val += loss_value_val
                        count_batch_val += 1
                        ave_loss_val = loss_sum_val / count_batch_val
                        print('Epoch %3d-batch %3d/%3d  validation loss: %8f' %
                              (i + 1, count_batch_val, n_batch_val,
                               ave_loss_val))

                    val_loss.append(ave_loss_val)
            loss['epoch'].append(ave_loss)
            saver.save(sess,
                       os.path.join(model_save_path, model_name),
                       global_step=global_step)

            # test every 5 epochs
            if (i + 1) % test_every == 0:
                count = 0
                for y_test, n_batch in iterate_minibatch(test_data,
                                                         batch_size=1,
                                                         shuffle=False):
                    features_test, labels_test, kx_mask_test, mask_c_test = setup_inputs_test(
                        y_test, mask, norm=None)
                    test_loss, prediction = sess.run(
                        [total_loss, y],
                        feed_dict={
                            x: features_test,
                            y_: labels_test,
                            x_k: kx_mask_test,
                            mask_k: mask_c_test
                        })
                    count += 1
                    print('The loss of NO. %2d test data is %.8f' %
                          (count, test_loss))

                    pred_c = real2complex_array(prediction)
                    pred = np.squeeze(
                        np.sqrt(np.sum(np.square(np.abs(pred_c)), axis=-1)))
                    plt.figure(1)
                    plt.imshow(pred, cmap='gray')
                    fig_name = os.path.join('./result',
                                            '%d_out_%d.png' % (count, i + 1))
                    plt.savefig(fig_name)

        coord.request_stop()
        coord.join(threads)
        loss_plot('batch')
        loss_plot('count')
        np.savetxt(os.path.join("./result", 'train_batch_loss.txt'),
                   np.asarray(loss['batch']))
        np.savetxt(os.path.join("./result", 'train_count_loss.txt'),
                   np.asarray(loss['count']))
        np.savetxt(os.path.join("./result", 'val_loss.txt'),
                   np.asarray(val_loss))
import torch
from torch import optim
import PIL.Image
import utils
import model
import cv2

STEPS = 2500

model = model.getModel()

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

content_image = utils.load_image(
    'https://www.rover.com/blog/wp-content/uploads/2019/01/6342530545_45ec8696c8_b-960x540.jpg'
).to(device)

style_image = utils.load_image(
    'https://images2.minutemediacdn.com/image/upload/c_crop,h_1595,w_2835,x_0,y_408/f_auto,q_auto,w_1100/v1556647489/shape/mentalfloss/62280-mona_lisa-wiki.jpg'
).to(device)

# get content and style features only once before training
content_features = utils.get_features(content_image, model)
style_features = utils.get_features(style_image, model)

# calculate the gram matrices for each layer of our style representation
style_grams = {
    layer: utils.gram_matrix(style_features[layer])
    for layer in style_features
}
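utils.gram_matrix is project code not shown here; a minimal sketch of the standard gram-matrix computation it presumably performs on a (batch, channels, height, width) feature map:

import torch

def gram_matrix(tensor):
    # flatten each channel and compute channel-by-channel correlations
    b, c, h, w = tensor.size()
    features = tensor.view(b * c, h * w)
    return features @ features.t()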
Example #29
 def setUpClass(self):
     datapath = os.path.join(os.path.dirname(__file__), 'data',
                             'test_conversations.json')
     self.model = getModel(datapath)
Example #30
from keras import models, layers
import numpy as np
import sincnet
from keras.layers import Dense, Dropout, Activation

print('N_filt ' + str(cnn_N_filt))
print('N_filt len ' + str(cnn_len_filt))
print('FS ' + str(fs))
print('WLEN ' + str(wlen))

input_shape = (wlen, 1)
out_dim = class_lay[0]
from model import getModel

model = getModel(input_shape, out_dim)
optimizer = RMSprop(lr=lr, rho=0.9, epsilon=1e-8)
model.compile(loss='categorical_crossentropy',
              optimizer=optimizer,
              metrics=['accuracy'])

checkpoints_path = os.path.join(output_folder, 'checkpoints')

tb = TensorBoard(log_dir=os.path.join(output_folder, 'logs', 'SincNet'))
checkpointer = ModelCheckpoint(filepath=os.path.join(checkpoints_path,
                                                     'SincNet.hdf5'),
                               verbose=1,
                               save_best_only=False)

if not os.path.exists(checkpoints_path):
    os.makedirs(checkpoints_path)
Example #31
def getSingleModel():
    """获得单个模型的名称"""
    names, models = model.getModel()

    return names