Code example #1
# Flask is assumed from the use of request.get_json(); the app object,
# route, and the trainModel import are inferred, not shown in the excerpt.
from flask import Flask, request

app = Flask(__name__)

@app.route('/train', methods=['POST'])
def train():
    req_data = request.get_json()
    batchsizeinput = req_data['batchsizeinput']
    nbepochinput = req_data['nbepochinput']
    print("batchsizeinput:" + batchsizeinput)
    print("nbepochinput:" + nbepochinput)
    trainModel(int(batchsizeinput), int(nbepochinput))
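A minimal client call for this endpoint might look like the following; the host and route are assumptions, since the excerpt does not show how the handler is registered.

import requests

# hypothetical URL; both values are sent as strings, matching the int() casts above
resp = requests.post("http://localhost:5000/train",
                     json={"batchsizeinput": "32", "nbepochinput": "10"})
print(resp.status_code)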
Code example #2
# da, tr, and solve() are imported/defined elsewhere in the source file
def main():
    while True:
        mode = input("train or solve? -> ")
        if mode == "train":
            da.augument()
            epc = input("epoch(1~9)? -> ")
            if epc.isdecimal() and 1 <= int(epc) <= 9:
                tr.trainModel(epc=int(epc))  # input() returns a string, so cast
            else:
                print("epoch=5")
                tr.trainModel()
        elif mode == "solve":
            solve()
        else:
            print("invalid input")
            continue
        break
Code example #3
# trainModel and testModel are defined elsewhere in the project
from datetime import datetime

def main():
    startTime = datetime.now()
    train_data, train_labels, bestN, bestK = trainModel()
    print('finished training the model', datetime.now() - startTime)
    carList = testModel(train_data, train_labels, bestN, bestK)
    print('finished', datetime.now() - startTime)

    print(carList)
    return carList
Code example #4
File: main.py Project: cscn89/image_classification
import time
from optparse import OptionParser
# trainModel, testModel, and humanTime are defined elsewhere in the project

def main():
    parser = OptionParser()
    parser.add_option("-p", "--testpath", dest="test_path", help="provide the test root path")
    parser.add_option("--resultpath", dest="result_path", help="provide the result path")
    (options, args) = parser.parse_args()
    test_path = options.test_path
    result_path = options.result_path

    train_start = time.time()
    trainModel()
    train_end = time.time()
    elapsed_time = train_end - train_start
    print(humanTime(elapsed_time))

    test_start = time.time()
    testModel(test_path, result_path)
    test_end = time.time()
    elapsed_time = test_end - test_start
    print(humanTime(elapsed_time))
Code example #5
import json
from flask import request, Response
# RAND, TREE, and trainModel are defined elsewhere in the project

def train():
    # get the model name from the query-string parameter
    modelNameParam = request.args['modelName']

    # Validate the parameter: any value other than RAND falls back to the default TREE model
    if modelNameParam == RAND:
        model_evaluation = trainModel(RAND)
        model_name = 'Random Forest Classifier'
    else:
        model_evaluation = trainModel(TREE)
        model_name = 'Decision Tree Classifier'

    msg = 'A new ' + model_name + ' model has been created...'

    evaluation_results = {'modelEvaluation': model_evaluation, 'msg': msg}

    response = Response(response=json.dumps(evaluation_results),
                        status=200,
                        mimetype='application/json')
    return response
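Assuming the handler is registered on a route such as /train, a client request might look like this (the URL, route, and the concrete value of the RAND constant are assumptions):

import requests

# hypothetical URL; the model name travels as a query-string parameter
resp = requests.get("http://localhost:5000/train", params={"modelName": "RAND"})
print(resp.json()["msg"])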
Code example #6
import os
import sys

import tensorflow as tf

from reader import getDicts
from reader import read_poems
from train import trainModel

checkpoint_dir = os.path.join('.')
exclusion = ['*']

print('Character: ', sys.argv[1])

vocab_size = 2000

index_to_char, char_to_index = getDicts(vocab_size)
data = read_poems(char_to_index)
with tf.variable_scope("trainModel"):
    model = trainModel(training=False, infer=True)

with tf.Session() as sess:
    tf.initialize_all_variables().run()
    saver = tf.train.Saver(tf.all_variables())
    print('-------', type(tf.all_variables()))
    for a in tf.all_variables():
        print(a.name)
    print('-------')

    ckpt = tf.train.get_checkpoint_state(checkpoint_dir)

    # guard the print: get_checkpoint_state() returns None when no checkpoint exists
    if ckpt and ckpt.model_checkpoint_path:
        print('ckpt.model_checkpoint_path: ', ckpt.model_checkpoint_path)
        saver.restore(sess, ckpt.model_checkpoint_path)
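This excerpt targets a pre-1.0 TensorFlow release; on TF 1.x the deprecated calls above map to newer names, roughly as sketched below (a minimal sketch, not a drop-in rewrite):

# tf.initialize_all_variables() -> tf.global_variables_initializer()
# tf.all_variables()            -> tf.global_variables()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.global_variables())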
Code example #7
def main(args):
    ##################################################################
    # Top level code for running hyperoptimization
    # User specifies model type and layer number
    # Code then finds optimal hyperparameters for all
    # 		combinations of models/layers
    ##################################################################

    # Load in arguments
    n_models = args.n_models
    n_epochs = args.n_epochs
    hyp_epochs = args.hyp_epochs
    load = args.resume
    model_type = args.model
    layers = args.layers
    image_size = args.image_size
    exp_name = args.exp_name

    # Make sure the result directories exist; create them if they don't
    directory_logs = '../../PredPrey_Results/Decision/Logs'
    directory_results = '../../PredPrey_Results/Decision/ResultBlock'

    if not os.path.exists(directory_logs):
        os.makedirs(directory_logs)

    if not os.path.exists(directory_results):
        os.makedirs(directory_results)

    # Create name for result folders
    log_file = '../../PredPrey_Results/Decision/Logs/' + exp_name + '.log'
    hyperopt_file = '../../PredPrey_Results/Decision/ResultBlock/hyperparameter_' + exp_name + '.pth.tar'
    result_file = '../../PredPrey_Results/Decision/ResultBlock/resultBlock_' + exp_name + '.pth.tar'
    model_file = '../../PredPrey_Results/Decision/ResultBlock/modelBlock_' + exp_name + '.pth.tar'

    # Initialize logger
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)

    formatter = logging.Formatter('[%(asctime)s:%(name)s]:%(message)s')

    file_handler = logging.FileHandler(log_file)
    file_handler.setFormatter(formatter)

    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)

    logger.addHandler(file_handler)
    logger.addHandler(stream_handler)

    # Print experiment parameters to log
    logger.info(
        'Training %s models for %d hyperband epochs of %d epochs each.' %
        (model_type, hyp_epochs, n_epochs))
    logger.info('Initial number of models: %d' % (n_models))

    # Setup network parameters
    num_nodes = image_size**2
    input_size = num_nodes
    hidden_size = num_nodes
    loss_fn = nn.BCELoss()
    dtype = torch.FloatTensor
    if args.use_gpu:
        print('GPU is used.')
        dtype = torch.cuda.FloatTensor

    hyperparameter = {}

    # Run hyperband epoch
    modelBlock, resultBlock = generateDictionary_Hyperopt(
        n_models, model_type, layers, input_size, hidden_size, image_size,
        loss_fn, dtype)

    torch.save(resultBlock, result_file)
    modelBlock_State = convertStateDict(modelBlock)
    torch.save(modelBlock_State, model_file)

    for h_epoch in range(hyp_epochs):
        trainModel(modelBlock, n_epochs, log_file)
        pruneModel(modelBlock, resultBlock)
        torch.save(resultBlock, result_file)
        modelBlock_State = convertStateDict(modelBlock)
        torch.save(modelBlock_State, model_file)

    trainModel(modelBlock, n_epochs, log_file)

    epoch_total = modelBlock["Meta"]["Epochs_Trained"]
    resultBlock["Meta"]["Total_Epochs"] = epoch_total

    # Find the model id with best loss and return its parameters
    best_loss = 1000.0
    for key, val in modelBlock.items():
        if (key != "Meta"):
            resultBlock[key][epoch_total] = {
                "Loss": modelBlock[key]["Loss"],
                "Acc_All": modelBlock[key]["Acc_All"]
            }
            resultBlock[key]["Hyperparameter"]["Max_Epoch"] = epoch_total

            if ((modelBlock[key]["Loss"] < best_loss)):
                best_loss = modelBlock[key]["Loss"]
                best_key = key

    # Ensure a best_key is still returned even if every model's loss stayed at
    # or above the 1000.0 sentinel; that should not happen, so warn in the log
    if (best_loss >= 1000.0):
        logger.warning('All models had loss greater than 1000.0')
        logger.warning('Returning parameters for first remaining model')
        keys = list(modelBlock.keys())
        keys.remove("Meta")
        best_key = next(iter(keys))

    lr = modelBlock[best_key]["Learning"]
    batch_size = modelBlock[best_key]["Batch"]
    weight_decay = modelBlock[best_key]["Weight_Decay"]
    acc = modelBlock[best_key]["Accuracy"]
    avg_loss = modelBlock[best_key]["Loss"]

    resultBlock["Meta"]["Learning"] = modelBlock[best_key]["Learning"]
    resultBlock["Meta"]["Batch"] = modelBlock[best_key]["Batch"]
    resultBlock["Meta"]["Weight_Decay"] = modelBlock[best_key]["Weight_Decay"]
    resultBlock["Meta"]["Acc_All"] = modelBlock[best_key]["Acc_All"]
    resultBlock["Meta"]["Loss"] = modelBlock[best_key]["Loss"]
    resultBlock["Meta"]["Best_Key"] = best_key

    torch.save(resultBlock, result_file)
    modelBlock_State = convertStateDict(modelBlock)
    torch.save(modelBlock_State, model_file)

    if (not (model_type in hyperparameter)):
        hyperparameter[model_type] = {}
    if (not (layers in hyperparameter[model_type])):
        hyperparameter[model_type][layers] = {}
    hyperparameter[model_type][layers]["Learning"] = lr
    hyperparameter[model_type][layers]["Batch"] = batch_size
    hyperparameter[model_type][layers]["Weight_Decay"] = weight_decay
    hyperparameter[model_type][layers]["Acc"] = acc
    hyperparameter[model_type][layers]["Loss"] = avg_loss

    torch.save(resultBlock, result_file)
    torch.save(hyperparameter, hyperopt_file)
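main() receives args from an argument parser defined elsewhere in the file; a minimal sketch consistent with the attributes read above might be (option names inferred from usage, defaults hypothetical):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--n_models', type=int, default=10)
parser.add_argument('--n_epochs', type=int, default=20)
parser.add_argument('--hyp_epochs', type=int, default=3)
parser.add_argument('--resume', default='')
parser.add_argument('--model', default='MLP')
parser.add_argument('--layers', type=int, default=2)
parser.add_argument('--image_size', type=int, default=16)
parser.add_argument('--exp_name', default='exp0')
parser.add_argument('--use_gpu', action='store_true')
main(parser.parse_args())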
Code example #8
import numpy as np
from train import trainModel

input_path = "data/final_dataset.csv"
target_path = "data/salary_final.csv"

test_input_path = "data/final_dataset.csv"
test_target_path = "data/salary_final.csv"

param_grid = {
    #  'reg__max_depth': np.arange(4, 16, 4),
    #  'reg__n_estimators': np.arange(20, 100, 20)
}

# Model names:
#   - XGBRegressor ...............'XGBoost'
#   - RandomForestRegressor ......'RandomForest'
trainModel(input_path,
           target_path,
           test_input_path,
           test_target_path,
           grid=param_grid,
           model_name='LinearRegression',
           min_cut=100,
           max_cut=700000,
           cv=4,
           eval=True)
Code example #9
File: main.py Project: nandha1nks/LID
import argparse

import yaml
import torch
import numpy as np

import train
import test

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', dest="config", default="config.yaml")
    args = parser.parse_args()
    with open(args.config, 'r') as stream:  # use the parsed path instead of a hard-coded name
        try:
            config = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            print(exc)

    if config['train']:
        model, lossHist = train.trainModel(config)
        # Saves only the weights; at load time a model object must be created
        # and the state dict loaded into it
        torch.save(model.state_dict(), config['modelLoc'] + config['runName'] +
                   "Weights.pth")

        # torch.save(model, config['modelLoc'] + config['runName'] + "Full.pth")
        # would save the entire model (not advised; it can break in many ways
        # due to directory/pickling issues)

    if config['test']:
        confMatrix, metrics = test.testModel(config)
        np.save(config['modelLoc'] + config['runName'] + "Confusion.npy",confMatrix)
        f = open(config['modelLoc'] + config['runName'] + "Metrics.txt",'w+')
        f.write(metrics)
        f.close()
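A minimal config.yaml consistent with the keys this script reads (train, test, modelLoc, runName) might look like this; every value below is hypothetical:

# hypothetical config.yaml
train: true
test: true
modelLoc: models/
runName: run1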
        
Code example #10
File: main.py Project: pythonshihe/train_codes
def main():
    feature, label = get_feature()  # get the features and labels extracted from the segmented images
    # feed the features and labels into the model for training
    result = trainModel(feature, label)  # train the model
Code example #11
def main(args):
	##################################################################
	# Top level code for running hyperoptimization
	# User specifies model type and layer number
	# Code then finds optimal hyperparameters for all
	# 		combinations of models/layers
	##################################################################

	# Load in arguments
	n_models = args.n_models
	n_epochs = args.n_epochs
	load_experiment = args.resume
	load_result = args.resume_result
	hyper_path = args.hyper
	model_type = args.model
	layers = args.layers
	image_size = args.image_size
	lr = args.lr
	exp_name = args.exp_name

	# Make sure the result directories exist; create them if they don't
	directory_logs = '../../PredPrey_Results/Propagation/Logs'
	directory_results = '../../PredPrey_Results/Propagation/ResultBlock'

	if not os.path.exists(directory_logs):
		os.makedirs(directory_logs)

	if not os.path.exists(directory_results):
		os.makedirs(directory_results)


	# Create name for result folders
	log_file = '../../PredPrey_Results/Propagation/Logs/'+ exp_name + '.log'
	result_file = '../../PredPrey_Results/Propagation/ResultBlock/resultBlock_' + exp_name + '.pth.tar'
	model_file = '../../PredPrey_Results/Propagation/ResultBlock/modelBlock_' + exp_name + '.pth.tar'

	# Initialize logger
	logger = logging.getLogger(__name__)
	logger.setLevel(logging.INFO)

	formatter = logging.Formatter('[%(asctime)s:%(name)s]:%(message)s')

	file_handler = logging.FileHandler(log_file)
	file_handler.setFormatter(formatter)

	stream_handler = logging.StreamHandler()
	stream_handler.setFormatter(formatter)

	logger.addHandler(file_handler)
	logger.addHandler(stream_handler)

	# Print experiment parameters to log
	logger.info('Training %s models with %i layers for %d epochs.' % (model_type, layers, n_epochs))
	logger.info('Number of models: %d' % (n_models))


	# TODO: change this so that the hyperparameter block can only be loaded
	if os.path.isfile(hyper_path):
		print('Loading hyperparameter block.')
		hyperparameter = torch.load(hyper_path)
	else:
		print("=> no hyperparameter block found at '{}'".format(hyper_path))
		hyperparameter = {}
		hyperparameter[model_type] = {}
		hyperparameter[model_type][layers] = {"Learning": lr, "Batch": 32, "Weight_Decay": 0}


	# Set up experiment block
	num_nodes = image_size**2
	loss_fn = nn.MSELoss()
	dtype = torch.FloatTensor
	if args.use_gpu:
		print('GPU is used.')
		dtype = torch.cuda.FloatTensor
	

	if ((load_experiment) and os.path.isfile(load_experiment) and os.path.isfile(load_result)):
		modelBlock = torch.load(load_experiment)
		resultBlock = torch.load(load_result)
	else:
		print("=> Generating new result block")
		modelBlock, resultBlock = generateDictionary_Exp(n_models, model_type, layers, num_nodes, num_nodes,
			image_size, loss_fn, dtype, hyperparameter)



	# Figure out how many epochs are left to train
	epochs_remaining = n_epochs - modelBlock["Meta"]["Epochs_Trained"]

	trainModel(modelBlock, epochs_remaining, log_file)

	# torch.save(resultBlock, result_file)

	modelBlock_State = convertStateDict(modelBlock)
	torch.save(modelBlock_State, model_file)
Code example #12
    if (args.action == "train"):  # hypothetical guard; the excerpt begins mid-branch
        lmbda = 5 if not args.lmbda else int(args.lmbda)
        model = StyleTransfer(device)
        loss_fn = ContentStyleLoss(lmbda).to(device)

        contentTrainPath = args.content_image
        styleTrainPath = args.style_image

        # if args.lr:
        #     lr = args.lr
        # if args.wd:
        #     wd = args.wd

        model = trainModel(model,
                           loss_fn,
                           *getDataset(contentTrainPath,
                                       styleTrainPath,
                                       val=args.val,
                                       bs=args.bs),
                           device=device)

    elif (args.action == "run_multiple_styles"):
        contentImage = args.content_image
        styleImages = args.style_image.split(',')

        contName = NameExtract(contentImage)

        if args.weights is None:
            weights = [1 / len(styleImages) for _ in range(len(styleImages))]
        else:
            weights = args.weights.split(',')
            weights = [float(i) for i in weights]
Code example #13
File: example.py Project: CVaranese/SteveTheBot
# imports inferred from usage in this excerpt; check_port, signal_handler,
# batchSize, and the project helpers (train, rewards, dataFix, Model, Utility)
# are defined elsewhere in the repository
import argparse
import math
import random
import signal
from pathlib import Path

import numpy as np
import keras
import melee

def main():
    chain = None

    parser = argparse.ArgumentParser(
        description='Example of libmelee in action')
    parser.add_argument('--port',
                        '-p',
                        type=check_port,
                        help='The controller port your AI will play on',
                        default=2)
    parser.add_argument('--opponent',
                        '-o',
                        type=check_port,
                        help='The controller port the opponent will play on',
                        default=1)
    parser.add_argument('--debug',
                        '-d',
                        action='store_true',
                        help='Debug mode. Creates a CSV of all game state')
    parser.add_argument(
        '--framerecord',
        '-r',
        default=False,
        action='store_true',
        help='Records frame data from the match, stores into framedata.csv')
    parser.add_argument('model',
                        type=str,
                        default="Steve",
                        help='The file of the AI')

    args = parser.parse_args()

    log = None
    if args.debug:
        log = melee.logger.Logger()

    framedata = melee.framedata.FrameData(args.framerecord)
    opponent_type = melee.enums.ControllerType.UNPLUGGED

    #Create our Dolphin object. This will be the primary object that we will interface with
    dolphin = melee.dolphin.Dolphin(ai_port=args.port,
                                    opponent_port=args.opponent,
                                    opponent_type=opponent_type,
                                    logger=log)
    #Create our GameState object for the dolphin instance
    gamestate = melee.gamestate.GameState(dolphin)
    #Create our Controller object that we can press buttons on
    controller = melee.controller.Controller(port=args.port, dolphin=dolphin)

    signal.signal(signal.SIGINT, signal_handler)

    #Run dolphin and render the output
    dolphin.run(render=True)

    #Plug our controller in
    #   Due to how named pipes work, this has to come AFTER running dolphin
    #   NOTE: If you're loading a movie file, don't connect the controller,
    #   dolphin will hang waiting for input and never receive it
    controller.connect()

    model = []
    modelFile = Path("models/" + args.model)
    if modelFile.exists():
        model = keras.models.load_model('models/' + args.model)
    else:
        model = Model.buildModel()
        model.save('models/' + args.model)

    #Main loop
    numGames = 0
    allMemories = np.array([])
    gameMemory = []
    prevObservation = []
    observation = []
    output = []
    score = 0
    totalScore = []
    pickedCPU = 0
    firstStart = False
    stepNum = 0
    batchNum = 0
    fixedData = []
    recordedMem = []
    tempMem = []
    prediction = []
    while True:
        stepNum += 1
        if stepNum >= 1000:
            print("Still Going!")
            stepNum = 0
        #"step" to the next frame
        gamestate.step()
        #What menu are we in?
        if gamestate.menu_state == melee.enums.Menu.IN_GAME:
            tempReward = None
            prevObservation = observation
            observation = gamestate.tolist()
            fixedData = dataFix.normalizeData(observation)
            if len(prevObservation) > 0:
                score += rewards.calcReward(prevObservation, observation)
            if len(prediction) == 0:
                fixedData = np.append(fixedData,
                                      keras.utils.to_categorical(30))
            else:
                fixedData = np.append(fixedData, prediction)
            fixedArray = []
            for i in range(len(gameMemory) - 14, len(gameMemory)):
                if i < 0:
                    fixedArray.append(np.zeros(50))
                else:
                    fixedArray.append(gameMemory[i][0])
            fixedArray.append(fixedData)
            fixedArray = np.array(fixedArray).reshape(1, 15, 50)
            firstPrediction = model.predict(fixedArray)
            #firstPrediction = model.predict(np.array(fixedData).reshape(-1, 1, len(fixedData)))
            prediction = firstPrediction
            if stepNum == 999:
                print("Before Rand: ", prediction)
                print("OBS: ", fixedData)
                print("Random: ", batchNum, "num: ",
                      math.exp(-.5 * (.25 + batchNum)))
            maxIndex = np.argmax(prediction, axis=1)[0]
            if random.random() < (.05):  #, math.exp((-.5)*(.25 + batchNum))):
                maxIndex = random.randint(0, 30)
            prediction = keras.utils.to_categorical(maxIndex,
                                                    num_classes=len(
                                                        prediction[0]))

            button, stick = Utility.decide_action(maxIndex)
            controller.simple_press(stick[0], stick[1], button)

            gameMemory.append([(fixedData), (prediction), tempReward])
            recordedMem.append([fixedData, firstPrediction, tempReward])
            if stepNum == 999:
                #print("CURGAMEMEM: ", gameMemory[len(gameMemory)-1])
                #print("FIRSTGAMEMEM: ", gameMemory[0])
                #print("SECONDGAMEMEM: ", gameMemory[1])
                print("After Rand: ", prediction)
                #pass

            #plug prediction in to move function

        #If we're at the character select screen, choose our character
        elif gamestate.menu_state == melee.enums.Menu.CHARACTER_SELECT:
            #if (gamestate.player[1].controller_status != melee.enums.ControllerStatus.CONTROLLER_CPU
            #    and pickedCPU == 0):
            #    melee.menuhelper.changecontrollerstatus(controller, gamestate, 1,
            #        melee.enums.ControllerStatus.CONTROLLER_CPU, character=melee.enums.Character.GANONDORF)
            #else:
            #    pickedCPU = 1
            melee.menuhelper.choosecharacter(
                character=melee.enums.Character.KIRBY,
                gamestate=gamestate,
                controller=controller,
                swag=True,
                start=True)
        #If we're at the postgame scores screen, spam START
        elif gamestate.menu_state == melee.enums.Menu.POSTGAME_SCORES:
            firstStart = True
            if len(gameMemory) > 0:
                gameMemory[0][2] = score
                print("ADDED SCORE: ", score)
                print("GAME MEM: ", gameMemory[0])
                totalScore.append(score)
                print("SCORE: ", score)
                print("lenMemories: ", len(allMemories))
                # FIX FIX FIX (list of game memories)
                print("NUM: ", numGames)
                if len(allMemories) == 0:
                    allMemories = gameMemory
                else:
                    allMemories = np.concatenate([allMemories, gameMemory],
                                                 axis=0)
                print(allMemories[len(allMemories) - 1])
                gameMemory = []
                score = 0
                numGames += 1
                if numGames >= batchSize:
                    batchNum += 1
                    allMemories = rewards.fillRewards(allMemories, totalScore,
                                                      numGames)
                    recordedMem = rewards.fillRewards(recordedMem, totalScore,
                                                      numGames)
                    f = open('logs/batch' + str(batchNum) + '.txt', 'w+')
                    # for val in recordedMem:
                    #     f.write(str(val) + '\n')
                    f.close()  # close the handle even though writing is disabled
                    model = train.trainModel(allMemories, model)
                    model.save("models/" + args.model)
                    numGames = 0
                    allMemories = np.array([])
                    prevObservation = []
                    recordedMem = []
                    observation = []
                    totalScore = []
            melee.menuhelper.skippostgame(controller=controller)
        #If we're at the stage select screen, choose a stage
        elif gamestate.menu_state == melee.enums.Menu.STAGE_SELECT:
            melee.menuhelper.choosestage(
                stage=melee.enums.Stage.FINAL_DESTINATION,
                gamestate=gamestate,
                controller=controller)
        #Flush any button presses queued up
        controller.flush()
        if log:
            log.logframe(gamestate)
            log.writeframe()
Code example #14
File: chatbot.py Project: IAmOZRules/PyBot
if __name__ == "__main__":
    if args.gui:
        from gui import ChatApplication
        app = ChatApplication()
        app.run()

    if args.cli:
        from chat import bot_name, get_response
        from common import *
        print(greeting)

        while True:
            print(you, end="")
            sentence = input()
            if sentence == "exit" or sentence == "goodbye":
                print(
                    f'{bot_name}: Thank you for visiting! I hope to see you again!\n'
                )
                break

            response, _ = get_response(sentence)
            print(f'{bot_name}: {response}\n')

    if args.voice:
        from voice import VoiceChat
        VoiceChat()

    if args.train:
        from train import trainModel
        trainModel(args.train)
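A parser consistent with the flags used above might look like this (a sketch; the actual chatbot.py may define it differently):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--gui', action='store_true')
parser.add_argument('--cli', action='store_true')
parser.add_argument('--voice', action='store_true')
parser.add_argument('--train', help='path to the training data passed to trainModel')
args = parser.parse_args()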
Code example #15
import numpy as np
from train import trainModel

input_path = 'mirek_data_2/2019_instances_version_2.npy'
target_path = 'mirek_data_2/2019_labels_version_2.npy'

param_grid = {
    'reg__max_depth': np.arange(5, 20, 5),
    'reg__n_estimators': np.arange(40, 120, 10)
}

# Model names:
#   - XGBRegressor ...............'XGBoost'
#   - RandomForestRegressor ......'RandomForest'
trainModel(input_path,
           target_path,
           grid=param_grid,
           model_name='XGBoost',
           min_cut=300,
           max_cut=20000000,
           cv=3,
           eval=True)
Code example #16
maxiter         = 5000000       # maximum number of iterations
learning_rate   = 0.00001       # learning rate
epsilon_min     = 0.1           # minimum epsilon

W               = 32            # input matrix size
M               = 1000          # memory buffer capacity
B               = 10            # parameter theta   update interval
C               = 1000          # parameter theta^* update interval (TargetQ)
Gamma           = 0.99          # discount factor
P               = 0             # transaction penalty: 0.05 (%) while training, 0 for testing
Beta            = 32            # batch size
#################################################################################

# initialize (DRL and TR are project modules, imported elsewhere in the file)
DRead           = DRL.DataReaderRL()
Model           = TR.trainModel(1.0, epsilon_min, maxiter, Beta, B, C, learning_rate, P)



######## Test Model ###########
'''
# folder list for testing 
folderlist                          =  DRead.get_filelist(  '../Sample_Testing/')
sess,saver, state, isTrain, rho_eta = Model.TestModel_ConstructGraph( W,W,FSize,PSize,PStride,NumAction )

for i in range ( 0, len( folderlist) ):

    print(folderlist[i])
   
    filepathX       =   folderlist[i] + 'inputX.txt'
    filepathY       =   folderlist[i] + 'inputY.txt' 
Code example #17
import argparse
from train import trainModel
from test import testModel

def parseArguments():
    parser = argparse.ArgumentParser()
    parser.add_argument('-t',
                        '--train',
                        action='store_true',
                        help='train the model',
                        dest='train')

    return parser.parse_args()

if __name__ == '__main__':
    args = parseArguments()

    if args.train:
        trainModel()
    else:
        testModel()
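Invoked as, for example, python main.py --train to train the model, or with no flag to run testModel() instead (the script name here is an assumption).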
Code example #18
File: run.py Project: vasilnv/NLP-Deep-Learning
# the excerpt begins inside a data-preparation branch; its guard is not shown
    testCorpus, trainCorpus, char2id = utils.prepareData(corpusFileName, startChar, endChar, unkChar, padChar)
    pickle.dump(testCorpus, open(testDataFileName, 'wb'))
    pickle.dump(trainCorpus, open(trainDataFileName, 'wb'))
    pickle.dump(char2id, open(char2idFileName, 'wb'))
    print('Data prepared.')

if len(sys.argv)>1 and sys.argv[1] == 'train':
    testCorpus = pickle.load(open(testDataFileName, 'rb'))
    trainCorpus = pickle.load(open(trainDataFileName, 'rb'))
    char2id = pickle.load(open(char2idFileName, 'rb'))

    lm = model.LSTMLanguageModelPack(char_emb_size, hid_size, char2id, unkChar, padChar, endChar, lstm_layers=lstm_layers, dropout=dropout).to(device)
    if len(sys.argv)>2: lm.load(sys.argv[2])

    optimizer = torch.optim.Adam(lm.parameters(), lr=learning_rate)
    train.trainModel(trainCorpus, lm, optimizer, epochs, batchSize)
    lm.save(modelFileName)
    print('Model perplexity: ', train.perplexity(lm, testCorpus, batchSize))

if len(sys.argv)>1 and sys.argv[1] == 'perplexity':
    testCorpus = pickle.load(open(testDataFileName, 'rb'))
    char2id = pickle.load(open(char2idFileName, 'rb'))
    lm = model.LSTMLanguageModelPack(char_emb_size, hid_size, char2id, unkChar, padChar, endChar, lstm_layers=lstm_layers, dropout=dropout)
    lm.load(modelFileName)
    print('Model perplexity: ', train.perplexity(lm, testCorpus, batchSize))

if len(sys.argv)>1 and sys.argv[1] == 'generate':
    if len(sys.argv)>2: seed = sys.argv[2]
    else: seed = startChar

    assert seed[0] == startChar
Code example #19
import numpy as np
from train import trainModel

input_path = "data/final_dataset.csv"
target_path = "data/salary_final.csv"

test_input_path = "data/final_dataset.csv"
test_target_path = "data/salary_final.csv"

param_grid = {
    'reg__max_depth': np.arange(4, 16, 4),
    'reg__n_estimators': np.arange(20, 100, 20)
}

# Model names:
#   - XGBRegressor ...............'XGBoost'
#   - RandomForestRegressor ......'RandomForest'
trainModel(input_path,
           target_path,
           test_input_path,
           test_target_path,
           grid=param_grid,
           model_name='XGBoost',
           min_cut=100,
           max_cut=700000,
           cv=4,
           eval=True)
Code example #20
File: run.py Project: sbengali/KaggleProblem
# load data
import train as tr
# sklearn.cross_validation is the legacy pre-0.18 API; it was removed in 0.20
from sklearn.cross_validation import KFold

if __name__ == '__main__':
    train, encoder = tr.loadTrainSet()
    cv = KFold(train.shape[0], n_folds=8, shuffle=True)
    # Classify the data and pass the parameters to the training model
    train_model = tr.trainModel(tr.classification_pipe, train.ingredients, train.cuisine, cv, n_jobs=-1)

    test = tr.loadTestSet()
    test['cuisine'] = train_model.predict(test.ingredients)
    test['cuisine'] = encoder.inverse_transform(test['cuisine'])
    # print(test['cuisine'])
    test[['id', 'cuisine']].to_csv("../output/prediction.csv", index=False)
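On a current scikit-learn the equivalent splitter comes from model_selection; note the changed constructor signature (a sketch, assuming tr.trainModel accepts any splitter as cv):

from sklearn.model_selection import KFold

# n_splits replaces n_folds; the dataset size is no longer passed to the constructor
cv = KFold(n_splits=8, shuffle=True)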