Example #1
def test():
    from Trainer import Trainer
    from SimpleConvNet import SimpleConvNet  # assumed module name, mirroring the Trainer import
    from dataset.mnist import load_mnist
    (x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)

    max_epochs = 1

    network = SimpleConvNet(input_dim=(1, 28, 28),
                            conv_params={'filter_num': 30, 'filter_size': 5, 'padding': 0, 'stride': 1},
                            hidden_size=100, output_size=10, weight_init_std=0.01)

    trainer = Trainer(network, x_train, t_train, x_test, t_test, epochs=max_epochs,
                      mini_batch_size=100, optimizer='Adam', optimizer_param={'lr': 0.001},
                      evaluate_sample_num_per_epoch=1000)

    # network.showNetwork()

    print("======================================================================")
    print("Max Iteration : {}".format(trainer.max_iter))
    print("======================================================================")

    trainer.train()

    # Save the trained parameters
    print("save .........................")
    network.save_params("ttt.pkl")
    print("Saved network parameters!!!")
Example #2
def runTrain(nnArchitecture=None):

    timestampTime = time.strftime("%H%M%S")
    timestampDate = time.strftime("%d%m%Y")
    timestampLaunch = timestampDate + '-' + timestampTime

    TrainPath = nnArchitecture['TrainPath']
    ValidPath = nnArchitecture['ValidPath']
    nnClassCount = nclasses

    #---- Training settings: batch size, maximum number of epochs
    trBatchSize = 4
    trMaxEpoch = 25
    transResize = 256
    transCrop = 224
    learningRate = 0.0001
    print('Training NN architecture = ', nnArchitecture['name'])

    info_dict = {
        'batch_size': trBatchSize,
        'architecture': nnArchitecture['name'],
        'number of epochs': trMaxEpoch,
        'learningRate': learningRate,
        'train path': TrainPath,
        'valid_path': ValidPath,
        'number of classes': nclasses,
        'Date-Time': timestampLaunch
    }

    with open('../models/config' + str(timestampLaunch) + '.txt',
              'w') as outFile:
        json.dump(info_dict, outFile)
    Trainer.train(TrainPath, ValidPath, nnArchitecture, nnClassCount,
                  transResize, transCrop, trBatchSize, trMaxEpoch,
                  learningRate, timestampLaunch, nnArchitecture['ckpt'])
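runTrain reads the keys 'TrainPath', 'ValidPath', 'name' and 'ckpt' from its argument. A hedged call sketch, with placeholder values:

runTrain(nnArchitecture={
    'name': 'DenseNet121',         # placeholder label
    'TrainPath': '../data/train',  # placeholder path
    'ValidPath': '../data/valid',  # placeholder path
    'ckpt': None,                  # nothing to resume from
})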
Example #3
def runTrain(nnArchitecture=None):

    timestampTime = time.strftime("%H%M%S")
    timestampDate = time.strftime("%d%m%Y")
    timestampLaunch = timestampDate + '-' + timestampTime

    TrainPath = nnArchitecture['TrainPath']
    ValidPath = nnArchitecture['ValidPath']

    nnClassCount = nclasses

    #---- Training settings: batch size, maximum number of epochs
    trBatchSize = 4
    trMaxEpoch = 60 * 4

    print('Training NN architecture = ', nnArchitecture['name'])
    info_dict = {
        'batch_size': trBatchSize,
        'architecture': nnArchitecture['name'],
        'number of epochs': trMaxEpoch,
        'train path': TrainPath,
        'valid_path': ValidPath,
        'number of classes': nclasses,
        'Date-Time': timestampLaunch
    }
    if not os.path.exists('../modelsclaheWC11'):
        os.mkdir('../modelsclaheWC11')
    with open('../modelsclaheWC11/config.txt', 'w') as outFile:
        json.dump(info_dict, outFile)

    Trainer.train(TrainPath, ValidPath, nnArchitecture, nnClassCount,
                  trBatchSize, trMaxEpoch, timestampLaunch, nnArchitecture['ckpt'])
Example #4
def main(datadir, dataset_start_idx, dataset_end_idx, data_use_num,
         sp=256, first_layer_ch=24,
         batch_size=8, epoch_num=40,
         discriminator_out_res=32,
         mode='retrain', pre_model_dir=None):
    root_dir = os.getcwd()
    graph_dir = os.path.join(root_dir, 'graph%dp' % sp)
    results_dir = os.path.join(root_dir, 'results%dp' % sp)
    code_bk_dir = os.path.join(results_dir, 'code_bk')
    if mode == 'retrain' or mode == 'finetune':
        import shutil
        if os.path.exists(graph_dir): shutil.rmtree(graph_dir)
        if os.path.exists(results_dir): shutil.rmtree(results_dir)
        os.mkdir(results_dir)
        os.mkdir(code_bk_dir)

        # backup source code
        file_list = os.listdir(root_dir)
        for item in file_list:
            full_name = os.path.join(root_dir, item)
            if os.path.isfile(full_name) and item.endswith('.py'):
                shutil.copy(full_name, os.path.join(code_bk_dir, item))

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.8)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

    logger.set_log_file(os.path.join(results_dir, 'log.txt'))
    logger.write('Constructing network graph...')

    trainer = Trainer(sess, sp, discriminator_out_res)
    trainer.train(datadir, dataset_start_idx, dataset_end_idx, data_use_num, 24, first_layer_ch,
                  results_dir=results_dir, graph_dir=graph_dir,
                  batch_size=batch_size, epoch_num=epoch_num,
                  mode=mode, pre_model_dir=pre_model_dir)
Example #5
def equation_report_early_switching(eq, func_dict, weight_dict, constraints_dict, size_dict, nruns=10):
    func_set = func_dict[eq]
    tweight = weight_dict[eq]
    constraints_func = constraints_dict[eq]
    nsamples = size_dict[eq]

    trainer = Trainer(path="data//", save=True, load=True, master_file="OtherEquations.csv")
    dl = DEAPLearningSystem(func_list=func_set, ngens=15, algorithm="earlyswitcher", population_size=50)
    weightlist = [tweight for i in range(nruns)]
    no_examples = 1
    for i in range(no_examples):
        data = []
        for weight in weightlist:
            # bind the current weight as a default argument so the closure does not track the loop variable
            dl.set_add_func(lambda dls, x, y, weight=weight: constraints_func(dls, x, y, weight=weight))
            #dl.set_lgml_func(lgml_func)
            df = trainer.predict_equations(dl, no_train_samples=nsamples, eqs=[eq], use_gens=True)
            temp = df.loc[0, :]
            temp["Weight"] = weight
            temp["MSE"] = temp["Error"][0]
            temp["Truth Error"] = temp["Error"][1]
            temp = temp.reindex(index=['Weight', 'Predicted Equation', 'MSE',"Truth Error", 'Time Taken'])
            data.append(temp)
        final_df = pd.DataFrame(data)
        final_df = final_df.set_index("Weight")  # set_index returns a new frame; keep the result
        final_df.to_csv(f"{eq}EarlySwitching{i}.csv")
Example #6
def main(datadir,
         dataset_start_idx,
         dataset_end_idx,
         sp=256,
         first_layer_ch=24,
         discriminator_out_res=32,
         model_dir='./results512p'):
    root_dir = os.getcwd()
    test_dir = os.path.join(root_dir, 'test%dp' % sp)
    if os.path.exists(test_dir): shutil.rmtree(test_dir)
    os.mkdir(test_dir)
    os.mkdir(os.path.join(test_dir, 'gt'))
    os.mkdir(os.path.join(test_dir, 'input'))
    os.mkdir(os.path.join(test_dir, 'output'))

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.8)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

    trainer = Trainer(sess, sp, discriminator_out_res)
    trainer.test(datadir,
                 dataset_start_idx,
                 dataset_end_idx,
                 24,
                 first_layer_ch,
                 results_dir=test_dir,
                 model_dir=model_dir)
Example #8
class TextRNN:
    def __init__(self,
                 DATA_DIR,
                 ALPHA_DIR,
                 MODEL,
                 BATCH_SIZE,
                 HIDDEN_DIM,
                 SEQ_LENGTH,
                 LAYER_NUM,
                 DBG=False):

        set_debug(DBG)

        debug('[TextRNN]: Loading data...')
        X, y, VOCAB_SIZE, ix_to_char, chars = load_data(
            DATA_DIR, ALPHA_DIR, SEQ_LENGTH)

        debug('[TextRNN]: Creating ModelHandler...')
        self.modelhandler = ModelHandler(HIDDEN_DIM, VOCAB_SIZE, LAYER_NUM,
                                         MODEL)
        debug('[TextRNN]: Loading model...')
        self.model = self.modelhandler.load()

        debug('[TextRNN]: Creating Trainer...')
        self.trainer = Trainer(MODEL, self.model, X, y, VOCAB_SIZE, ix_to_char,
                               chars, BATCH_SIZE)

    def train(self, epochs=50):
        debug('[TextRNN]: Training {} times...'.format(epochs))
        self.trainer.train(epochs)

    def generate(self, length, initx):
        debug('[TextRNN]: Generating {} characters...'.format(length))
        return self.trainer.generate(length, initx)
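A hedged usage sketch for the class above; the paths, model type and hyperparameters are placeholders, and generate()'s second argument is assumed to be a seed input:

rnn = TextRNN('data/corpus.txt', 'data/alphabet.txt', 'lstm',
              BATCH_SIZE=64, HIDDEN_DIM=128, SEQ_LENGTH=50, LAYER_NUM=2)
rnn.train(epochs=10)
print(rnn.generate(200, 'T'))  # initx is assumed to be a seed character/sequence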
Example #9
def train_model():
    print('Training')
    trainer = Trainer()
    with keras.backend.get_session().graph.as_default():
        e = emb()
        trainer.train(e)
    return redirect(url_for('admin'))
Example #10
	def __init__(self, session=None, arguments=None):

		self.sess = session
		self.args = arguments

		# Initialize Gym environment.
		self.environment = gym.make(self.args.env)        

		if self.args.env=='MountainCarContinuous-v0':
			input_dimensions = 2
			output_dimensions = 1
		elif self.args.env=='InvertedPendulum-v2':
			input_dimensions = 4
			output_dimensions = 1
		elif self.args.env=='FetchReach-v0':
			input_dimensions = 16
			output_dimensions = 4
		elif self.args.env=='FetchPush-v0':
			input_dimensions = 31
			output_dimensions = 4
		else:
			# fail loudly instead of leaving the dimensions unbound
			raise ValueError('Unsupported environment: ' + self.args.env)

		# Initialize a policy network.
		self.ACModel = ActorCriticModel(input_dimensions,output_dimensions,number_layers=4,hidden_units=40,sess=session,to_train=self.args.train, env=self.args.env)

		# Create the actual network
		if self.args.weights:
			self.ACModel.create_policy_network(session, pretrained_weights=self.args.weights,to_train=self.args.train)
		else:
			self.ACModel.create_policy_network(session, to_train=self.args.train)

		# Initialize a replay memory.
		self.memory = ReplayMemory()

		# Create a trainer instance. 
		self.trainer = Trainer(sess=session,policy=self.ACModel, environment=self.environment, memory=self.memory,args=self.args)
Example #11
def train_model(songs_data):
    """Input: list of data on several songs (could be a single song)
       Output: a model trained on all of the songs"""
    bars = get_bars(songs_data)
    trainer = Trainer(bars)
    m, alphabet = trainer.train()
    return (m, alphabet)
Example #12
def train(NUM_EPOCHS):
    Trainer.train(NUM_EPOCHS, dataset, checkpoint, checkpoint_prefix, encoder,
                  decoder, optimizer)
    rez = f'trained for {NUM_EPOCHS}'
    response = jsonify(rez)
    response.status_code = 202
    return response
Example #13
def main(args):

    print("Loading Training Data:", args.training)
    print("args.featureName:", args.features)
    print("args.PCA:", args.PCA)
    print("args.features:", args.features)
    print("args.imbalance:", args.imbalance)
    print("args.network:", args.network)
    print("args.LR:", args.LR)
    print("args.VLAD_k:", args.VLAD_k)
    print("args.max_epoch:", args.max_epoch)
    print("flush!", flush=True)

    from Dataset import dataset
    my_dataset = dataset()
    my_dataset.loadTrainingDataset(
        path_data=args.training,
        featureName=args.features,
        PCA=args.PCA,
        imbalance=args.imbalance,
        batch_size=args.batch_size,
        window_size_sec=args.WindowSize,
    )
    my_dataset.loadValidationDataset(
        path_data=args.validation,
        featureName=args.features,
        PCA=args.PCA,
        window_size_sec=args.WindowSize,
    )
    my_dataset.loadTestingDataset(
        path_data=args.testing,
        featureName=args.features,
        PCA=args.PCA,
        window_size_sec=args.WindowSize,
    )

    # define Network
    from Network import networkMinutes
    my_network = networkMinutes(my_dataset, args.network, VLAD_k=args.VLAD_k)

    # define Trainer
    from Trainer import Trainer
    my_trainer = Trainer(my_network, my_dataset)
    vals_train, vals_valid, vals_test, model = my_trainer.train(
        epochs=args.max_epoch, learning_rate=args.LR, tflog=args.tflog)
    #vals_train, vals_valid, vals_test, model = 0,0,0,"pippo"
    if (".csv" in args.csv_file and args.jobid >= 0
            and ("BUTTA" not in args.tflog.upper())):
        print("saving results to csv file")
        df = pd.read_csv(args.csv_file, index_col=0)
        # DataFrame.set_value was removed in pandas 1.0; .at is the equivalent accessor
        df.at[args.jobid, "train_mAP"] = vals_train["mAP"]
        df.at[args.jobid, "train_Acc"] = vals_train["accuracy"]
        df.at[args.jobid, "valid_mAP"] = vals_valid["mAP"]
        df.at[args.jobid, "valid_Acc"] = vals_valid["accuracy"]
        df.at[args.jobid, "test_mAP"] = vals_test["mAP"]
        df.at[args.jobid, "test_Acc"] = vals_test["accuracy"]
        print(model)
        df.at[args.jobid, "model"] = model
        df.to_csv(args.csv_file, sep=',', encoding='utf-8')
Example #14
def test_agent_solve_bit_flipping_game():
    AGENTS = [PPO, DDQN, DQN_With_Fixed_Q_Targets, DDQN_With_Prioritised_Experience_Replay, DQN, DQN_HER]
    trainer = Trainer(config, AGENTS)
    results = trainer.run_games_for_agents()
    for agent in AGENTS:
        agent_results = results[agent.agent_name]
        agent_results = np.max(agent_results[0][1][50:])
        assert agent_results >= 0.0, "Failed for {} -- score {}".format(agent.agent_name, agent_results)
Example #15
File: main.py Project: storgi/ASP
def loadTrainer(load: bool, depth: int):
    trainer = Trainer()
    if load:
        trainer.initialize(depth)
    else:
        trainer = trainer.restore("TrainerDepth", depth)
        
    #trainer.dictionary.printDepth(3)
    return trainer
Example #16
    def __init__(self, hidden_size, model_path, interface="Ethernet"):
        self.interface = interface
        self.action_num = 2

        self.trainer = Trainer(hidden_size, Behavior.TEACH, Mode.HYBRID)
        self.detector = DetectionSystem(hidden_size, interface)

        self.trainer.load_model(model_path)
        self.detector.load_model(model_path)
Example #17
def start_squats(source=None, vid=None):
    from Squats import Squats
    from Trainer import Trainer
    print(source, vid)
    cpu = processor == "cpu"
    frame_provider = get_frameProvider(source, vid)
    squats = Squats()
    trainer = Trainer(frame_provider, squats, net)
    trainer.start_training(cpu)
Example #18
File: Engine.py Project: insomnia94/ARG
  def __init__(self, config, vgg, negative_actor, negative_critic, positive_actor, positive_critic, img_path_seqs, name_seqs):
    self.current_path = []
    self.name_seqs = name_seqs
    self.img_path_seqs = img_path_seqs
    self.vgg = vgg
    self.negative_actor = negative_actor
    self.negative_critic = negative_critic
    self.positive_actor = positive_actor
    self.positive_critic = positive_critic
    self.config = config
    self.dataset = config.unicode("dataset").lower()
    self.load_init = config.unicode("load_init", "") # the second argument is the default value
    self.load = config.unicode("load", "")
    self.task = config.unicode("task", "train")
    self.use_partialflow = config.bool("use_partialflow", False)
    self.do_oneshot_or_online_or_offline = self.task in ("oneshot_forward", "oneshot", "online", "offline")
    if self.do_oneshot_or_online_or_offline:
      assert config.int("batch_size_eval", 1) == 1
    self.need_train = self.task == "train" or self.do_oneshot_or_online_or_offline or self.task == "forward_train"
    self.session = tf.InteractiveSession(config=tf.ConfigProto(allow_soft_placement=True))
    self.coordinator = tf.train.Coordinator()
    self.valid_data = load_dataset(config, "valid", self.session, self.coordinator)
    if self.need_train:
      self.train_data = load_dataset(config, "train", self.session, self.coordinator)

    self.num_epochs = config.int("num_epochs", 1000)
    self.model = config.unicode("model")
    self.model_base_dir = config.dir("model_dir", "models")
    self.model_dir = self.model_base_dir + self.model + "/"
    self.save = config.bool("save", True)

    self.global_step = tf.Variable(0, name='global_step', trainable=False)
    self.start_epoch = 0
    reuse_variables = None
    if self.need_train:
      freeze_batchnorm = config.bool("freeze_batchnorm", False)
      self.train_network = Network(config, self.train_data, self.global_step, training=True,
                                   use_partialflow=self.use_partialflow,
                                   do_oneshot=self.do_oneshot_or_online_or_offline,
                                   freeze_batchnorm=freeze_batchnorm, name="trainnet")
      reuse_variables = True
    else:
      self.train_network = None
    with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):
      self.test_network = Network(config, self.valid_data, self.global_step, training=False,
                                  do_oneshot=self.do_oneshot_or_online_or_offline, use_partialflow=False,
                                  freeze_batchnorm=True, name="testnet")
    print >> log.v1, "number of parameters:", "{:,}".format(self.test_network.n_params)
    self.trainer = Trainer(config, self.train_network, self.test_network, self.global_step, self.session)
    self.saver = tf.train.Saver(max_to_keep=0, pad_step_number=True)
    tf.global_variables_initializer().run()
    tf.local_variables_initializer().run()
    tf.train.start_queue_runners(self.session)
    self.load_init_saver = self._create_load_init_saver()
    if not self.do_oneshot_or_online_or_offline:
      self.try_load_weights()
Example #19
def start_pushup(source=None, vid=None):
    print(source, vid)
    frame_provider = get_frameProvider(source, vid)

    cpu = processor == "cpu"
    from Trainer import Trainer
    from Pushup import Pushup
    pushup = Pushup()
    trainer = Trainer(frame_provider, pushup, net)
    trainer.start_training(cpu)
Example #20
def start_bicepCurl(source=None, vid=None):
    print(source, vid)
    frame_provider = get_frameProvider(source, vid)

    cpu = processor == "cpu"
    from Trainer import Trainer
    from BicepCurl import BicepCurl
    bicepcurl = BicepCurl()
    trainer = Trainer(frame_provider, bicepcurl, net)
    trainer.start_training(cpu)
Example #21
    def compute_base_parameter(self):

        train_binary = Trainer(self.base_image)
        train_binary = train_binary.convert_image_to_binary()
        temp = Labeling(train_binary)
        self.image_binary = temp.get_image()
        self.label_map = temp.get_record()
        self.label_num = temp.get_count()
        temp = Img_Property(self.base_image, 0, self.label_map, self.label_num)
        self.descriptors_base = temp.get_character_property()
Example #22
class Pet:
    def __init__(self, energy=130, energyPerGame=12):
        self.energy = energy
        self.energyPerGame = energyPerGame
        self.numPlayableGames = self.energy // self.energyPerGame
        self.trainer = Trainer()

    def train(self):
        for i in range(self.numPlayableGames):
            self.trainer.train()
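A minimal usage sketch for the Pet wrapper above (Trainer() takes no constructor arguments here, as written):

pet = Pet(energy=130, energyPerGame=12)
print(pet.numPlayableGames)  # 130 // 12 == 10
pet.train()                  # calls trainer.train() once per playable game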
Example #23
def run(arguments):

    parser = argparse.ArgumentParser(description='Perform linear GP evolution for a given environment.')

    # Program configuration
    parser.add_argument('--minps', dest='min_prog_size', type=int, help='Minimum number of instructions per Program', default=32)
    parser.add_argument('--maxps', dest='max_prog_size', type=int, help='Maximum number of instructions per Program', default=1024)
    parser.add_argument('--padd', dest='padd', type=float, help='Instruction addition strength', default=0.7)
    parser.add_argument('--pdel', dest='pdel', type=float, help='Instruction deletion strength', default=0.7)
    parser.add_argument('--pmut', dest='pmut', type=float, help='Instruction mutation strength', default=0.7)

    # Trainer configuration
    parser.add_argument('--generations', dest='num_generations', type=int, help='Number of generations over which evolution is performed', default=50)
    parser.add_argument('--pop', dest='population_size', type=int, help='Learner population size', default=200)
    parser.add_argument('--keep', dest='percent_keep', type=float, help='Percentage of surviving Learners', default=0.3)
    # caution: argparse's type=bool treats any non-empty string as True (affects --fast and --verbose)
    parser.add_argument('--fast', dest='fast_mode', type=bool, help='Skip some re-evaluations', default=True)
    parser.add_argument('--skips', dest='num_skips', type=int, help='Number of generations over which to skip re-evaluation', default=3)
    parser.add_argument('--episodes', dest='num_eps_per_gen', type=int, help='Number of episodes over which an agent is evaluated each generation', default=3)
    parser.add_argument('--verbose', dest='verbose', type=bool, help='Do print out info to the console during evolution', default=True)
    parser.add_argument('--agent', dest='agent_save_name', type=str, help='Name under which to save the evolved agent', default="")

    # Environment configuration
    parser.add_argument('--env', dest='env', type=str, help='OpenAI environment', default="CartPole-v1")
    parser.add_argument('--statespace', dest='statespace', type=int, help='Length of flattened state space', default=4)
    args = parser.parse_args(arguments)

    if args.env != "CartPole-v0" and args.env != "CartPole-v1":
        print("Woops! So far this module only works in the CartPole environment!")
        return

    if args.statespace != 4:
        print("Woops! So far this module only works in the CartPole environment with a statespace size of 4!")
        return

    ConfigureProgram(
        num_inputs      = args.statespace,
        min_prog_size   = args.min_prog_size,
        max_prog_size   = args.max_prog_size,
        p_add           = args.padd,
        p_del           = args.pdel,
        p_mut           = args.pmut)

    ConfigureTrainer(
        num_generations     = args.num_generations,
        population_size     = args.population_size,
        percent_keep        = args.percent_keep,
        fast_mode           = args.fast_mode,
        max_num_skips       = args.num_skips,
        num_eps_per_gen     = args.num_eps_per_gen,
        verbose             = args.verbose,
        agent_save_name     = args.agent_save_name)

    env = gym.make(args.env)
    trainer = Trainer(env)
    trainer.evolve()
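A hedged invocation sketch, passing CLI-style tokens straight to run(); every flag used is one defined above:

run(['--env', 'CartPole-v1', '--generations', '10', '--pop', '100', '--agent', 'cartpole'])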
Example #24
def test_agents_can_play_games_of_different_dimensions():

    config.num_episodes_to_run = 10
    config.hyperparameters["DQN_Agents"]["batch_size"] = 3
    AGENTS = [A2C, A3C, PPO, DDQN, DQN_With_Fixed_Q_Targets, DDQN_With_Prioritised_Experience_Replay, DQN]
    trainer = Trainer(config, AGENTS)
    config.environment = gym.make("CartPole-v0")
    results = trainer.run_games_for_agents()
    for agent in AGENTS:
        assert agent.agent_name in results.keys()

    AGENTS = [TD3, PPO, DDPG]
    config.environment = gym.make("MountainCarContinuous-v0")
    trainer = Trainer(config, AGENTS)
    results = trainer.run_games_for_agents()
    for agent in AGENTS:
        assert agent.agent_name in results.keys()

    AGENTS = [DDQN, SNN_HRL]
    config.environment = Four_Rooms_Environment(15, 15, stochastic_actions_probability=0.25,
                                                random_start_user_place=True, random_goal_place=False)
    trainer = Trainer(config, AGENTS)
    results = trainer.run_games_for_agents()
    for agent in AGENTS:
        assert agent.agent_name in results.keys()
Example #25
def train_models():
    start = timeit.default_timer()
    trainer = Trainer()
    tmp_dir = trainer.train()
    stop = timeit.default_timer()
    # reload models
    predictor.reload(tmp_dir=tmp_dir)
    time = int(stop - start)
    logging.info("Training completed! Time cost: {} min, {} seconds".format(
        str(int(time / 60)), str(time % 60)))
    return
Example #27
def main():
    started = datetime.now()

    trainer = Trainer()
    trainer.train_MNB_classifier()

    finished = datetime.now()

    print 'Started at: ', started
    print 'Finished at: ', finished
    print 'Time taken: ', (finished - started)
Example #28
def train(train_data, val_data, model, file_name, lr=1e-3, write=True):
    loss = torch.nn.CrossEntropyLoss()
    opt = torch.optim.Adam(params=model.parameters(), lr=lr, weight_decay=1e-2)

    trainer = Trainer(model,
                      train_data,
                      val_data,
                      opt,
                      loss,
                      file_name=file_name,
                      save_data=write)
    return trainer.train()
Example #29
def problem_3_pureDataGenerator():
    l = 10
    n = 1000
    for m in [100, 500, 1000]:
        t = Trainer()
        train = t.data_generator(l=l,
                                 m=m,
                                 n=n,
                                 number_of_instances=50000,
                                 noise=False)
        np.save('p3pureX_m=%s' % m, train['x'])
        np.save('p3pureY_m=%s' % m, train['y'])
Example #30
    def test_model_get_saved(self):
        with self.test_session():
            model = OurModel(0)
            trainer = Trainer(model.model,
                              filepath + "Train",
                              filepath + "Valid",
                              filepath + "Test",
                              identifier=0,
                              epochs=1,
                              save_model=True)
            trainer.train()
            self.assertTrue(os.path.exists(trainer.saved_model_path))
Example #31
def problem_3_tuning():
    algorithmList = [
        'Perceptron', 'Perceptron with margin', 'Winnow', 'Winnow with margin',
        'AdaGrad'
    ]
    for m in [100, 500, 1000]:
        print()

        d = problem_3_dataLoader(m, 'train')
        x, y = d['x'], d['y']
        D1_x, D1_y, D2_x, D2_y = d['D1_x'], d['D1_y'], d['D2_x'], d['D2_y']
        t = Trainer()
        t.set_param(l=10, m=m, n=x.shape[1], number_of_instances=x.shape[0])
        initDict = initDictGenerator(n=t.n)
        for algorithm in algorithmList:
            algorithmInit = initDict[algorithm]
            learningRateList = algorithmInit['learning rate']
            marginList = algorithmInit['margin']
            t.learning(algorithm, D1_x, D1_y, initDict=initDict, times=20)
            for lr in learningRateList:
                for mg in marginList:
                    err_rate = t.error_estimate(D2_x, D2_y, lr, mg)
                    mistake = t.mistakeCount(lr, mg)
                    print(
                        'LR: {0: >6s}, MG: {1: >6s}, ER: {2: >6s}, Mis: {3: >6s}'
                        .format(str(lr), str(mg), str(err_rate), str(mistake)))
Example #32
def training():
    trainer = Trainer()
    trainer.Train()

    shutil.rmtree("face")
    os.mkdir("face")

    lbl_registered.destroy()
    lbl_success = tk.Label(window,
                           text="You`re succesfully registered!",
                           font=("Arial", 18),
                           bg="white")
    lbl_success.place(x=132, y=170)
    window.update()
Example #33
def main():
    GPU_USE = 0
    DEVICE = 'cuda:1'  # 0 : gpu0, 1 : gpu1, ...

    TEST_DIR = './dataset/spectrogram/Test/S_Neutral'
    RESULT_DIR = './result2'
    parser = argparse.ArgumentParser()

    parser.add_argument('--test_dir',
                        type=str,
                        default=TEST_DIR,
                        help='log directory')
    parser.add_argument('--result_dir',
                        type=str,
                        default=RESULT_DIR,
                        help='log directory')
    parser.add_argument('--device',
                        type=str,
                        default=DEVICE,
                        help='which device?')
    parser.add_argument('--gpu_use',
                        type=int,
                        default=GPU_USE,
                        help='GPU enable? 0 : cpu, 1 : gpu')

    args = parser.parse_args()

    os.makedirs(args.result_dir, exist_ok=True)

    if args.gpu_use == 1 and torch.cuda.is_available():
        device = torch.device(args.device)
    else:
        device = torch.device('cpu')  # fall back to CPU so device is always defined

    model = CCVAE2().to(device=device)
    checkpoint = torch.load('log3/checkpoint_epoch000002000.pth')
    model.load_state_dict(checkpoint['state_dict'])

    trainer = Trainer(model=model, device=device, args=args)
    with open(join(args.test_dir, 'train.txt'), encoding='utf-8') as f:
        for line in f:
            parts = line.strip().split('|')
            spectrum_path = join(args.result_dir, parts[0])
            wav_path = join(args.result_dir, parts[0].replace('.npy', '.wav'))
            source = np.load(join(args.test_dir, parts[0]))
            source_X = source[:, :-1]
            start_time = time.time()
            trainer.test(source_X, spectrum_path, wav_path)
            print("--- %s seconds spent ---" % (time.time() - start_time))
            print('%s' % parts[0])
Example #34
from GameReplay import replay
import Draw
from OneLayer import OneLayer
from Trainer import Trainer

board_size = 5
learning_rate = 0.1

model = OneLayer(board_size)
trainer = Trainer(model, learning_rate)
trainer.train(1000)

def show_game(random_first=True):
    moves = trainer.play_game(True, random_first)
    replay(moves)

def draw_weights(player, row, filename):
    weights = model.params[(player, "weights")].get_value()
    Draw.draw(weights[row], board_size, board_size, filename)
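Once training finishes, the helpers above can be exercised directly. A hedged sketch (the player id and row index are guesses at valid values):

show_game()                      # replay one self-play game with a random first move
draw_weights(1, 0, 'row0.png')   # visualize row 0 of player 1's weight matrix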
Example #35
# excerpt: h_pool1, W_fc1/b_fc1, weight_variable/bias_variable, x, y_, sess and the
# data_path/s3_bucket/epochs settings are defined earlier in the original file
h_pool1_flat = tf.reshape(h_pool1, [-1, 120 * 160 * 32])
h_fc1 = tf.nn.relu(tf.matmul(h_pool1_flat, W_fc1) + b_fc1)

dropout_keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, dropout_keep_prob)

W_fc2 = weight_variable('layer3',[512, 3])
b_fc2 = bias_variable('layer3',[3])

logits = tf.add(tf.matmul(h_fc1_drop, W_fc2), b_fc2, name='logits')

cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_))
train_step = tf.train.AdamOptimizer(1e-4,name='train_step').minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32),name='accuracy')

model_file = os.path.dirname(os.path.realpath(__file__)) + '/' + os.path.basename(__file__)
trainer = Trainer(data_path=data_path,
                  model_file=model_file,
                  s3_bucket=s3_bucket,
                  epochs=epochs,
                  max_sample_records=100,
                  show_speed=show_speed,
                  s3_sync=s3_sync)

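# note: keep_prob is held at 1.0 in train_feed_dict below, which disables dropout
# during training; a value such as 0.5 there would actually apply dropout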
trainer.train(sess=sess, x=x, y_=y_,
              accuracy=accuracy,
              train_step=train_step,
              train_feed_dict={dropout_keep_prob:1.0},
              test_feed_dict={dropout_keep_prob:1.0})
Example #36
    def runWithoutWndchrm(self):
        tr = Trainer(load=False, loadWndchrm=False)
        tr.runWithoutWndchrm()
        pr = Predictor(load=False, loadWndchrm=False)
        pr.runWithoutWndchrm()
Example #37
    def run(self):
        tr = Trainer(load=False, loadWndchrm=False)
        tr.run()
        pr = Predictor(load=False, loadWndchrm=False)
        return pr.run()
Example #38
second_team_config = [{"id": "728", "moves": ["449", "404", "19", "304"]},  # Bug
                     {"id": "729", "moves": ["449", "247", "421", "70"]},   # Ghost
                     {"id": "730", "moves": ["449", "430", "247", "19"]},   # Steel
                     {"id": "731", "moves": ["449", "53", "280", "58"]},    # Fire
                     {"id": "732", "moves": ["449", "57", "127", "89"]},    # Water
                     {"id": "733", "moves": ["449", "412", "404", "280"]}]  # Grass

third_team_config = [{"id": "734", "moves": ["449", "85", "87", "398"]},    # Electric
                     {"id": "735", "moves": ["449", "94", "414", "85"]},    # Psychic
                     {"id": "736", "moves": ["449", "58", "59", "70"]},     # Ice
                     {"id": "737", "moves": ["449", "337", "280", "19"]},   # Dragon
                     {"id": "738", "moves": ["449", "399", "430", "449"]},  # Dark
                     {"id": "739", "moves": ["449", "15", "85", "414"]}]    # Fairy

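# first_team_config is referenced below but defined earlier in the original file (omitted here)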
first_team = Trainer(manual=False)
second_team = Trainer(manual=False)
third_team = Trainer(manual=False)

first_team.createTeamFromListOfDicts(first_team_config)
second_team.createTeamFromListOfDicts(second_team_config)
third_team.createTeamFromListOfDicts(third_team_config)

opposing_team_list = [first_team, second_team, third_team]

pg = PokeGA(opposing_team_list)


experiment_dir = 'Experiment-' + datetime.datetime.now().strftime("%m-%d-%Y_%H:%M:%S")
os.mkdir(experiment_dir)
results_file = open(experiment_dir + '/Experiment_Run_Results.txt', 'w')
Example #39
File: start.py Project: mprannoy/HFO
def main(args, team1='left', team2='right', rng=numpy.random.RandomState()):
  """Sets up the teams, launches the server and monitor, starts the
  trainer.
  """
  if not os.path.exists(args.logDir):
    os.makedirs(args.logDir)
  num_agents   = args.offenseAgents + args.defenseAgents
  binary_dir   = os.path.dirname(os.path.realpath(__file__))
  server_port  = args.port + num_agents
  coach_port   = args.port + num_agents + 1
  olcoach_port = args.port + num_agents + 2
  serverOptions = ' server::port=%i server::coach_port=%i ' \
                  'server::olcoach_port=%i server::coach=1 ' \
                  'server::game_logging=%i server::text_logging=%i ' \
                  'server::game_log_dir=%s server::text_log_dir=%s '\
                  'server::synch_mode=%i ' \
                  'server::fullstate_l=%i server::fullstate_r=%i' \
                  %(server_port, coach_port, olcoach_port,
                    args.logging, args.logging,
                    args.logDir, args.logDir,
                    args.sync,
                    args.fullstate, args.fullstate)
  team1, team1Cmd = getAgentDirCmd(binary_dir, team1, server_port, coach_port,
                                   args.logDir, args.record)
  team2, team2Cmd = getAgentDirCmd(binary_dir, team2, server_port, coach_port,
                                   args.logDir, args.record)
  try:
    # Launch the Server
    server = launch(SERVER_CMD + serverOptions, name='server')
    time.sleep(0.2)
    assert server.poll() is None,\
      '[start.py] Failed to launch Server with command: \"%s\"' \
      %(SERVER_CMD + serverOptions)
    if not args.headless:
      monitorOptions = ' --port=%i'%(server_port)
      launch(MONITOR_CMD + monitorOptions, name='monitor')
    # Launch the Trainer
    from Trainer import Trainer
    trainer = Trainer(args=args, rng=rng, server_port=server_port,
                      coach_port=coach_port)
    trainer.initComm()
    # Start Team1
    launch(team1Cmd,False)
    trainer.waitOnTeam(True) # wait to make sure of team order
    # Start Team2
    launch(team2Cmd,False)
    trainer.waitOnTeam(False)
    # Make sure all players are connected
    trainer.checkIfAllPlayersConnected()
    trainer.setTeams()
    # Run HFO
    trainer.run(necProcesses)
  except KeyboardInterrupt:
    print '[start.py] Exiting for CTRL-C'
  finally:
    print '[start.py] Cleaning up server and other processes'
    for p in processes:
      try:
        p.send_signal(SIGKILL)
      except:
        pass
      time.sleep(0.1)
Example #40
def train_model(songs_data, tempo):
    durations = get_durations(songs_data, tempo)
    dur_trainer = Trainer(durations)
    dur_model, dur_alphabet = dur_trainer.train()
    return (dur_model, dur_alphabet)
Example #41
import matplotlib.pyplot as plt
import numpy as np
from NeuralNetwork import NeuralNetwork
from Trainer import Trainer

X = np.array(([3, 5], [5, 1], [10, 2]), dtype=float)
y = np.array(([75], [82], [93]), dtype=float)

# Normalize
X = X / np.amax(X, axis=0)
y = y / 100  # Max test score is 100

NN = NeuralNetwork()
T = Trainer(NN)
T.train(X, y)

plt.plot(T.J)
plt.grid(1)
plt.ylabel('Cost')
plt.xlabel('Iterations')
plt.show()
Example #42
# trainX/trainY are used below but never defined in this excerpt; plausible values,
# assumed from the original source, are filled in so the snippet runs (ANN is the
# regularized network class from the same project, not shown here):
trainX = np.array(([3, 5], [5, 1], [10, 2], [6, 1.5]), dtype=float)
trainY = np.array(([75], [82], [93], [70]), dtype=float)

#Testing Data:
testX = np.array(([4, 5.5], [4.5,1], [9,2.5], [6, 2]), dtype=float)
testY = np.array(([70], [89], [85], [75]), dtype=float)

#Normalize:
trainX = trainX/np.amax(trainX, axis=0)
trainY = trainY/100 #Max test score is 100

#Normalize:
testX = testX/np.amax(testX, axis=0)
testY = testY/100 #Max test score is 100

#Train network with new data:
NN = ANN(Lambda=0.0001)

T = Trainer(NN)
T.train(trainX, trainY, testX, testY)

  
# yHat = NN.forward(X)
# print(yHat)
# print(y)

# print(NN.W1)
# print()
# print(NN.W2)
# print()
# print(NN.W1**2)
# print()
# print(NN.W2**2)
# print()
Example #43
def start(job_id, dataset_id=None, server_id='local', insights=False, insights_sample_path=None):
    """
    Starts the training process with all logging of a job_id
    """

    aetros_backend = AetrosBackend(job_id)

    if '/' in job_id:
        print("...")
        job_id = aetros_backend.create_job(job_id, server_id=server_id, dataset_id=dataset_id, insights=insights)
        if job_id is None:
            exit(1)

        print("Training '%s' created and started. Open http://%s/trainer/app?training=%s to monitor the training." %
              (job_id, aetros_backend.host, job_id))
    else:
        print("Training '%s' restarted. Open http://%s/trainer/app?training=%s to monitor the training." %
              (job_id, aetros_backend.host, job_id))

    aetros_backend.job_id = job_id
    job = aetros_backend.get_job()

    if job is None or job == 'Job not found':
        raise Exception('Training not found. Have you configured your token correctly?')

    if not isinstance(job, dict):
        raise Exception('Training does not exist. Make sure you created the job via AETROS TRAINER')

    if not len(job['config']):
        raise Exception('Training does not have a configuration. Make sure you created the job via AETROS TRAINER')

    network_id = job['networkId']

    aetros_backend.job_started(job_id, os.getpid())

    ensure_dir('networks/%s/%s' % (network_id, job_id))

    log = io.open('networks/%s/%s/network.log' % (network_id, job_id), 'w', encoding='utf8')
    log.truncate()

    job_model = JobModel(aetros_backend, job)
    general_logger = GeneralLogger(job, log, aetros_backend)

    print("start network ...")

    from KerasLogger import KerasLogger
    trainer = Trainer(aetros_backend, job_model, general_logger)
    keras_logger = KerasLogger(trainer, aetros_backend, job_model, general_logger)
    keras_logger.insights_sample_path = insights_sample_path
    trainer.callbacks.append(keras_logger)

    sys.stdout = general_logger
    sys.stderr = general_logger

    job['running'] = True

    monitoringThread = MonitoringThread(job, aetros_backend, trainer)
    monitoringThread.daemon = True
    monitoringThread.start()
    network.collect_system_information(trainer)

    def ctrlc(sig, frame):
        print("signal %s received\n" % id)
        raise KeyboardInterrupt("CTRL-C!")

    signal.signal(signal.SIGINT, ctrlc)

    try:
        print("Setup training")
        network.job_prepare(job)

        print("Start training")
        network.job_start(job_model, trainer, keras_logger, general_logger)

        job['running'] = False
        job_model.sync_weights()
        aetros_backend.stop_syncer()
        aetros_backend.post('job/stopped', json={'id': job_model.id, 'status': 'DONE'})

        print("done.")
        sys.exit(0)
    except KeyboardInterrupt:
        trainer.set_status('STOPPING')
        print('Early stopping ...')

        if aetros_backend.stop_requested:
            print(' ... stop requested through trainer.')

        if trainer.model:
            trainer.model.stop_training = True

        monitoringThread.stop()
        job_model.sync_weights()
        aetros_backend.stop_syncer()
        aetros_backend.post('job/stopped', json={'id': job_model.id, 'status': 'EARLY STOP'})
        print("out.")
        sys.exit(1)
    except Exception as e:
        print("Crashed ...")

        if trainer.model:
            trainer.model.stop_training = True

        log.write(unicode(traceback.format_exc()))
        logging.error(traceback.format_exc())

        monitoringThread.stop()
        aetros_backend.stop_syncer()
        aetros_backend.post('job/stopped', json={'id': job_model.id, 'status': 'CRASHED', 'error': e.message})
        print("out.")
        raise e
Example #44
sess = tf.InteractiveSession(config=tf.ConfigProto())

x = tf.placeholder(tf.float32, shape=[None, 240, 320, 3], name='x')
y_ = tf.placeholder(tf.float32, shape=[None, 3], name='y_')

x_shaped = tf.reshape(x, [-1, 240 * 320 * 3])

W = weight_variable('layer1',[240 * 320 * 3, 3])
b = bias_variable('layer1',[3])
logits = tf.add(tf.matmul(x_shaped, W), b, name='logits')
y = tf.nn.softmax(logits)

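# note: this hand-rolled cross-entropy (log of a softmax) is numerically unstable;
# tf.nn.softmax_cross_entropy_with_logits, as in the earlier example, is the stable form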
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4,name='train_step').minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32),name='accuracy')

model_file = os.path.dirname(os.path.realpath(__file__)) + '/' + os.path.basename(__file__)
trainer = Trainer(data_path=data_path,
                  model_file=model_file,
                  s3_bucket=s3_bucket,
                  epochs=epochs,
                  max_sample_records=1000,
                  show_speed=show_speed,
                  s3_sync=s3_sync)

trainer.train(sess=sess, x=x, y_=y_,
              accuracy=accuracy,
              train_step=train_step,
              train_feed_dict={},
              test_feed_dict={})