Example #1
import os
import pickle
from collections import defaultdict


def main():
    gpu = 4
    params = Params()

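    # Resume from cached results if a previous run left a pickle behind.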
    if os.path.exists('tmp/results.pkl'):
        with open('tmp/results.pkl', 'rb') as f:
            result_dict = pickle.load(f)
    else:
        result_dict = defaultdict(dict)
    for experiment_set in EXPERIMENTS_LIST:
        print('Running Problem set %s' % experiment_set['name'])
        params = Params()

        if experiment_set['additional_params']:
            for k, v in experiment_set['additional_params'].items():
                setattr(params, k, v)

        for problem in experiment_set['problems']:
            if '%s_Accuracy' % problem not in result_dict[
                    experiment_set['name']]:
                estimator = train_problem(params, problem, gpu,
                                          experiment_set['name'])
                eval_dict = eval_problem(params,
                                         problem,
                                         estimator,
                                         gpu,
                                         base=experiment_set['name'])
                result_dict[experiment_set['name']].update(eval_dict)
                print(result_dict)
                with open('tmp/results.pkl', 'wb') as f:
                    pickle.dump(result_dict, f)

    print(result_dict)

    with open('tmp/results.pkl', 'wb') as f:
        pickle.dump(result_dict, f)
    create_result_table()
Example #2
File: train.py Project: harxish/Facenet-TF
    def __init__(self, json_path, data_dir, validate, ckpt_dir, log_dir,
                 restore):

        self.params = Params(json_path)
        self.valid = 1 if validate == '1' else 0
        self.model = face_model(self.params)

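        # Decay the learning rate by a factor of 0.96 every 10,000 steps.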
        self.lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
            self.params.learning_rate,
            decay_steps=10000,
            decay_rate=0.96,
            staircase=True)
        self.optimizer = tf.keras.optimizers.Adam(
            learning_rate=self.lr_schedule,
            beta_1=0.9,
            beta_2=0.999,
            epsilon=0.1)

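        # Checkpoint the model, optimizer state, and step/epoch counters
        # together so training can resume exactly where it stopped.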
        self.checkpoint = tf.train.Checkpoint(
            model=self.model,
            optimizer=self.optimizer,
            train_steps=tf.Variable(0, dtype=tf.int64),
            valid_steps=tf.Variable(0, dtype=tf.int64),
            epoch=tf.Variable(0, dtype=tf.int64))
        self.ckptmanager = tf.train.CheckpointManager(self.checkpoint,
                                                      ckpt_dir, 3)

        if self.params.triplet_strategy == "batch_all":
            self.loss = batch_all_triplet_loss

        elif self.params.triplet_strategy == "batch_hard":
            self.loss = batch_hard_triplet_loss

        elif self.params.triplet_strategy == "batch_adaptive":
            self.loss = adapted_triplet_loss

        else:
            raise ValueError(
                f'Unknown triplet_strategy: {self.params.triplet_strategy}')

        current_time = datetime.datetime.now().strftime("%d-%m-%Y_%H%M%S")
        log_dir += current_time + '/train/'
        self.train_summary_writer = tf.summary.create_file_writer(log_dir)

        if restore == '1':
            self.checkpoint.restore(self.ckptmanager.latest_checkpoint)
            print(
                f'\nRestored from Checkpoint : {self.ckptmanager.latest_checkpoint}\n'
            )

        else:
            print('\nInitializing from scratch\n')

        self.train_dataset, self.train_samples = get_dataset(
            data_dir, self.params, 'train')

        if self.valid:
            self.valid_dataset, self.valid_samples = get_dataset(
                data_dir, self.params, 'val')
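For orientation, a minimal sketch of how this constructor might be called; the enclosing class name (Trainer) and the argument values are assumptions, since the snippet shows only __init__:

# Hypothetical usage; the class name and paths are illustrative.
trainer = Trainer('hyperparams.json', 'data/', validate='1',
                  ckpt_dir='checkpoints/', log_dir='logs/', restore='0')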
Example #3
def sample_nmist(size, iterations, epochs, path):
    '''Example method for running the parameter optimizer with a neural network.'''
    import numpy as np
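    # Fix the RNG seed so the GA's random initialization is reproducible.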
    np.random.seed(1)
    train, test = generate_nmist_dataset()
    params = Params(variable_params)
    print(params.optimize_params)

    print(len(train))

    optimizer = OptimizerGA(train, test, params, generate_model_ann)
    optimizer.verbose_train = 0
    optimizer.epochs_train = epochs
    optimizer.generate_population(size)
    for i in range(iterations):
        print("=> Generation ", i)
        optimizer.evolve(i == 0)
        print(optimizer.population_score())
    print(optimizer.population)
    optimizer.population_save(path)
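A hypothetical invocation, with illustrative values for the population size, number of GA generations, training epochs per candidate, and save path:

sample_nmist(size=10, iterations=5, epochs=3, path='population.pkl')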
Example #4
def sample_nmist_parallel(size, iterations, epochs, path):
    '''Example method for running the parameter optimizer in parallel with a neural network.'''
    import numpy as np
    np.random.seed(1)
    is_master = False
    train, test = generate_nmist_dataset()
    params = Params(variable_params)

    optimizer = OptimizerGA(train, test, params, generate_model_ann)
    optimizer.verbose_train = 0
    optimizer.epochs_train = epochs
    optimizer.generate_population(size)
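    # evolve_mpi() returns "master" on the coordinating MPI rank; only that
    # rank aggregates scores and should report or save the population.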
    for i in range(iterations):
        print("=> Generation ", i)
        is_master = optimizer.evolve_mpi(i == 0, best_prune=0.5) == "master"
        if is_master:
            print(optimizer.population_score())
    if is_master:
        print(optimizer.population)
        optimizer.population_save(path)
    else:
        print("Slave destroy.")
Example #5
    def run_experiment(self):
        xmax = self.simulation_params.wall.get_xmax()
        ymax = self.simulation_params.wall.get_ymax()
        rc = self.simulation_params.interaction.get_rc()
        v0 = self.simulation_params.params.v0
        mu = self.simulation_params.params.mu
        deltat = self.simulation_params.params.deltat
        diffcoef = self.simulation_params.params.diffcoef
        sim_steps = self.simulation_params.simulation_steps
        interaction = self.simulation_params.interaction
        wall = self.simulation_params.wall
        init_positions = self.simulation_params.init_positions
        epsilon = self.simulation_params.params.epsilon
        sigma = self.simulation_params.params.sigma

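        # Cap the sweep so the Verlet radius rv = (1 + lambda) * rc never
        # exceeds the smaller box dimension.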
        if self.max_lambda is not None:
            max_lambda = self.max_lambda
        else:
            max_lambda = min(xmax, ymax) / rc - 1
        the_lambdas = np.linspace(0, max_lambda, self.lambda_samples)
        times = []
        for lambd in the_lambdas:
            rv = (1 + lambd) * rc
            params = Params(rc=rc,
                            rv=rv,
                            v0=v0,
                            mu=mu,
                            deltat=deltat,
                            diffcoef=diffcoef,
                            epsilon=epsilon,
                            sigma=sigma)

            sim = Simulation(sim_steps, interaction, wall, init_positions,
                             params)
            sim.run()
            times.append(sim.total_phys_time)

        return the_lambdas, np.array(times)
Example #6
        (assignment_map, initialized_variable_names
         ) = modeling.get_assignment_map_from_checkpoint(
             tvars, init_checkpoint)

        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

        tmp_g = tf.get_default_graph().as_graph_def()

    with tf.Session(config=config) as sess:
        tf.logging.info('load parameters from checkpoint...')
        sess.run(tf.global_variables_initializer())
        tf.logging.info('freeze...')
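        # Freezing folds the variable values into graph constants, so the
        # exported GraphDef is self-contained.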
        tmp_g = tf.graph_util.convert_variables_to_constants(
            sess, tmp_g, [n.name[:-2] for n in output_tensors])
    tmp_file = os.path.join(params.ckpt_dir, 'export_model')
    tf.logging.info('write graph to a tmp file: %s' % tmp_file)
    with tf.gfile.GFile(tmp_file, 'wb') as f:
        f.write(tmp_g.SerializeToString())
    return tmp_file


if __name__ == "__main__":
    if FLAGS.model_dir:
        base_dir, dir_name = os.path.split(FLAGS.model_dir)
    else:
        base_dir, dir_name = None, None
    params = Params()
    params.assign_problem(FLAGS.problem, base_dir=base_dir, dir_name=dir_name)
    optimize_graph(params)
    params.to_json()
Example #7
from os import listdir
from os.path import isfile, join

import xgboost as xgb
import numpy as np
from sklearn.ensemble import RandomForestRegressor

import src.constants as const
from src.data import Data
from src.evaluation import Evaluator as eval
from src.params import Params
from src.visualization import Visualisation as visual

input_files = [
    f for f in listdir(const.data_path) if isfile(join(const.data_path, f))
]

data = Data(input_files, fill_strategy='gate')
params = Params()

visual.create_feature_map(data.lagged_feature_names)

#visual.plot2d(data, 'crude_oil')
#visual.plot_all_2d(data)

# Global variables
xgb_num_rounds = 80

td = np.array(data.train_data.drop(['output'], axis=1))

# pandas removed DataFrame.as_matrix(); .to_numpy() is the modern equivalent.
train_data = data.train_data.drop(['output'], axis=1).to_numpy()
train_label = data.train_data[['output']].to_numpy()
test_data = data.test_data.drop(['output'], axis=1).to_numpy()
test_label = data.test_data[['output']].to_numpy()
Example #8
rc = interaction.get_rc()
rv = (1 + lambd) * rc

#wall = WallA(xmin, xmax, ymin, ymax)
wall = WallPeriodicBC(xmin, xmax, ymin, ymax)

X_u = np.linspace(xmin + rc, xmax - rc, 6)
Y_u = np.linspace(ymin + rc, ymax - rc, 6)
XX, YY = np.meshgrid(X_u, Y_u)

pts = np.vstack([XX.ravel(), YY.ravel()])
pts3 = np.column_stack([XX.ravel(), YY.ravel()])
print("Numero de particulas = %d" % len(pts3))


params = Params(rc=rc, rv=rv, v0=v0, mu=mu, deltat=deltat, diffcoef=diffcoef, epsilon=epsilon, sigma=sigma)

sim = Simulation(total_phys_time, interaction, wall, pts3, params, all_interactions=all_interactions,
                 save_interactions_idxs=True, save_point_to_point=True)

xsize = 1024
ysize = 1024

margin = 50

results = []
all_angles = []
all_interactions_list = []
for result, angles, interactions in sim.run_gen():
    results.append(np.copy(result))
    all_angles.append(np.copy(angles))
    all_interactions_list.append(interactions)
Example #9
def main(_):

    if not os.path.exists('tmp'):
        os.mkdir('tmp')

    if FLAGS.model_dir:
        base_dir, dir_name = os.path.split(FLAGS.model_dir)
    else:
        base_dir, dir_name = None, None

    params = Params()
    params.assign_problem(FLAGS.problem,
                          gpu=int(FLAGS.gpu),
                          base_dir=base_dir,
                          dir_name=dir_name)

    tf.logging.info('Checkpoint dir: %s' % params.ckpt_dir)
    time.sleep(3)

    model = BertMultiTask(params=params)
    model_fn = model.get_model_fn(warm_start=False)

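    # Replicate the model across FLAGS.gpu GPUs, synchronizing gradients
    # with NCCL all-reduce.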
    dist_strategy = tf.contrib.distribute.MirroredStrategy(
        num_gpus=int(FLAGS.gpu),
        cross_tower_ops=tf.contrib.distribute.AllReduceCrossTowerOps(
            'nccl', num_packs=int(FLAGS.gpu)))

    run_config = tf.estimator.RunConfig(
        train_distribute=dist_strategy,
        eval_distribute=dist_strategy,
        log_step_count_steps=params.log_every_n_steps)

    # ws = make_warm_start_setting(params)

    estimator = Estimator(model_fn,
                          model_dir=params.ckpt_dir,
                          params=params,
                          config=run_config)

    if FLAGS.schedule == 'train':
        train_hook = RestoreCheckpointHook(params)

        def train_input_fn():
            return train_eval_input_fn(params)

        estimator.train(train_input_fn,
                        max_steps=params.train_steps,
                        hooks=[train_hook])

        def input_fn():
            return train_eval_input_fn(params, mode='eval')

        estimator.evaluate(input_fn=input_fn)

    elif FLAGS.schedule == 'eval':

        evaluate_func = getattr(metrics, FLAGS.eval_scheme + '_evaluate')
        print(evaluate_func(FLAGS.problem, estimator, params))

    elif FLAGS.schedule == 'predict':

        def input_fn():
            return predict_input_fn([
                '''兰心餐厅\n作为一个无辣不欢的妹子,对上海菜的偏清淡偏甜真的是各种吃不惯。
            每次出门和闺蜜越饭局都是避开本帮菜。后来听很多朋友说上海有几家特别正宗味道做
            的很好的餐厅于是这周末和闺蜜们准备一起去尝一尝正宗的本帮菜。\n进贤路是我在上
            海比较喜欢的一条街啦,这家餐厅就开在这条路上。已经开了三十多年的老餐厅了,地
            方很小,就五六张桌子。但是翻桌率比较快。二楼之前的居民间也改成了餐厅,但是在
            上海的名气却非常大。烧的就是家常菜,普通到和家里烧的一样,生意非常好,外面排
            队的比里面吃的人还要多。'''
            ],
                                    params,
                                    mode='predict')

        pred = estimator.predict(input_fn=input_fn)
        for p in pred:
            print(p)
Example #10
    """
    no início do update apaga os arquivos anteriores para nova atualização
    """
    # busca o path com os arquivos
    path = Params.path_data_raw
    # cria lista com os arquivos
    lst_files = os.listdir(path)
    # arquivos a serem apagados
    files = ['product_links.csv', 'sub_cat.csv']
    # apaga cada arquivo
    [os.remove(path + item) for item in files if item in lst_files]
    f = open(Params.sub_cat_processed, 'w+')
    f = open(Params.product_links, 'w+')


def update(params):
    """
    Crawls the e-commerce site www.superabc.com.br for every product link on the site and saves them to a CSV file.
    @param params: parameters defined in the Params class (params.py)
    """
    # clear out data from previous runs
    clean_files()
    # fetch the links
    get_links()
    # record the update date in a text file
    with open(params.last_update_links, mode='w+') as file:
        file.write(params.today)

if __name__ == '__main__':
    params = Params()
    update(params)