Example 1
def catboost_model():

    # Load data from CSV
    X_train = pd.read_csv(rootDirectory + "/data/processed/X_train.csv")
    y_train = pd.read_csv(rootDirectory + "/data/processed/y_train.csv")
    X_test = pd.read_csv(rootDirectory + "/data/processed/X_test.csv")

    # Load Test Data to Pull ID
    Original_Test = pd.read_csv(rootDirectory + "/data/raw/test.csv")
    test_Id = Original_Test['id']

    # Call CatBoost Model
    catmodel = M1_CatBoost.CatBoost_Model(X_train, y_train, X_test)

    if parameters.Parameters().run_train:
        # Perform Train Test Split Sequence
        catmodel.train_test_split()
        catmodel.train_pre_model()
        catmodel.predict_train_data()
        catmodel.cross_validated_catboost()
        catmodel.log_train_results()

    if parameters.Parameters().run_test:
        # Perform Full Train and Final Test Sequence
        catmodel.train_final_model()
        catmodel.predict_final_model()
        catmodel.output_submission(test_Id)
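
Note: this example, like several below, gates work on boolean fields of a freshly constructed parameters.Parameters() object. A minimal sketch of such a configuration class, with hypothetical run_train/run_test/run_load_data flags (illustrative only, not the project's actual class):

class Parameters:
    def __init__(self):
        # Hypothetical pipeline switches implied by the usage above
        self.run_train = True        # run the train/validation sequence
        self.run_test = False        # run the full-train + submission sequence
        self.run_load_data = True    # rebuild processed data from raw files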
Example 2
    def setParams(self, sysWebParamsDict, mngWebClientAppParamsDict,
                  statsWebAppParamsDict, techWebAppParamsDict,
                  uxModuleParamsDict):
        self.sysWebParams = parameters.Parameters(dictionary=sysWebParamsDict)
        self.mngWebClientAppParams = parameters.Parameters(
            dirName=self.mngDataDir, dictionary=mngWebClientAppParamsDict)
        self.statsWebAppParams = parameters.Parameters(
            dirName=self.statsWebDataDir, dictionary=statsWebAppParamsDict)
        self.techWebAppParams = parameters.Parameters(
            dirName=self.techWebDataDir, dictionary=techWebAppParamsDict)
        self.uxModuleParams = parameters.Parameters(
            fileName=os.path.join(self.mngDataDir, "ux-parameters.json"),
            dictionary=uxModuleParamsDict)
        self.provideSharedParameters()
Example 3
def recenter_samples(ts, chains, logls, sigmafactor=0.1):
    """Generates a suite of samples around the maximum likelihood
    point in chains, with a reasonable error distribution."""

    sf = sigmafactor

    T = ts[-1] - ts[0]

    ibest = np.argmax(logls)
    p0 = params.Parameters(
        np.reshape(chains, (-1, chains.shape[-1]))[ibest, :])

    ncycle = T / p0.P
    ncorr = T / p0.tau
    nobs = len(ts)

    samples = params.Parameters(np.copy(chains))

    assert samples.npl == 1, 'require exactly one planet'
    assert samples.nobs == 1, 'require exactly one observatory'

    samples.V = np.random.normal(loc=p0.V,
                                 scale=sf * p0.sigma / np.sqrt(nobs),
                                 size=samples.V.shape)
    samples.sigma0 = np.random.lognormal(mean=np.log(p0.sigma0),
                                         sigma=sf / np.sqrt(nobs),
                                         size=samples.sigma0.shape)
    samples.sigma = np.random.lognormal(mean=np.log(p0.sigma),
                                        sigma=sf / np.sqrt(ncorr),
                                        size=samples.sigma.shape)
    samples.tau = np.random.lognormal(mean=np.log(p0.tau),
                                      sigma=sf / np.sqrt(ncorr),
                                      size=samples.tau.shape)
    samples.K = np.random.normal(loc=p0.K,
                                 scale=sf * p0.K / np.sqrt(nobs),
                                 size=samples.K.shape)
    samples.n = np.random.lognormal(mean=np.log(p0.n),
                                    sigma=sf / np.sqrt(ncycle),
                                    size=samples.n.shape)
    samples.chi = np.random.lognormal(mean=np.log(p0.chi),
                                      sigma=sf / np.sqrt(ncycle),
                                      size=samples.chi.shape)
    samples.e = np.random.lognormal(mean=np.log(p0.e),
                                    sigma=sf / np.sqrt(ncycle),
                                    size=samples.e.shape)
    samples.omega = np.random.lognormal(mean=np.log(p0.omega),
                                        sigma=sf / np.sqrt(ncycle),
                                        size=samples.omega.shape)

    return samples
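
The lognormal perturbations above use sigma = sf / sqrt(n), which for small sigma corresponds to a fractional scatter of roughly sf / sqrt(n) around the best-fit value. A quick numeric check of that reading (standalone sketch, not part of the source):

import numpy as np

rng = np.random.default_rng(0)
draws = rng.lognormal(mean=np.log(10.0), sigma=0.01, size=100_000)
print(np.std(draws) / np.mean(draws))  # ~0.01: the coefficient of variation matches sigma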
Example 4
    def restart(self):
        """
        Restart the app without ending it; also extends init.
        Parameters are reloaded and the gui rebuilt.
        args: none
        ret: none ... all side effects
        """
        #rint( "===================restart===========================" )
        self.no_restarts    += 1
        if self.gui is not None:

            self.logger.info( self.app_name + ": restart" )

            # self.post_to_queue( "stop", None  , (  ) ) # need to shut down other thread
            # self.helper_thread.join()

            self.gui.close()

            importlib.reload( parameters )
        self.polling_mode = "off"
        #self._polling_fail        = False   # flag set if _polling in gui thread fails

#        self.is_first_gui_loop    = True
        #self.ext_processing       = None    # built later from parameters if specified
        self.logger             = None    # set later; None guards against calls before setup

        # ----- parameters
        self.parmeters_x        = "none"        # module name (without .py) for parameters;
                                                # may be replaced by command-line args

        self.select_manager     = None          # populate in cb_change_select_type
        self.parameters         = parameters.Parameters( )
        self.starting_dir       = os.getcwd()

        self.logger_id          = self.parameters.logger_id       # std name
        self.logger             = self.config_logger()            # std name

        AppGlobal.logger        = self.logger
        AppGlobal.logger_id     = self.logger_id

        self.prog_info()

        self.gui                = gui.GUI(  )
        self.last_begin_dt,  self.last_end_dt  = self.gui.get_dt_begin_end() # after gui is built

        # most of the setup memory has been allocated by now; may want to check again later and save this value
        process      = psutil.Process(os.getpid())    #  import psutil
        mem          = process.memory_info().rss
        # convert to megabytes and format
        mem_mega     = mem/( 1e6 )
        msg          = f"process memory = {mem_mega:10,.2f} megabytes"
        print( msg )
        # set up gui thread polling if delta > 0
        self.logger.log( AppGlobal.force_log_level, msg )
        self.polling_delta  = self.parameters.polling_delta
        if self.polling_delta > 0:
            self.gui.root.after( self.polling_delta, self.polling_0 )
        self.gui.run()

        self.logger.info( self.app_name + ": all done" )
Example 5
    def __init__(self, config=None):
        '''
        Create an instance and initialize all the variables.
        '''
        self.config = config
        self.data = data.DataList()
        self.script = ''
        self.parameters = parameters.Parameters(model=self)

        #self.fom_func = default_fom_func
        self.fom_func = fom_funcs.log  # The function that evaluates the fom
        self.fom = None  # The value of the fom function

        self.fom_mask_func = None
        self.fom_ignore_nan = False
        self.fom_ignore_inf = False
        self.create_fom_mask_func()

        # Registered classes that are looked for in the model
        self.registred_classes = []
        #self.registred_classes = ['Layer','Stack','Sample','Instrument',\
        #                            'model.Layer', 'model.Stack',\
        #                             'model.Sample','model.Instrument',\
        #                             'UserVars','Surface','Bulk']
        self.set_func = 'set'  #'set'
        self._reset_module()

        # Temporary stuff that needs to keep track on
        self.filename = ''
        self.saved = True
        self.compiled = False
Example 6
def load_parameters(filename):
    with open(filename) as f:
        r = yaml.safe_load(f)
    for d in defaults:
        if d not in r:
            r[d] = defaults[d]
    return parameters.Parameters(**r)
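
A minimal usage sketch for load_parameters, assuming defaults is a module-level dict of fallback values (as the loop implies) and using a hypothetical params.yaml; names and values here are illustrative:

import yaml

defaults = {"lr_rate": 0.001, "batch_size": 10}  # assumed fallback dict

with open("params.yaml", "w") as f:
    yaml.safe_dump({"lr_rate": 0.02}, f)

p = load_parameters("params.yaml")  # lr_rate from the file, batch_size from defaults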
Example 7
def main():
    pa = parameters.Parameters()

    pa.simu_len = 200  # 5000  # 1000
    pa.num_ex = 10  # 100
    pa.num_nw = 10
    pa.num_seq_per_batch = 20
    # pa.max_nw_size = 5
    # pa.job_len = 5
    pa.new_job_rate = 0.3
    pa.discount = 1

    pa.episode_max_length = 20000  # 2000

    pa.compute_dependent_parameters()

    render = False

    plot = True  # plot slowdown cdf

    pg_resume = None
    pg_resume = 'data/pg_re_discount_1_rate_0.3_simu_len_200_num_seq_per_batch_20_ex_10_nw_10_1450.pkl'
    # pg_resume = 'data/pg_re_1000_discount_1_5990.pkl'

    pa.unseen = True

    launch(pa, pg_resume, render, plot, repre='image', end='all_done')
Example 8
def split_data(main_data, processed_data, filepath):

    params = parameters.Parameters()

    # Select headers
    train_col = params.continuous_variables + params.categorical_variables
    test_col = ['log_price']

    # X_train
    X_train = processed_data.loc[(processed_data['Source'] == "Train")]
    X_train = X_train.drop('Source', axis=1)

    # y_Train
    y_train = main_data[test_col].loc[(main_data['Source'] == "Train")]

    # X_Test
    X_test = processed_data.loc[(processed_data['Source'] == "Test")]
    X_test = X_test.drop('Source', axis=1)

    # Display shape
    print("X_train Shape: {}".format(X_train.shape))
    print("y_train Shape: {}".format(y_train.shape))
    print("X_test Shape: {}".format(X_test.shape))

    # Output data
    X_train.to_csv(filepath + '/X_train.csv', index=False, encoding='utf-8')
    y_train.to_csv(filepath + '/y_train.csv', index=False, encoding='utf-8')
    X_test.to_csv(filepath + '/X_test.csv', index=False, encoding='utf-8')

    print("Split Data Complete.")
Example 9
def main():
    import parameters

    pa = parameters.Parameters()

    pa.simu_len = 50  # 1000
    pa.num_ex = 10  # 100
    pa.num_nw = 5
    pa.num_seq_per_batch = 10
    pa.output_freq = 50
    pa.batch_size = 10
    pa.output_filename = "dqn_data2_relu_under_zero/tmp"

    # pa.max_nw_size = 5
    # pa.job_len = 5
    pa.new_job_rate = 0.3

    pa.episode_max_length = 20000  # 2000

    pa.num_epochs = 202
    pa.lr_rate = 0.02

    pa.compute_dependent_parameters()

    pg_resume = None
    # pg_resume = 'data/tmp_3500.ckpt'

    render = False

    launch_dqn(pa, pg_resume, render, repre='image', end='all_done')
Example 10
def master(train_data, dev_data, utility):
    # Creates the TF graph and calls the trainer or evaluator
    batch_size = utility.FLAGS.batch_size
    model_dir = utility.FLAGS.output_dir + "/model" + utility.FLAGS.job_id + "/"
    # Create all parameters of the model
    param_class = parameters.Parameters(utility)
    params, global_step, init = param_class.parameters(utility)
    key = "test" if (FLAGS.evaluator_job) else "train"
    graph = model.Graph(utility,
                        batch_size,
                        utility.FLAGS.max_passes,
                        mode=key)
    graph.create_graph(params, global_step)
    prev_dev_error = 0.0
    final_loss = 0.0
    final_accuracy = 0.0
    #start session
    with tf.Session() as sess:
        sess.run(init.name)
        sess.run(graph.init_op.name)
        to_save = params.copy()
        saver = tf.train.Saver(to_save, max_to_keep=500)
        if (FLAGS.evaluator_job):
            while True:
                selected_models = {}
                file_list = tf.gfile.ListDirectory(model_dir)
                for model_file in file_list:
                    if ("checkpoint" in model_file or "index" in model_file
                            or "meta" in model_file):
                        continue
                    if ("data" in model_file):
                        model_file = model_file.split(".")[0]
                    model_step = int(
                        model_file.split("_")[len(model_file.split("_")) - 1])
                    selected_models[model_step] = model_file
                file_list = sorted(list(selected_models.items()),
                                   key=lambda x: x[0])
                if (len(file_list) > 0):
                    file_list = file_list[0:len(file_list) - 1]
                print(("list of models: ", file_list))
                for model_file in file_list:
                    model_file = model_file[1]
                    print(("restoring: ", model_file))
                    saver.restore(sess, (model_dir + "/" + model_file).replace(
                        '//', '/'))
                    model_step = int(
                        model_file.split("_")[len(model_file.split("_")) - 1])
                    print(("evaluating on dev ", model_file, model_step))
                    evaluate(sess, dev_data, batch_size, graph, model_step)
        else:
            ckpt = tf.train.get_checkpoint_state(model_dir)
            print(("model dir: ", model_dir))
            if (not (tf.gfile.IsDirectory(utility.FLAGS.output_dir))):
                print(("create dir: ", utility.FLAGS.output_dir))
                tf.gfile.MkDir(utility.FLAGS.output_dir)
            if (not (tf.gfile.IsDirectory(model_dir))):
                print(("create dir: ", model_dir))
                tf.gfile.MkDir(model_dir)
            Train(graph, utility, batch_size, train_data, sess, model_dir,
                  saver)
Example 11
    def main(self):
        """
        Usage: python ./MktStall.py arg1 [input] arg2 [results] arg3 [tmpdir] arg4 [debug] arg5 [verbose]
        :return:
        """
        ###############################################
        # KEEP ME FOR CMD LINE STAGE
        # for arg in sys.argv[1:]:
        # this is to take argvs from cmdline
        ###############################################
        p = parameters.Parameters()
        self.INPUT = p.input
        self.RESULTS = p.results
        self.TMP_DIR = p.tmp_dir
        #self.config_file = ConfigMaker(self.TMP_DIR)

        print(self.INPUT)
        print(self.RESULTS)
        print(self.TMP_DIR)

        if not os.path.exists(self.RESULTS):
            os.makedirs(self.RESULTS)
        if not os.path.exists(self.RESULTS + '/images'):
            os.makedirs(self.RESULTS + '/images')
        if not os.path.exists(self.RESULTS + '/html'):
            os.makedirs(self.RESULTS + '/html')
        if not os.path.exists(self.RESULTS + '/tables'):
            os.makedirs(self.RESULTS + '/tables')
        if not os.path.exists(self.RESULTS + '/worddoc'):
            os.makedirs(self.RESULTS + '/worddoc')
        self.start()
Example 12
def main():
    params = parameters.Parameters()
    params.read(sys.argv[1:])
    sim = False

    for task in params.task:
        if task == "app":
            dash_ui.launcher.launch_app(params)

        elif task == "help":
            if (len(sys.argv) > 2):
                params.help(sys.argv[2])
            else:
                params.help()

        elif task == "track":
            tracking.track(params)

        elif task == "simulate":
            simulation.simulate(params)
            sim = True

        elif task == "postprocess":
            postprocessing.postprocess(params, simulated=sim)

        elif task == "view":
            visualisation.render(params)

        elif task == "compare":
            trajectories.compare_trajectories(params)

        else:
            sys.exit(f"ERROR: Task {task} is not yet implemented. Aborting...")
Example 13
def main():

    import parameters

    pa = parameters.Parameters()

    pa.simu_len = 50  # 1000
    pa.num_ex = 50  # 100
    pa.num_nw = 10
    pa.num_seq_per_batch = 20
    pa.output_freq = 50
    pa.batch_size = 10

    # pa.max_nw_size = 5
    # pa.job_len = 5
    pa.new_job_rate = 0.3

    pa.episode_max_length = 2000  # 2000

    pa.compute_dependent_parameters()

    pg_resume = None
    # pg_resume = 'data/tmp_450.pkl'

    render = False

    launch(pa, pg_resume, render, repre='image', end='all_done')
Example 14
def generate_initial_sample(pmin, pmax, ntemps, nwalkers):
    """Generates an initial sample of parameters drawn uniformly from
    the prior ."""

    npl = pmin.npl
    nobs = pmin.nobs

    assert npl == pmax.npl, 'Number of planets must agree in prior bounds'
    assert nobs == pmax.nobs, 'Number of observations must agree in prior bounds'

    N = pmin.shape[-1]

    samps = params.Parameters(arr=np.zeros((ntemps, nwalkers, N)),
                              nobs=nobs,
                              npl=npl)

    V = samps.V
    tau = samps.tau
    sigma = samps.sigma
    sigma0 = samps.sigma0
    for i in range(nobs):
        V[:, :, i] = nr.uniform(low=pmin.V[i],
                                high=pmax.V[i],
                                size=(ntemps, nwalkers))
        tau[:, :, i] = draw_logarithmic(low=pmin.tau[i],
                                        high=pmax.tau[i],
                                        size=(ntemps, nwalkers))
        sigma[:, :, i] = draw_logarithmic(low=pmin.sigma[i],
                                          high=pmax.sigma[i],
                                          size=(ntemps, nwalkers))
        sigma0[:, :, i] = draw_logarithmic(low=pmin.sigma0[i],
                                           high=pmax.sigma0[i],
                                           size=(ntemps, nwalkers))
    samps.V = np.squeeze(V)
    samps.tau = np.squeeze(tau)
    samps.sigma = np.squeeze(sigma)
    samps.sigma0 = np.squeeze(sigma0)

    if npl >= 1:
        samps.K = np.squeeze(
            draw_logarithmic(low=pmin.K[0],
                             high=pmax.K[0],
                             size=(ntemps, nwalkers, npl)))

        # Make sure that periods are increasing
        samps.n = np.squeeze(
            np.sort(
                draw_logarithmic(low=pmin.n,
                                 high=pmax.n,
                                 size=(ntemps, nwalkers, npl)))[:, :, ::-1])

        samps.e = np.squeeze(
            nr.uniform(low=0.0, high=1.0, size=(ntemps, nwalkers, npl)))
        samps.chi = np.squeeze(
            nr.uniform(low=0.0, high=1.0, size=(ntemps, nwalkers, npl)))
        samps.omega = np.squeeze(
            nr.uniform(low=0.0, high=2.0 * np.pi,
                       size=(ntemps, nwalkers, npl)))

    return samps
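
draw_logarithmic is not defined in this snippet; a common implementation, which this sketch assumes, draws uniformly in log space (a log-uniform, Jeffreys-style draw):

import numpy as np

def draw_logarithmic(low, high, size=None):
    # Uniform in log(x) between log(low) and log(high)
    return np.exp(np.random.uniform(np.log(low), np.log(high), size=size))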
Example 15
    def __init__(self, config=None):
        '''
        Create an instance and initialize all the variables.
        '''
        self.config = config
        #self.data
        self.data = data.DataList()
        self.data_original = data.DataList()
        self.script = ''
        self.parameters = parameters.Parameters()

        #self.fom_func = default_fom_func
        # self.fom_func = fom_funcs.log # The function that evaluates the fom
        self.fom_func = fom_funcs.chi2bars  # The function that evaluates the fom
        self.fom = None  # The value of the fom function
        self.weight_factor = 1  # FOM weighting factor
        self.weight_map = {}  # FOM weighting factor map
        self.weight_decorator = weight_fom_based_on_HKL

        # Registered classes that are looked for in the model
        self.registred_classes = []
        #self.registred_classes = ['Layer','Stack','Sample','Instrument',\
        #                            'model.Layer', 'model.Stack',\
        #                             'model.Sample','model.Instrument',\
        #                             'UserVars','Surface','Bulk']
        self.set_func = 'set'  #'set'
        self._reset_module()

        # Temporary stuff that needs to keep track on
        self.filename = ''
        self.saved = True
        self.compiled = False
Example 16
def posterior_data_mean_quantiles(ts, rvs, psamples):
    """Returns the average of the quantiles of the data residuals over
    the posterior samples in psamples.  The quantiles over multiple
    observatories are flattened into one array. """

    Nobs = len(ts)
    Nsamples = psamples.shape[0]

    Npl = (psamples.shape[-1] - 4 * Nobs) // 5

    psamples = params.Parameters(arr=psamples, npl=Npl, nobs=Nobs)

    ll = LogLikelihood(ts, rvs)

    qs = np.zeros(sum([len(t) for t in ts]))

    for psample in psamples:
        one_qs = []
        for t, rv, V, sigma0, tau, sigma in zip(ts, rvs, psample.V,
                                                psample.sigma0, psample.tau,
                                                psample.sigma):
            one_qs.append(
                correlated_gaussian_quantiles(
                    ll.residuals(t, rv, psample), V * np.ones_like(t),
                    generate_covariance(t, sigma0, sigma, tau)))
        qs += np.array(one_qs).flatten() / Nsamples

    return qs
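
The expression for Npl assumes the flattened parameter vector packs four per-observatory values (V, sigma0, sigma, tau) and five per-planet values, so its length is 4 * Nobs + 5 * Npl. A quick sanity check of that inversion under the assumed layout:

nobs, npl = 2, 3                    # illustrative counts
N = 4 * nobs + 5 * npl              # assumed packing of the parameter vector
assert (N - 4 * nobs) // 5 == npl   # integer division recovers npl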
Example 17
def run(kwargs):

    global trials

    #  Parameters definition
    param = {
        "n_positions": 21,
        "n_prices": 11,
        "n_firms": 2,
        "alpha": kwargs["alpha"],
        "momentum": 0,
        "temp": kwargs["temp"],
        "n_simulations": 1,
        "t_max": 3000,
        "zombies_customers": False,
        "mode": "p_fixed",
        "discrete": True,
        "fields_of_view": [0.3, 0.7],
        "fov_boundaries": [0, 1],
        "firm_class": ["Firm", "Firm"],
        "unit_value": 1,
        "unique": True,
        "unique_fov": True
    }

    param = parameters.Parameters(**param)

    # To return: mean of this list
    profits = []

    # Environment object
    for field in [0.3, 0.4, 0.5, 0.6, 0.7]:

        e = env.Environment(parameters=param,
                            field_of_view=field,
                            init_firm_positions=[10, 10],
                            init_firm_prices=[5, 5])

        for t in range(param.t_max):

            print("\rTrial {} => cond:{}, time step: {}".format(
                trials, field, t),
                  end='')

            # New time step
            e.time_step_first_part()

            # End turn
            e.time_step_second_part()

            if t > 0.33 * param.t_max:
                profits.append(np.mean(e.profits))

    trials += 1

    return -np.mean(profits)
Example 18
def main():

    # Run data loading when selected in the parameters
    if parameters.Parameters().run_load_data:
        fx_make_dataset()
        fx_build_features()
        preprocess_dataset()

    # Run the Catboost data pipeline
    catboost_model()
Example 19
    def __init__(self, pmin=None, pmax=None, npl=1, nobs=1):
        """Initialize with the given bounds on the priors."""

        if pmin is None:
            self._pmin = params.Parameters(npl=npl, nobs=nobs)
            self._pmin = 0.0 * self._pmin
            self._pmin.V = float('-inf')
        else:
            self._pmin = pmin

        if pmax is None:
            self._pmax = params.Parameters(npl=npl, nobs=nobs)
            self._pmax = self._pmax + float('inf')
            self._pmax.chi = 1.0
            self._pmax.e = 1.0
            self._pmax.omega = 2.0 * np.pi
        else:
            self._pmax = pmax

        self._npl = npl
        self._nobs = nobs
Example 20
    def __init__(self, parent):
        gridlib.PyGridTableBase.__init__(self)
        self.parent = parent
        self.pars = parameters.Parameters()

        self.data_types = [
            gridlib.GRID_VALUE_STRING,
            gridlib.GRID_VALUE_FLOAT,
            gridlib.GRID_VALUE_BOOL,
            gridlib.GRID_VALUE_FLOAT,
            gridlib.GRID_VALUE_FLOAT,
            gridlib.GRID_VALUE_STRING,
        ]
Example 21
def test_backlog():
    pa = parameters.Parameters()
    pa.num_nw = 5
    pa.simu_len = 50
    pa.num_ex = 10
    pa.new_job_rate = 1
    pa.compute_dependent_parameters()

    env = Env(pa, render=False, repre='image')

    env.step(5)
    env.step(5)
    env.step(5)
    env.step(5)
    env.step(5)

    env.step(5)
    assert env.job_backlog.backlog[0] is not None
    assert env.job_backlog.backlog[1] is None
    print("New job is backlogged.")

    env.step(5)
    env.step(5)
    env.step(5)
    env.step(5)

    job = env.job_backlog.backlog[0]
    env.step(0)
    assert env.job_slot.slot[0] == job

    job = env.job_backlog.backlog[0]
    env.step(0)
    assert env.job_slot.slot[0] == job

    job = env.job_backlog.backlog[0]
    env.step(1)
    assert env.job_slot.slot[1] == job

    job = env.job_backlog.backlog[0]
    env.step(1)
    assert env.job_slot.slot[1] == job

    env.step(5)

    job = env.job_backlog.backlog[0]
    env.step(3)
    assert env.job_slot.slot[3] == job

    print("- Backlog test passed -")
Example 22
def instantiate():
    ''' returns four objects: 
        p: the parameter dictionary
        r: reaction rates
        m: model object
        s: simulator
    '''
    p = parameters.Parameters()
    r = reactionrates.Reactions()
    m = model.PETC2014(p, r)
    s = modelbase.Simulator(m)

    print('Created a virtual organism. Experiment ready to run')
    return p, r, m, s
Example 23
def main():

    start_time = time.time()

    gen_man = geneology_manager.GeneologyManager()

    params = parameters.Parameters(NAME, GENERATIONS, CHILDREN_PER_GENERATION,
                                   PARENTS_PER_PROCREATION,
                                   INHERITED_TRAIT_WEIGHTS,
                                   AQUIRED_TRAIT_WEIGHTS,
                                   INHERITED_TRAIT_ADVANTAGES)
    gen_man.create(params)

    print("time: " + str(time.time() - start_time))
    print("gens: " + str(GENERATIONS))
Example 24
def calculate_mean(results_path, pictures, pictures_no, matches, bad_matches,
                   configuration):

    circle_points_number = configuration[0]
    distance_ratio = configuration[1]
    circle_radius_ratio = configuration[2]

    # config.set('circle_points_number', circle_points_number)
    # config.set('distance_ratio', distance_ratio)
    # config.set('circle_radius_ratio', circle_radius_ratio)
    config = param.Parameters(circle_points_number, distance_ratio,
                              circle_radius_ratio)
    parameters = config.get_parameters()

    suite_name = 'params_{0}_{1}_{2}'.format(circle_points_number,
                                             int(distance_ratio),
                                             int(circle_radius_ratio * 100))

    dump_file_name = os.path.join(results_path, suite_name)

    descriptor = ds.Descriptor(parameters)

    descriptors = calc_descriptors(pictures, pictures_no, descriptor)

    good_matches_mean = get_results_mean(matches, descriptors, parameters,
                                         descriptor)
    bad_matches_mean = get_results_mean(bad_matches, descriptors, parameters,
                                        descriptor)

    mean_difference = bad_matches_mean - good_matches_mean

    with open(dump_file_name, "w+") as dump:
        dump.write('# Results section (good_mean, bad_mean, mean_difference)\n')
        dump.write(','.join(
            map(str,
                [good_matches_mean, bad_matches_mean, mean_difference, '\n'])))

    config.dump_to_file(dump_file_name)

    result = ','.join(
        map(str,
            [suite_name, good_matches_mean, bad_matches_mean, mean_difference
             ]))
    # print("good matches mean is {0}".format(good_matches_mean))
    # print("bad matches mean is {0}".format(bad_matches_mean))

    return result
Example 25
def main(args):
    pa = parameters.Parameters()
    # Double check that the directory exists
    directory = args["input_path"]
    if not os.path.isdir(directory):
        print("Please specify a valid path to your data set")
        return

    # Iterate through all files in the specified directory
    files_to_process = os.listdir(directory)
    for file_name in files_to_process:
        full_path = os.path.join(directory, file_name)
        image = cv2.imread(full_path, 0)
        print("Processing", file_name)
        #image = helpers.bilateral_filter(image,9,150,150)
        image_dict = {}
        if args["convert_to_temp"]:
            image_dict["low"], image_dict["high"] = helpers.read_scale(
                image, pa)
        if args["filter_scale"]:
            # TODO(charlie): implement
            pass
        else:
            image = crop_scale(image)

        if args["filter_seek_logo"]:
            # TODO(charlie): implement
            pass
        else:
            image = crop_seek_logo(image)

        if args["filter_location"]:
            # TODO(charlie): implement
            pass
        else:
            image = crop_location(image)

        image = resize(image)
        if args["convert_to_temp"]:
            abs_image = helpers.grayscale_to_temp(image, image_dict["low"],
                                                  image_dict["high"])
            np.save(os.path.join(args["output_path"],
                                 file_name.split(".")[0]), abs_image)
        else:
            cv2.imwrite(os.path.join(args["output_path"], file_name), image)
Example 26
    def __call__(self, p):
        p = params.Parameters(p, npl=self._npl, nobs=self._nobs)

        # Check bounds
        if np.any(p < self._pmin) or np.any(p > self._pmax):
            return float('-inf')

        # Ensure unique labeling of planets: in increasing order of
        # period
        if p.npl > 1 and np.any(p.P[1:] < p.P[:-1]):
            return float('-inf')

        pr = 0.0

        # Uniform prior on velocity offset

        # Jeffreys scale prior on sigma0
        for s in p.sigma0:
            pr -= np.sum(np.log(s))

        # Jeffreys scale prior on sigma
        for s in p.sigma:
            pr -= np.sum(np.log(s))

        # Jeffreys scale prior on tau
        for t in p.tau:
            pr -= np.sum(np.log(t))

        # Jeffreys scale prior on K
        for k in p.K:
            pr -= np.sum(np.log(k))

        # Jeffreys scale prior on n
        for n in p.n:
            pr -= np.sum(np.log(n))

        # Uniform prior on chi

        # Thermal prior on e
        for e in p.e:
            pr += np.sum(np.log(e))

        # Uniform prior on omega

        return pr
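
Each pr -= np.sum(np.log(s)) term adds log(1/s), the (unnormalized) log-density of a Jeffreys scale prior p(s) ∝ 1/s. A small numeric check of that equivalence (standalone, not from the source):

import numpy as np

s = np.array([0.5, 2.0, 10.0])
assert np.isclose(np.sum(np.log(1.0 / s)), -np.sum(np.log(s)))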
Example 27
def test_compact_speed():
    pa = parameters.Parameters()
    pa.simu_len = 50
    pa.num_ex = 10
    pa.new_job_rate = 0.3
    pa.compute_dependent_parameters()

    env = Env(pa, render=False, repre='compact')

    import other_agents
    import time

    start_time = time.time()
    for i in range(100000):
        a = other_agents.get_sjf_action(env.machine, env.job_slot)
        env.step(a)
    end_time = time.time()
    print("- Elapsed time:", end_time - start_time, "sec -")
Example 28
def build_graph(utility):
    """ Build Neural Programmer graph """
    # creates TF graph and calls evaluator
    batch_size = utility.FLAGS.batch_size
    model_dir = utility.FLAGS.output_dir + "/model" + utility.FLAGS.job_id + "/"
    # create all parameters of the model
    param_class = parameters.Parameters(utility)
    params, global_step, init = param_class.parameters(utility)
    key = "test"  #if (FLAGS.evaluator_job) else "train"
    graph = model.Graph(utility,
                        batch_size,
                        utility.FLAGS.max_passes,
                        mode="test")
    graph.create_graph(params, global_step)
    sess = tf.InteractiveSession()
    sess.run(init.name)
    sess.run(graph.init_op.name)
    return sess, graph, params
Example 29
    def new_model(self):
        '''
        new_model(self) --> None

        Reinitializes the model, removing all traces of the
        previous model.
        '''
        self.data = data.DataList()
        self.script = ''
        self.parameters = parameters.Parameters()

        #self.fom_func = default_fom_func
        self.fom_func = fom_funcs.log
        self._reset_module()

        # Temporary stuff that needs to keep track on
        self.filename = ''
        self.saved = False
Example 30
    def __call__(self, p):
        nobs = len(self.rvs)
        npl = (p.shape[-1] - 4 * nobs) // 5

        p = params.Parameters(p, nobs=nobs, npl=npl)

        ll = 0.0

        for t, rvobs, V, sigma0, sigma, tau in zip(self.ts, self.rvs, p.V,
                                                   p.sigma0, p.sigma, p.tau):
            residual = self.residuals(t, rvobs, p)

            cov = generate_covariance(t, sigma0, sigma, tau)

            ll += correlated_gaussian_loglikelihood(residual,
                                                    V * np.ones_like(residual),
                                                    cov)

        return ll