Example 1
def run(acc_operation):
    est = Estimator()

    a.write(b'\x01')
    lastTp = time.time()

    while True:
        data = a.read(14)
        raw_acc = read_data(data, 0)
        raw_gyr = read_data(data, 8)

        # print(data)

        tp = time.time()
        dt = tp - lastTp
        lastTp = tp

        # attitude estimation
        vp = est.feed_data(dt, raw_gyr, raw_acc)
        # print(vp, end="\r")
        for i in range(3):
            vp[i] = vp[i] * TIMES
        # print(vp)

        ret, logits = acc_operation.feed_data(vp)
        operation(ret, logits)
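`read_data` and the serial port `a` come from the enclosing module and are not shown. A purely hypothetical sketch of the parser, assuming each sensor delivers three little-endian int16 samples (the 14-byte frame with offsets 0 and 8 suggests such a layout, but this is an assumption):

    import struct

    def read_data(frame, offset):
        # unpack three signed 16-bit values starting at `offset`
        return list(struct.unpack_from('<3h', frame, offset))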
Example 2
def run(model_dir):
    acce_operation = Acce_operation(model_dir)
    est = Estimator()

    a.write(b'\x01')
    window_data = np.zeros((3, TOTAL_WINDOW_SIZE), dtype=np.float32)
    index = 0
    sample_num = 0
    lastTp = time.time()

    while True:
        data = a.read(14)
        raw_acc = read_data(data, 0)
        raw_gyr = read_data(data, 8)

        tp = time.time()
        dt = tp - lastTp
        lastTp = tp

        # attitude estimation
        vp = est.feed_data(dt, raw_gyr, raw_acc)
        # print(vp, end="\r")
        for i in range(3):
            window_data[i][index] = vp[i]

        index += 1
        sample_num += 1
        if index == TOTAL_WINDOW_SIZE:
            index = 0
        if sample_num == SAMPLE_SIZE:
            sample_num = 0
            operate(acce_operation, window_data, index)
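Here `window_data` is a ring buffer: `index` wraps at TOTAL_WINDOW_SIZE, so the oldest sample sits immediately after `index`. `operate` is not shown; a minimal sketch of how it might unroll the buffer into chronological order (the same trick Example 26 below uses):

    ret_data = np.concatenate([window_data[:, index:], window_data[:, :index]], axis=1)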
Example 3
    def __init__(self, fb, fs, pairs=None):
        Estimator.__init__(self, fs, pairs)

        self.csd_nfft = 256
        self.csd_noverlap = self.csd_nfft // 2  # overlap must be an integer sample count
        self.fb = fb
        self.fs = fs
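The call site is not shown; a hedged sketch of how these settings would typically feed a cross-spectral-density computation (that they are meant for `scipy.signal.csd` is an assumption based on the `csd_` prefix):

    from scipy import signal

    # x, y: the two signals whose cross-spectral density is estimated
    f, Pxy = signal.csd(x, y, fs=self.fs, nperseg=self.csd_nfft, noverlap=self.csd_noverlap)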
Example 4
    def find(self):
        estimator = Estimator()
        frame = self.video.get_next_frame()
        while frame is not None:
            start = time.time()

            # processing goes here
            predictions = self.get_predictions(frame)
            if not self.is_nsfw(predictions) and not self.maybe_nsfw(
                    predictions):
                self.push_to_summary(self.current_frame_category,
                                     self.video.current_seconds)
                self.video_writer.write(frame)
            else:
                self.push_to_summary(self.current_frame_category,
                                     self.video.current_seconds)

                opt_blur = self.options["blur"]
                if opt_blur is not None:
                    x, y = opt_blur.split(",")
                    self.video_writer.write(blur(frame, (int(x), int(y))))

            end = time.time()
            estimator.log_progress(start, end, self.video.current_frame_num,
                                   self.video.frames_count)

            frame = self.video.get_next_frame()
Example 5
    def __init__(self, name, logger, scheduler_queue, hub_queue, timer):
        self.name = name  # e.g., 'UTIL'
        self.logger = logger
        self.scheduler_queue = scheduler_queue
        self.hub_queue = hub_queue
        self.timer = timer

        self.estimator = Estimator("estimator", self.logger)

        self.cluster_num_cpu = None
        self.cluster_num_mem = None
        self.cluster_num_bw = None
        self.cluster_num_gpu = None
        self.cluster_used_cpu = 0
        self.cluster_used_mem = 0
        self.cluster_used_gpu = 0
        self.cluster_used_bw = 0
        self._set_cluster_config()

        self.queueing_jobs = Queue.PriorityQueue()
        self.uncompleted_jobs = []
        self.completed_jobs = []
        self.cur_ts_completed_jobs = []
        self.not_ready_jobs = set()

        self.exit_flag = False

        self.msg_handler = threading.Thread(target=self._msg_handle, args=())
        self.msg_handler.start()
        self.scaling_overhead = 0
        self.testing_overhead = 0
Example 6
    def __init__(self, fname=None):
        # Default values, see explanations below:
        taskDic = {
            'taskName': 'total energy',
            'tolerance': '1',
            'nMaxSteps': '10'
        }
        Estimator.__init__(self, fname)
        Launcher.__init__(self, fname)
        # value to converge with respect to k-points or energy cutoffs;
        # can be 'total energy', 'single phonon', 'geometry', or 'multiple phonon':
        self.taskName = self.config.get('Task', 'taskName')

        # convergence criterion in percent:
        self.tolerance = self.config.getfloat('Task', 'tolerance')

        # maximum number of optimization steps:
        self.nMaxSteps = self.config.getint('Task', 'nMaxSteps')

        self.lookupTable = {
            'total energy':    (self.pwscfLauncher, self.getTotalEnergy),
            'single phonon':   (self.singlePhononLauncher, self.getSinglePhonon),
            'geometry':        (self.pwscfLauncher, self.getLatticeParameters),
            'multiple phonon': (self.multiPhononLauncher, self.getMultiPhonon)
        }

        assert self.taskName in self.lookupTable, "Convergence \
Example 7
def estimate_mu(verbose=True):
    sections_docs_training = read_training_docs()
    estimator = Estimator(**sections_docs_training)
    report = '\nEstimated mu: {}\n\n'.format(estimator.calculate_mu())
    with codecs.open('report.txt', 'w', encoding='utf-8') as file:
        file.write(report)
    if verbose:
        print(report)
Example 8
def experiment():

    args = parse_args()
    train_data = VIDataset(config.root_dir, is_train=True)
    eval_data = VIDataset(config.root_dir, is_train=False)

    estimator = Estimator(train_data, eval_data)
    estimator.train_roam(args)
Example 9
def exp(args):
    estimator = Estimator(emb_dim=args.emb_dim,
                          n_hidden=args.n_hidden,
                          bidirectional=args.bi,
                          n_layer=args.n_layer,
                          dropout=args.dropout,
                          lr=args.lr,
                          decay=args.decay,
                          lr_p=args.lr_p,
                          clip=args.clip,
                          batch_size=args.batch,
                          epoch_num=args.epoch_num,
                          cuda=args.cuda,
                          path=args.path)
    vocab_size = estimator.vocab_size
    (train_batches,
     test_batches), id2rule, productions = estimator.dataset.parse(
         grammar_dict, root_rule)

    nonterminal2id, id2nonterminal = estimator.build_decode_dict(productions)

    model = Seq2seqModel(vocab_size=vocab_size,
                         emb_dim=args.emb_dim,
                         n_hidden=args.n_hidden,
                         output_size=estimator.dataset.grammar.num_rules + 1,
                         bidirectional=args.bi,
                         n_layer=args.n_layer,
                         dropout=args.dropout)
    m = torch.load('dirty/model')

    model.load_state_dict(m['net'])
    model.cuda()

    model.eval()
    total = 0
    true_example = 0
    for batch in test_batches:
        batch_actions = model.batch_decode(batch.questions, batch.src_lens,
                                           PAD, 200, nonterminal2id,
                                           id2nonterminal)[0]
        # id2rule could perhaps be replaced by productions
        batch_actions = torch.stack(batch_actions).transpose(0, 1)
        total += len(batch_actions)
        for actions, logical_form in zip(batch_actions, batch.logical_forms):
            for i in range(len(actions)):
                if int(actions[i]) == 0:
                    actions = actions[:i]
                    break
            rule_str = [id2rule[int(act)] for act in actions]
            rule = normalize_prolog_variable_names(
                action_sequence_to_logical_form(rule_str))
            if rule == logical_form:
                true_example += 1
            else:
                print(rule)
                print(logical_form)
                print(rule_str)
                print('*' * 80)
Example 10
def __run(params, single_run, fixed_votes, token):
    print(f"Running {token}")
    start_time = time.time()
    estimator = Estimator(params)
    output = estimator.run(single_run, fixed_votes)
    r.set(token, output.to_json(orient='records'))
    r.set(f"{token}_status", 'DONE')
    total_time = time.time() - start_time
    print(f"{token} DONE. Time: {total_time} seconds")
Example 11
    def __init__(self, node_list, gpu_per_node, job_executor):
        self.job_queue = []
        self.init_job_queue = queue.Queue()
        self.cluster_resource = self.cluster_init(node_list, gpu_per_node)
        self.gpu_per_node = gpu_per_node
        self.current_wait_job = None
        self.job_executor = job_executor
        self.unpredicted_job = queue.Queue()
        self.estimator = Estimator()
        self.msg_handler = threading.Thread(target=self._msg_handle, args=())
        self.msg_handler.start()
Example 12
class PlantLogic:
    def __init__(self):
        self.data_access = DataAccess()
        self.estimator = Estimator()

    def commit_measurement(self, plant_id: int, water: int, temperature: float,
                           humidity: int, light: int, moisture: int) -> Tuple[bool, float]:
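        # lux -> mmol/m^2/h: assumes ~54 lux per umol/m^2/s of PAR (sunlight), x3600 s/h, /1000 umol -> mmol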
        light_mmol = int(round(light / 54 * 3600 / 1000))
        self.data_access.save_measurement(plant_id, water, temperature,
                                          humidity, light_mmol, moisture)
        plant_data = self.data_access.get_plant_data(plant_id)
        collected_light = self.data_access.get_todays_light_exposure(plant_id)
        light_on = False
        water_time = 0
        if plant_data.dark_hours_start > time() > plant_data.dark_hours_end:
            estimated_total_light = self.estimator.estimate_total_light(
                collected_light, plant_data.dark_hours_start)
            if estimated_total_light < plant_data.light_low:
                light_on = True
        if plant_data.silent_hours_start > time() > plant_data.silent_hours_end:
            if moisture < plant_data.moisture_low + 10:
                # assumed: pot cross-section x 0.04 of water depth, over a 280-units/h pump rate, in seconds
                water_time = (((plant_data.pot_size / 2) ** 2 * np.pi * 0.04) / 280) * 3600
        return light_on, water_time

    def configure(self, mac_address: str) -> int:
        return self.data_access.configure(mac_address)
Example 13
    def __init__(self, step_size=0.1, epsilon=0.1, symbol=0):
        self.step_size = step_size
        self.epsilon = epsilon
        self.previous_state = State()
        self.state = None
        self.symbol = symbol
        self.td_errors = []

        self.estimator = Estimator()
        self.policy = make_epsilon_greedy_policy(self.estimator)
        self.action = (0, 0)

        self.actions = []
        for i in range(BOARD_ROWS):
            for j in range(BOARD_COLS):
                self.actions.append((i, j))
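`make_epsilon_greedy_policy` is not shown; a minimal sketch of the usual construction, assuming a hypothetical `estimator.value(state, action)` scoring interface:

    import numpy as np

    def make_epsilon_greedy_policy(estimator):
        def policy(state, actions, epsilon):
            # explore with probability epsilon, otherwise act greedily
            if np.random.rand() < epsilon:
                return actions[np.random.randint(len(actions))]
            values = [estimator.value(state, a) for a in actions]
            return actions[int(np.argmax(values))]
        return policy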
Example 14
    def __init__(
        self,
        model,
        space,
        max_evals=200,
        random_seed=100,
        is_maximize=True,
        warm_start=False,
        trials_file_path=None,
        num_repeats=1,
        log_file_path=None,
        params_mapping={
            'n_estimators': int,
            'num_leaves': int,
            'max_depth': int,
            'min_samples_leaf': int
        },
    ):
        '''
        model: model for which the hyperparameter tuning is to be done
        space: search space for the model
        max_evals: number of rounds to run (set max_evals to 1 if you're passing catb_default_space for hyperparameter tuning)
        trials_file_path: file to dump trials of the experiment; also loaded, if present, to continue from where it left off
        log_file_path: file path for dumping logs
        params_mapping: dict of param key to a callable used to map the value of that key
        '''
        self.model_estimator = Estimator(
            model) if model.__class__.__name__ != "Estimator" else model

        if isinstance(space, dict):
            self.space = space
        elif isinstance(space, str):
            self.space = eval(space)  # NOTE: evaluates a user-supplied string; trusted input only
        else:
            raise ValueError("space not defined!")
        self.max_evals = max_evals
        self.trials_file_path = trials_file_path
        self.trials = self.load_trails()
        self.log_file_path = log_file_path
        self.params_mapping = params_mapping
        self.random_seed = random_seed
        self.warm_start = warm_start
        self.num_repeats = num_repeats
        self.is_maximize = is_maximize
        self.columns = None

        setup_logging(log_file=log_file_path)
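`params_mapping` pairs a parameter name with a callable. A sketch of the `_params_mapping` helper invoked later in `objective` (Example 30), assuming it simply applies each callable to the sampled value (e.g. casting hyperopt's float samples back to ints):

    def _params_mapping(self, params):
        return {k: self.params_mapping[k](v) if k in self.params_mapping else v
                for k, v in params.items()}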
Example 15
def experiment():

    train_input_fn = generate_input_fn(is_train=True,
                                       tfrecords_path=config.tfrecords_path,
                                       batch_size=config.batch_size,
                                       time_step=config.time_step)

    eval_input_fn = generate_input_fn(is_train=False,
                                      tfrecords_path=config.tfrecords_path,
                                      batch_size=config.batch_size_eval,
                                      time_step=config.time_step_eval)

    estimator = Estimator(train_input_fn=train_input_fn,
                          eval_input_fn=eval_input_fn,
                          model_fn=model_fn)

    estimator.train()
Example 16
def create_model():
  tokenizer = Tokenizer()
  estimator = Estimator(tokenizer)
  trainer = Trainer(tokenizer)
  estimator.load_name_cond_probs("data/probabilities/tokenized_authors_prob.txt")
  estimator.load_word_cond_probs("data/probabilities/conditional_not_a_name_prob.txt")
  # estimator.load_conditional_probabilities("data/probabilities/conditional_probs_4.txt")
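  # NOTE: 'fold' is not defined in this snippet; it is assumed to come from the enclosing module scope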
  estimator.load_conditional_probabilities("data/probabilities/fold_" + str(fold) + ".txt")
  model = Model(tokenizer, estimator)
  return model
Example 17
def main():
    rospy.init_node("gpss_mower_controller")

    # Read and set parameters.
    robot_id = rospy.get_param("~robot_id")
    sampling_period = 1.0 / rospy.get_param("~sampling_frequency")
    horizon = rospy.get_param("~horizon")
    desire_speed = rospy.get_param("~desire_speed")
    deceleration_distance = rospy.get_param("~deceleration_distance")
    photo_activated = rospy.get_param("~photo_activated")
    print_status = rospy.get_param("~print_status")
    log_to_file = rospy.get_param("~log_to_file")
    max_speed = rospy.get_param("~max_forward_velocity")
    max_ang_speed = rospy.get_param("~max_angular_velocity")
    max_acc = rospy.get_param("~max_tan_acceleration")
    weight_x = rospy.get_param("~weight_x")
    weight_y = rospy.get_param("~weight_y")
    weight_theta = rospy.get_param("~weight_theta")
    weight_v = rospy.get_param("~weight_v")
    weight_w = rospy.get_param("~weight_w")

    min_speed = -max_speed
    min_ang_speed = -max_ang_speed
    min_acc = -max_acc
    mpc_weights = [weight_x, weight_y, weight_theta, weight_v, weight_w]
    constraints = [
        max_speed, min_speed, max_ang_speed, min_ang_speed, max_acc, min_acc
    ]

    # Run the controller
    regulator = ltv_mpc.Regulator(robot_id, sampling_period, horizon,
                                  mpc_weights, constraints)

    TaskReceptor(robot_id, regulator, desire_speed, deceleration_distance)

    tf_manager = TfMng(robot_id, photo_activated)
    if photo_activated:
        Estimator(robot_id, regulator, tf_manager, Estimator.EKF_PHOTO_ODOM,
                  print_status, log_to_file)
    else:
        Estimator(robot_id, regulator, tf_manager, Estimator.EKF_ODOM,
                  print_status, log_to_file)

    # Blocks until ROS node is shutdown.
    rospy.spin()
Example 18
    def fit(self, X_train, y_train):
        config = self.config.copy()
        self.fitted_objects = {}
        for model in config["Pipelines"]["Model Layer"]:
            name = model['name']
            if model["source"] == "Estimator":
                # creating estimator instance
                est = Estimator(model["model"],
                                **model["params"],
                                validation_scheme=self.validation_scheme,
                                n_splits=self.n_splits)

                # performing Hyperparameter tuning if present
                if model.get('hpt'):
                    hpt = HyperOptModelSelection(model=est,
                                                 **model['hpt']['params'])
                    hpt.fit(X_train, y_train)
                    est = hpt.best_estimator

                # fitting estimator
                est.fit_transform(X_train, y_train)

                # saving serialized fitted model
                self.fitted_objects[name] = est.to_serialized_object()

                if model.get("pickle_path"):
                    # saving model pickle, used for inferencing
                    est.save_model(model.get("pickle_path"))
Example 19
    def fit(self, **kwds):
        """generate a learned model, trained on the given data

    Input:
        data: a mystic legacydata.dataset (or callable model, y = model(x))
        rnd: bool, if False, treat the model as deterministic [default: True]
        cached: bool, if True, use a mystic.cache [default: False]

    NOTE:
        any additional keyword arguments will be passed to the estimator

    NOTE:
        if data is a model, estimator will use model's cached archive
        """
        self.__kwds__.update(kwds)
        cached = self.__kwds__.pop('cached', False)
        archive = data = self.__kwds__.pop('data', None)
        self.rnd = self.__kwds__.pop('rnd', self.rnd) and bool(self.__kwds__.get('noise', False))
        if callable(data):  #XXX: is a model, allow this?
            data = sample(data, bounds=None, pts=0)  #XXX: axis? multivalue?
        elif isinstance(data, type('')):  #XXX: is a name, allow this?
            import mystic.cache as mc
            import dataset as ds
            data = ds.from_archive(mc.archive.read(data))
        x = getattr(data, 'coords', getattr(data, 'x', None))
        z = getattr(data, 'values', getattr(data, 'y', None))
        from estimator import Estimator
        estm = Estimator(x, z, **self.__kwds__)
        self.__func__ = estm.Train()  #XXX: Error for zero-size?
        self.__model__ = _init_axis(estm.model)
        self.__model__.__name__ = self.__name__
        self.__kwds__['data'] = archive
        if cached:  #FIXME: clear the archive??? generate new uid name?
            self.__kwds__['cached'] = True
            self._OUQModel__init_cache()
            if hasattr(self.__model__, '__cache__'):
                c = self.__model__.__cache__()
                c.clear()
        else:
            self.__kwds__['cached'] = False
        return
Example 20
def cmd_mode():
    parser = argparse.ArgumentParser()
    parser.add_argument('--image', type=str, required=True, help='input image')
    parser.add_argument('--output', type=str, default='result.png', help='output image')
    parser.add_argument('--model', type=str, default='model.h5', help='path to the weights file')

    args = parser.parse_args()
    output = args.output
    keras_weights_file = args.model

    estimator = Estimator(keras_weights_file)

    # generate image with body parts
    if os.path.isdir(args.image):
        for file in os.listdir(args.image):
            fullpath = os.path.join(os.path.abspath(args.image), file)
            fpath, fname = os.path.split(fullpath)
            name, ext = os.path.splitext(fname)
            print(fullpath)
            if not os.path.isfile(fullpath) or ext not in image_exts:
                continue
            tic = time.time()
            frame = cv2.imread(fullpath)
            canvas = estimator.process_all(frame)
            toc = time.time()
            print('processing time is %.5fs' % (toc - tic))
            # saving
            output = os.path.join(fpath, '%s_processed%s' % (name, ext))
            cv2.imwrite(output, canvas)
    else:
        fullpath = os.path.abspath(args.image)
        fpath, fname = os.path.split(fullpath)
        name, ext = os.path.splitext(fname)
        if not os.path.isfile(fullpath) or ext not in image_exts:
            return
        tic = time.time()
        frame = cv2.imread(fullpath)
        canvas = estimator.process_all(frame)
        toc = time.time()
        print('processing time is %.5fs' % (toc - tic))
        # saving
        output = os.path.join(fpath, '%s_processed%s' % (name, ext))
        cv2.imwrite(output, canvas)
Example 21
    def __init__(self):

        self.on = True
        self.motor_on = False
        self.save_on = False
        self.mode = 0

        self.t0 = datetime.datetime.now()
        self.t = 0.0
        self.t_pre = 0.0
        self.freq_imu = 0.0
        self.freq_gps = 0.0
        self.freq_control = 0.0
        self.freq_log = 0.0

        self.x = np.zeros(3)
        self.v = np.zeros(3)
        self.a = np.zeros(3)
        self.R = np.identity(3)
        self.W = np.zeros(3)

        self.x_offset = np.zeros(3)
        self.yaw_offset = 0.0

        self.g = 9.81
        self.ge3 = np.array([0.0, 0.0, self.g])

        # Gazebo uses ENU frame, but NED frame is used in FDCL.
        self.R_fg = np.array([[1.0, 0.0, 0.0], [0.0, -1.0, 0.0],
                              [0.0, 0.0, -1.0]])

        self.V_R_imu = np.diag([0.01, 0.01, 0.01])
        self.V_x_gps = np.diag([0.01, 0.01, 0.01])
        self.V_v_gps = np.diag([0.01, 0.01, 0.01])

        self.control = Control()
        self.control.use_integral = True  # Enable integral control

        self.estimator = Estimator()
        self.trajectory = Trajectory()

        self.lock = threading.Lock()
Example 22
    def estimate_price(self, stock_name: str, year: str, month: str,
                       day: str) -> float:
        """
        Estimates price of the stock sent as parameter. Based on the date which is sent as parameter, it takes the
        month from the date and filters all the stock_name stocks which have a date in the specified month and does
        the average of the mid range price (average between high and low price)
        :param stock_name: stock name for which we try to predict
        :param year:
        :param month:
        :param day:
        :return: returns the prediction
        """
        reader = Reader()
        self._dateset = reader.load_data("./dow_jones_index.csv")
        self._dateset = reader.clean_data(self._dateset)

        estimator = Estimator()
        estimator.fit(self._dateset)
        stock_prediction = estimator.predict(stock_name, year, month, day)
        return stock_prediction
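A sketch of the mid-range averaging the docstring describes, assuming the cleaned dataset is a pandas DataFrame with hypothetical 'stock', 'date', 'high', and 'low' columns:

    subset = df[(df['stock'] == stock_name) & (df['date'].dt.month == int(month))]
    prediction = ((subset['high'] + subset['low']) / 2).mean()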
Example 23
    def predict(self, X_test):
        config = self.config.copy()
        df = pd.DataFrame({"id": range(len(X_test))})
        for model in config["Pipelines"]["Model Layer"]:
            name = model['name']
            if model["source"] == "Estimator":
                # creating estimator instance
                if hasattr(self, 'fitted_objects'):
                    # loading from serialized object in memory
                    est = Estimator.from_serialized_object(
                        self.fitted_objects[name])
                elif model.get("pickle_path"):
                    # loading model from saved pickle
                    est = Estimator.load_model(model.get("pickle_path"))
                else:
                    raise ValueError("Model not fitted/File Not found!")

                # generating predictions
                df[name] = est.transform(X_test)

        return df
Example 24
    def __init__(self,
                 load=None,
                 filepath='best_estimator.h5',
                 num_episodes=400,
                 eval_episodes=20,
                 update_freq=80,
                 mcts_iters=100,
                 tau_cutoff=20):
        self.num_episodes = num_episodes
        self.eval_episodes = eval_episodes
        self.update_freq = update_freq
        self.mcts_iters = mcts_iters
        self.tau_cutoff = tau_cutoff
        self.filepath = filepath
        to_load = load or filepath
        if os.path.isfile(to_load):
            self.estimator = Estimator(State.raw_shape,
                                       len(State.domain),
                                       filepath=to_load)
        else:
            self.estimator = Estimator(State.raw_shape, len(State.domain))
Example 25
    def __init__(self, args):
        self.estimator = Estimator(emb_dim=args.emb_dim,
                                   n_hidden=args.n_hidden,
                                   bidirectional=args.bi,
                                   n_layer=args.n_layer,
                                   dropout=args.dropout,
                                   lr=args.lr,
                                   decay=args.decay,
                                   lr_p=args.lr_p,
                                   clip=args.clip,
                                   batch_size=args.batch,
                                   epoch_num=args.epoch_num,
                                   cuda=args.cuda,
                                   path=args.path)

        self.transformer = Transformer(prolog_grammar.GRAMMAR_DICTIONARY,
                                       prolog_grammar.ROOT_RULE)

        self.performances = []
        self.actions = []
        self.path = args.path
Example 26
def run():
    pl.ion()
    pl.show()
    est = Estimator()
    a.write(b'\x01')
    window_data = np.zeros((3, WINDOW_SIZE), dtype=int)
    pl.show()
    index = 0
    sample_num = 0
    lastTp = time.time()
    for i in range(3):
        line, = ax.plot(window_data[i])
        lines.append(line)
    while True:
        data = a.read(14)
        raw_acc = read_data(data, 0)
        raw_gyr = read_data(data, 8)

        tp = time.time()
        dt = tp - lastTp
        lastTp = tp

        # attitude estimation
        vp = est.feed_data(dt, raw_gyr, raw_acc)
        print(vp, end="\r")
        for i in range(3):
            window_data[i][index] = vp[i] * 512

        index += 1
        sample_num += 1
        if index == WINDOW_SIZE:
            index = 0
        if sample_num == SAMPLE_SIZE:
            sample_num = 0
            ret_data = np.concatenate(
                [window_data[:, index:], window_data[:, :index]], axis=1)
            plot(ret_data)
Example 27
    def __init__(self, cfg):
        """
        All factors are loaded from configuration.
        """
        self.cfg = cfg
        self.voc = Vocabulary()
        self.unigram = Vocabulary()
        self.tags = list()
        self.meta_info = dict()
        self.sentences = list()

        file_name = self.cfg.file_name('use_lemma', 'word_freq', 'k', 'len_threshold',
                                       'no_delta_match', 'filter', 'preprocessed')
        file_name = self.cfg.cache('scenario', file_name)
        if os.path.exists(file_name):
            with open(file_name, 'rb') as fp:
                self.data = pickle.load(fp)
                self._init_executor()
                self._build_voc()
        else:
            measure('preprocessing')
            self.data = load_roto_wire()
            self._build_voc()
            self._convert_to_matrix()
            self._init_executor()
            self._pre_calculate()
            with open(file_name, 'wb') as fp:
                pickle.dump(self.data, fp)

        self.special_tokens = self.voc[special_tokens]
        self.estimator = Estimator(self.sentences, self.meta_info, len(self.unigram), len(self.voc),
                                   self.special_tokens, self.cfg)

        self.tag_maps = simplify_tags(self.tags)

        measure('End-trainer-init')
Example 28
class acc_module(threading.Thread):
    def __init__(self, feed_data_func):
        threading.Thread.__init__(self)
        self.feed_data = feed_data_func
        self.serial_port = serial.Serial('/dev/ttyACM0', 115200)
        self.est = Estimator()

    def run(self):
        lastTp = time.time()
        while True:
            data = self.serial_port.read(14)
            raw_acc = read_data(data, 0)
            raw_gyr = read_data(data, 8)

            tp = time.time()
            dt = tp - lastTp
            lastTp = tp

            # attitude estimation
            vp = self.est.feed_data(dt, raw_gyr, raw_acc)
            self.feed_data(vp)
Example 29
    def fit(self, X, y, columns=None):
        logger.info(
            "Starting HyperOpt {} Evals with Dataset Shapes X={}, y={}".format(
                self.max_evals, X.shape, y.shape))
        self.x, self.y = copy.deepcopy(X), copy.deepcopy(y)
        self.columns = columns
        if not self.warm_start:
            self.trials = Trials()

        best = self()
        self.save_trails()

        self.best_score = self.get_best_result()['misc']['eval_score']
        self.best_params = self.get_best_params()
        logger.info("Best Score- {}, Best Params- {}".format(
            self.best_score, self.best_params))

        model_estimator_params = self.model_estimator.get_params()
        model_estimator_params['model']['params'].update(self.best_params)
        self.best_estimator = Estimator(**model_estimator_params)
        self.best_model = self.best_estimator.model
        del self.x, self.y
        return self
Example 30
    def objective(self, params):
        params = self._params_mapping(params)
        self.iteration += 1
        logger.debug("\nIteration: {}, Training with params: {}".format(
            self.iteration, params))
        model_estimator_params = self.model_estimator.get_params()
        # updating model params
        model_estimator_params['model']['params'].update(params)
        model_estimator = Estimator(**model_estimator_params)
        score = evaluate(self.x,
                         self.y,
                         model_estimator,
                         num_repeats=self.num_repeats,
                         fs_individual=self.columns)

        loss = -score['eval_score'] if self.is_maximize else score['eval_score']

        logger.debug("Score - {}, Std - {}, Eval Score - {}".format(
            score["avg_cv_score"], np.std(score['cv_scores']),
            score["eval_score"]))
        logger.debug("Score across folds - {}.".format(score["cv_scores"]))
        return {'loss': loss, 'status': STATUS_OK, 'misc': score}
Example 31
parser.add_argument('-d', '--debug',
                    help="Verbose output needed", type=int, default=0)
sysargs = parser.parse_args()

# verbose printing for different debug levels
def verbose_print(level, *args):
    if level <= int(sysargs.debug):
        for arg in args:
            print(arg, end=' ')

e = Event()
r_arrival = RandDist(*P[sysargs.arrival_process])
r_service = RandDist(5, triangular, {'b': 0.99})
e.add_event("arrival", r_arrival)
e.add_event("serviced", r_service)

# 1.96 is the z critical value for a 95% confidence interval
est = Estimator(1.96, "95%")
for i in range(sysargs.num_reps):
    q = do_rep(e)

    est.process_next_val(q)

print(P[sysargs.arrival_process])
print("Point Estimate:", est.get_mean())
print(est.get_conf_interval())
print(est.get_rel_error())


Example 32
    def __init__(self, priors, data, model=None):
        self.data = data
        self.model = model

        Estimator.__init__(self, priors)