Example #1
    def __init__(self, api_host, gatsby_host, alias_host, num_threads):
        self.select_clause = "mpidStr AS 'mpid', priceRange, aggregatedRatings, modelTitle AS 'title', brandName, categoryNamePath, searchScore, storeId, image"
        self.page_size = 500
        self.country_code = 356
        self.gatsby_query = ProductGatsbyQuery(self.select_clause,
                                               self.page_size,
                                               self.country_code,
                                               "/products/search2",
                                               gatsby_host)
        self.gatsbyPB_query = ProductGatsbyQuery(self.select_clause,
                                                 self.page_size,
                                                 self.country_code,
                                                 "/products/search2",
                                                 gatsby_host)
        self.api_query = ProductApiQuery("http", api_host, "/v2.1/search",
                                         "IN", 50)
        self.alias_service = DataCollector(
            ProductAliasQuery("http", alias_host, "/search", "IN"))
        self.ranking_model = RankingModel()
        self.gatsby_queries = Queue(2)
        self.gatsbyPB_queries = Queue(2)
        self.api_queries = Queue(2)

        worker = Worker(DataCollector(self.gatsby_query), self.ranking_model,
                        self.gatsby_queries)
        # worker.daemon = True
        print "currently running thread:\t", worker.getName()
        worker.start()

        worker = Worker(DataCollector(self.gatsbyPB_query), self.ranking_model,
                        self.gatsbyPB_queries)
        print "currently running thread:\t", worker.getName()
        # worker.daemon = True
        worker.start()

        # for thread in range(2):
        #     worker = Worker(DataCollector(self.gatsbyPB_query), self.ranking_model, self.gatsbyPB_queries)
        #     print "currently running thread:\t", worker.getName()
        #     worker.daemon = True
        #     worker.start()
        #
        # for thread in range(2):
        #     worker = Worker(DataCollector(self.gatsby_query), self.ranking_model, self.gatsby_queries)
        #     worker.daemon = True
        #     worker.start()

        worker = Worker(DataCollector(self.api_query), self.ranking_model,
                        self.api_queries)
        print "currently running thread:\t", worker.getName()
        # worker.daemon = True
        worker.start()
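
The Worker class itself is not shown here. A minimal sketch of what it might look like, assuming it is a threading.Thread subclass that pulls query jobs from its queue, fetches results through its DataCollector, and hands them to the shared ranking model (the collect and rank method names are assumptions, not the original API):

import threading

class Worker(threading.Thread):
    def __init__(self, collector, ranking_model, queries):
        threading.Thread.__init__(self)
        self.collector = collector          # DataCollector wrapping one query object
        self.ranking_model = ranking_model  # shared RankingModel instance
        self.queries = queries              # Queue of incoming query jobs

    def run(self):
        while True:
            query = self.queries.get()               # block until a job arrives
            results = self.collector.collect(query)  # assumed DataCollector method
            self.ranking_model.rank(results)         # assumed RankingModel method
            self.queries.task_done()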
Example #2
def run_random_search(verbose,
                      num_diff_experiments,
                      num_repeat_experiment,
                      allow_duplicates=False,
                      df_path=None,
                      overwrite=True,
                      data_to_collect=POSSIBLE_DATA,
                      MVP_key='waitingTime',
                      save_model=True):
    grid = load_constants('constants/constants-grid.json')

    if not allow_duplicates:
        _, num_choices = get_num_grid_choices(grid)
        num_diff_experiments = min(num_choices, num_diff_experiments)
    # Make grid choice generator
    grid_choice_gen = grid_choices_random(grid, num_diff_experiments)
    for diff_experiment, constants in enumerate(grid_choice_gen):
        data_collector_obj = DataCollector(
            data_to_collect, MVP_key, constants,
            'test' if constants['agent']['agent_type'] == 'rule' else 'eval',
            df_path, overwrite if diff_experiment == 0 else False, verbose)

        for same_experiment in range(num_repeat_experiment):
            print(' --- Running experiment {}.{} / {}.{} --- '.format(
                diff_experiment + 1, same_experiment + 1, num_diff_experiments,
                num_repeat_experiment))
            if save_model:
                data_collector_obj.set_save_model_path(
                    'models/saved_models/random_{}-{}.pt'.format(
                        diff_experiment + 1, same_experiment + 1))
            run_experiment(diff_experiment + 1, same_experiment + 1, constants,
                           data_collector_obj)
Example #3
def register(config, group_id=None):
    """
    Do registration using basic auth
    """
    username = config.get(APP_NAME, 'username')
    password = config.get(APP_NAME, 'password')
    if (((username == "") and
       (password == "") and
       (config.get(APP_NAME, 'authmethod') == 'BASIC')) and not
       (config.get(APP_NAME, 'auto_config'))):
        # Get input from user
        print "Please enter your Red Hat Customer Portal Credentials"
        sys.stdout.write('User Name: ')
        username = raw_input().strip()
        password = getpass.getpass()
        sys.stdout.write("Would you like to save these credentials? (y/n) ")
        save = raw_input().strip()
        config.set(APP_NAME, 'username', username)
        config.set(APP_NAME, 'password', password)
        logger.debug("savestr: %s", save)
        if save.lower() == "y" or save.lower() == "yes":
            logger.debug("writing user/pass to config file")
            cmd = ("/bin/sed -e 's/^username.*=.*$/username="******"/' " +
                   "-e 's/^password.*=.*$/password="******"/' " +
                   constants.default_conf_file)
            status = DataCollector().run_command_get_output(cmd, nolog=True)
            config_file = open(constants.default_conf_file, 'w')
            config_file.write(status['output'])
            config_file.flush()

    pconn = InsightsConnection(config)
    return pconn.register(group_id)
Example #4
	def __init__(self, data_collector=None):
		if data_collector is None:
			data_collector = DataCollector()
		self.data_collector = data_collector
		self.bigrams = dict()
		self.get_unique_bigrams()
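
With the initializer fixed as above, both construction paths populate self.data_collector; hypothetical usage (the BigramModel class name is an assumption, since the example shows only the initializer):

model = BigramModel()                  # builds its own DataCollector
model = BigramModel(DataCollector())   # or injects an existing one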
Example #5
 def testRunWithError(self):
     collector = DataCollector(in_path=IN_FILE_BAD,
                               ot_path_data=OT_FILE_DATA,
                               ot_path_doc=OT_FILE_DOC)
     collector.run()
     df = pd.read_csv(OT_FILE_DATA)
     self.assertEqual(len(df["Biomodel_Id"]), 3)
Example #6
    def setup_data_threads(self, sample_time=None, decimation=None, start_threads=False):
        if self.is_slave:
            if not self.datastream_queue:
                self.datastream_queue = []

            # TODO: create a DataPoller for each signal (not for the whole trace) and register the observers on the specific DataPollers.
            if not self.datastream_poller:
                self.datastream_poller = DataPoller(self.datastream_queue, decimation)

            # TODO: the DataCollector must read the signal number and use the signal-specific queue to push the data into.
            if not self.datastream_collector:
                # TODO: stop the threads when the target is closed
                self.datastream_collector = DataCollector(self.config.getAttr('address'), self.config.getAttr('slave_data_port'), self.datastream_queue)

            if start_threads:
                print("Starting data threads...")
                if sample_time:
                    self.datastream_poller.updateSampleTime(sample_time)
                    self.datastream_poller.start()
                    self.datastream_collector.start()
                    print("Data threads started...")
                else:
                    print("Can't start data threads without a sample time")
                    return False

            return True
        else:
            print("Can't setup the data stream threads on a not-slave server")
            return False
Example #7
 def __init__(self):
     with DataCollector() as collector:
         result_channels = collector.get_telemetry_outputs()
         with DataServer(result_channels) as transmitter:
             print("Press ctrl+c to exit")
             signal.signal(signal.SIGINT, self.exit_handler)
             # TODO signal.pause() is not available on Windows systems
             signal.pause()
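
As the TODO notes, signal.pause() is unavailable on Windows. A portable alternative (an assumption, not part of the original) is to idle in a sleep loop that Ctrl+C can interrupt:

import time

try:
    while True:
        time.sleep(1)  # wake periodically so KeyboardInterrupt can be raised
except KeyboardInterrupt:
    pass  # fall through to cleanup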
Example #8
def mean_std_of_states():
    env = MountainCarWithResetEnv()
    samples_to_collect = 100000
    states, actions, rewards, next_states, done_flags = DataCollector(env).collect_data(samples_to_collect)
    all_states = np.concatenate((states, next_states))
    states_mean = np.mean(all_states, axis=0)
    states_std = np.std(all_states, axis=0)
    print("states_mean: {}, states_std: {}".format(states_mean, states_std))
Example #9
def main():
    args = get_args()
    data_collector_obj = DataCollector(args.debug)
    if args.debug:
        print("Got arguments " + str(args.years) + ", " + str(args.players) +
              ", " + args.mode + ", " + str(args.singles))
    data_collector_obj.scrape_last_n_years_of_k_players(
        args.years, args.players, args.mode,
        "singles" if args.singles else "doubles")
Example #10
def training_the_model(samples_to_collect=100000, seed=100):
    number_of_kernels_per_dim = [10, 8]
    gamma = 0.999
    w_updates = 20
    evaluation_number_of_games = 50
    evaluation_max_steps_per_game = 300
    np.random.seed(seed)

    env = MountainCarWithResetEnv()
    # collect data
    states, actions, rewards, next_states, done_flags = DataCollector(
        env).collect_data(samples_to_collect)
    # get data success rate
    data_success_rate = np.sum(rewards) / len(rewards)
    print(f'Data Success Rate {data_success_rate}')
    # standardize data
    data_transformer = DataTransformer()
    data_transformer.set_using_states(
        np.concatenate((states, next_states), axis=0))
    states = data_transformer.transform_states(states)
    next_states = data_transformer.transform_states(next_states)
    # process with radial basis functions
    feature_extractor = RadialBasisFunctionExtractor(number_of_kernels_per_dim)
    # encode all states:
    encoded_states = feature_extractor.encode_states_with_radial_basis_functions(
        states)
    encoded_next_states = feature_extractor.encode_states_with_radial_basis_functions(
        next_states)
    # set a new linear policy
    linear_policy = LinearPolicy(feature_extractor.get_number_of_features(), 3,
                                 True)
    # but set the weights as random
    linear_policy.set_w(np.random.uniform(size=linear_policy.w.shape))
    # start an object that evaluates the success rate over time
    evaluator = GamePlayer(env, data_transformer, feature_extractor,
                           linear_policy)

    success_rate_vs_iteration = list()

    for lspi_iteration in range(w_updates):
        print(f'Starting LSPI iteration {lspi_iteration}')

        new_w = compute_lspi_iteration(encoded_states, encoded_next_states,
                                       actions, rewards, done_flags,
                                       linear_policy, gamma)
        norm_diff = linear_policy.set_w(new_w)

        success_rate = evaluator.play_games(evaluation_number_of_games,
                                            evaluation_max_steps_per_game)

        success_rate_vs_iteration.append(success_rate)

        if norm_diff < 0.00001:
            break

    print('LSPI Done')
    return success_rate_vs_iteration
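
Since the function returns the per-iteration success rates, a natural way to call it and inspect the learning curve might be the following; the matplotlib plotting is illustrative, not part of the original example:

import matplotlib.pyplot as plt

success_rates = training_the_model(samples_to_collect=100000, seed=100)
plt.plot(success_rates)
plt.xlabel('LSPI iteration')
plt.ylabel('Success rate')
plt.show()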
Example #11
    def setUp(self):
        data_collector = DataCollector(config=config)

        self.data_after = data_collector.data_collector()

        path = 'devlab/tests'
        file_name = config.rollback_params['data_file_names']['PRE']
        pre_file_path = os.path.join(data_collector.main_folder, path,
                                     file_name)
        with open(pre_file_path, "r") as f:
            self.pre_data = yaml.safe_load(f)
Example #12
    def perform_experiment(self, experiment_name='test', movement_list=[]):
        # 1. We save the background image:
        dc = DataCollector(only_one_shot=False, automatic=True)
        dc.get_data(get_cart=False, get_gs1=(1 in self.gs_id), get_gs2=(2 in self.gs_id), get_wsg=False, save=True, directory=experiment_name+'/air', iteration=-1)
        print "Air data gathered"

        # 2. We perform the experiment:
        i = 0
        if not os.path.exists(experiment_name): # If the directory does not exist, we create it
            os.makedirs(experiment_name)
        for movement in movement_list:
            path = experiment_name + '/p_' + str(i) + '/'
            self.palpate(speed=40, force_list=self.force_list, save=True, path=path)
            self.move_cart_mm(movement[0], movement[1], movement[2])
            time.sleep(6)
            i += 1
        path = experiment_name + '/p_' + str(i) + '/'
        self.palpate(speed=40, force_list=self.force_list, save=True, path=path)
Example #13
    def palpate(self, speed=40, force_list=[1, 10, 20, 40], save=False, path=''):
        # 0. We create the directory
        if save is True and not os.path.exists(path): # If the directory does not exist, we create it
            os.makedirs(path)

        # 1. We get and save the cartesian coord.
        dc = DataCollector(only_one_shot=False, automatic=True)
        cart = dc.getCart()
        if save is True:
            np.save(path + '/cart.npy', cart)

        # 2. We get wsg forces and gs images at every set force and store them
        i = 0
        for force in force_list:
            self.close_gripper_f(grasp_speed=speed, grasp_force=force)
            print "Applying: " + str(force)
            time.sleep(1.0)
            dc.get_data(get_cart=False, get_gs1=(1 in self.gs_id), get_gs2=(2 in self.gs_id), get_wsg=True, save=save, directory=path, iteration=i)
            self.open_gripper()
            time.sleep(1.0)
            i += 1
Example #14
    def __init__(self):
        self.screen = MeteoScreen()
        self.collector = DataCollector()
        self.data = InMemorySensorDataLogger()
        self.collector.add_logger(self.data)
        file_logger = ToFileSensorDataLogger('./db')
        self.collector.add_logger(file_logger)
        now = datetime.datetime.now()
        last24h_data = file_logger.load_data(now - datetime.timedelta(days=1),
                                             now)
        self.data.set_data(last24h_data)
        self.screen_update_interval = 10
        self.current_screen_draw = self.draw_screen1
        self.update_timer_stop = threading.Event()
        self.screen_update_lock = threading.RLock()

        def timer():
            while not self.update_timer_stop.is_set():
                time.sleep(self.screen_update_interval)
                self.update_screen()

        self.timer_thread = threading.Thread(target=timer)
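
The excerpt ends before the timer thread is launched; presumably a later call starts it, along these lines (an assumption about code not shown):

        self.timer_thread.daemon = True  # don't keep the process alive on exit
        self.timer_thread.start()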
Example #15
def run_normal(verbose,
               num_experiments=1,
               df_path=None,
               overwrite=True,
               data_to_collect=POSSIBLE_DATA,
               MVP_key='waitingTime',
               save_model=True,
               load_model_file=None):
    # if loading, then don't save
    if load_model_file:
        save_model = False

    if not df_path:
        df_path = 'run-data.xlsx'  # default path

    # Load constants
    constants = load_constants('constants/constants.json')
    data_collector_obj = DataCollector(
        data_to_collect, MVP_key, constants,
        'test' if constants['agent']['agent_type'] == 'rule' or load_model_file
        else 'eval', df_path, overwrite, verbose)

    loaded_model = None
    if load_model_file:
        loaded_model = torch.load('models/saved_models/' + load_model_file)

    for exp in range(num_experiments):
        print(' --- Running experiment {} / {} --- '.format(
            exp + 1, num_experiments))
        if save_model:
            data_collector_obj.set_save_model_path(
                'models/saved_models/normal_{}.pt'.format(exp + 1))
        run_experiment(exp + 1,
                       None,
                       constants,
                       data_collector_obj,
                       loaded_model=loaded_model)
Example #16
    def run(self):
        """Main loop
        """
        self.startCapture()
        data_collector = DataCollector(self.dataset)

        modeFullFace = 0
        modeOneEye = 1
        modePictures = 2
        modeTwoEyes = 3
        modeImgDB = 4
        modeDemo = 5

        mode = modeDemo
        keepLoop = True
        current_t = time.perf_counter()
        previous_t = current_t
        while keepLoop:
            pressed_key = cv2.waitKey(1)
            current_t = time.perf_counter()
            #print('\nclock : ', current_t - previous_t)
            previous_t = current_t

            img = self.getCameraImage()

            if(mode == modeOneEye):
                ex = 300
                ey = 50
                eh = 200
                ew = 200
                face = Face(img.frame, img.canvas, 0, 0, 640, 480)
                eye = Eye(face.frame, face.canvas, ex, ey, ew, eh)
                eye.draw(face)
                eye.iris.normalizeIris()
            elif(mode == modeTwoEyes):
                face = Face(img.frame, img.canvas, 0, 0, 640, 480)
                left_eye = Eye(face.frame, face.canvas, 50, 50, 200, 200, EyeType.LEFT)
                left_eye.draw(face)
                left_eye.iris.normalizeIris()
                right_eye = Eye(face.frame, face.canvas, 400, 50, 200, 200, EyeType.RIGHT)
                right_eye.draw(face)
                right_eye.iris.normalizeIris()
            elif(mode == modeFullFace):
                face, left_eye, right_eye = img.detectEyes(self.bufferFace, self.bufferLeftEye, self.bufferRightEye)
                if face:
                    face.draw(img)
                if left_eye:
                    left_eye.draw(face)
                    left_eye.iris.normalizeIris()
                if right_eye:
                    right_eye.draw(face)
                    right_eye.iris.normalizeIris()
            elif(mode == modeImgDB):
                img_db = ImageDB("./IR_Database/MMU Iris Database/")
                print(img_db.estimateUser(img_db.bits[0]))
                exit()
            elif(mode == modeDemo):
                path = "./TB_Database/"
                face = Face(img.frame, img.canvas, 0, 0, 640, 480)
                left_eye = Eye(face.frame, face.canvas, 50, 50, 200, 200, EyeType.LEFT)
                left_eye.draw(face)
                left_eye.iris.normalizeIris()
                right_eye = Eye(face.frame, face.canvas, 400, 50, 200, 200, EyeType.RIGHT)
                right_eye.draw(face)
                right_eye.iris.normalizeIris()
                if(ord('0') <= pressed_key & 0xFF <= ord('9')):
                    print('0-9')
                    id_person = chr(pressed_key & 0xFF)
                    while((pressed_key & 0xFF) not in [ord('a'), ord('p')]):
                        pressed_key = cv2.waitKey(50)
                    print(pressed_key & 0xFF)
                    if(pressed_key & 0xFF == ord('a')):
                        cv2.imwrite(path + id_person + '/left/' + str(int(time.time() * 1000)) + '.bmp', left_eye.frame)
                        cv2.imwrite(path + id_person + '/right/' + str(int(time.time() * 1000)) + '.bmp', right_eye.frame)
            else:
                path = "./IR_Database/MMU Iris Database/"
                for dir in os.listdir(path):
                    subpath = path + dir + '/'
                    if(os.path.isdir(subpath)):
                        print(dir)
                        for subdir in os.listdir(subpath):
                            subsubpath = subpath + subdir + '/'
                            if(os.path.isdir(subsubpath)):
                                print('\t', subdir)
                                for fname in os.listdir(subsubpath):
                                    fpath = subsubpath + fname
                                    if(os.path.isfile(fpath) and os.path.splitext(fname)[1] == '.bmp'):
                                        print('\t\t', fname)
                                        img = Image(cv2.imread(fpath))
                                        face = Face(img.frame, img.canvas)
                                        eye = Eye(face.frame, face.canvas, padding=10)
                                        eye.draw(face)
                                        eye.iris.normalizeIris()
                                        img.show()
                                        pressed_key = cv2.waitKey(1000)
                exit()

            # Controls
            if pressed_key & 0xFF == ord('q'):
                keepLoop = False
            #if pressed_key & 0xFF == ord('s'):
            #    self.dataset.save()
            #if pressed_key & 0xFF == ord('l'):
            #    self.dataset.load()
            #if pressed_key & 0xFF == ord('m'):
            #    self.showMoments = not self.showMoments
            #if pressed_key & 0xFF == ord('e'):
            #    self.showEvaluation = not self.showEvaluation

            #data_collector.step(img.canvas, pressed_key, left_eye, right_eye)

            #txt = 'Dataset: {} (s)ave - (l)oad'.format(len(self.dataset))
            #cv2.putText(img.canvas, txt, (21, img.canvas.shape[0] - 29), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (32, 32, 32), 2)
            #cv2.putText(img.canvas, txt, (20, img.canvas.shape[0] - 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 126, 255), 2)

            #if left_eye and right_eye:
            #    direction = self.dataset.estimateDirection(left_eye.computeMomentVectors(), right_eye.computeMomentVectors())
            #    txt = 'Estimated direction: {}'.format(direction.name)
            #    cv2.putText(img.canvas, txt, (21, img.canvas.shape[0] - 49), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (32, 32, 32), 2)
            #    cv2.putText(img.canvas, txt, (20, img.canvas.shape[0] - 50), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 126, 255), 2)

            img.show()

            #if self.showEvaluation:
            #    fig = self.dataset.showValidationScoreEvolution()
            #    plt.show()
            #    self.showEvaluation = False

            #if self.showMoments:
            #    fig = self.dataset.drawVectorizedMoments()
            #    plt.show()
            #    # cv2.imshow('moments', self.fig2cv(fig))
            #    # plt.close(fig)
            #    self.showMoments = False

        self.stopCapture()
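Example #17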
if __name__ == '__main__':
    samples_to_collect = 100000
    # samples_to_collect = 150000
    # samples_to_collect = 10000
    number_of_kernels_per_dim = [10, 8]
    gamma = 0.99
    w_updates = 100
    evaluation_number_of_games = 10
    evaluation_max_steps_per_game = 1000

    np.random.seed(123)
    # np.random.seed(234)

    env = MountainCarWithResetEnv()
    # collect data
    states, actions, rewards, next_states, done_flags = DataCollector(
        env).collect_data(samples_to_collect)
    # get data success rate
    data_success_rate = np.sum(rewards) / len(rewards)
    print(f'success rate {data_success_rate}')
    # standardize data
    data_transformer = DataTransformer()
    data_transformer.set_using_states(
        np.concatenate((states, next_states), axis=0))
    states = data_transformer.transform_states(states)
    next_states = data_transformer.transform_states(next_states)
    # process with radial basis functions
    feature_extractor = RadialBasisFunctionExtractor(number_of_kernels_per_dim)
    # encode all states:
    encoded_states = feature_extractor.encode_states_with_radial_basis_functions(
        states)
    encoded_next_states = feature_extractor.encode_states_with_radial_basis_functions(
        next_states)
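Example #18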
 def __init__(self, earthquake_data, earthquake_data_clean):
     self.earthquake_data = earthquake_data
     self.earthquake_data_clean = earthquake_data_clean
     self.data_collector = DataCollector()
     self.new_location = DataCollector()
Example #19
 def setUp(self):
     self.collector = DataCollector(in_path=IN_FILE,
                                    ot_path_data=OT_FILE_DATA,
                                    ot_path_doc=OT_FILE_DOC)
Example #20
def run_lspi(seed,
             w_updates=20,
             samples_to_collect=100000,
             evaluation_number_of_games=1,
             evaluation_max_steps_per_game=200,
             thresh=0.00001,
             only_final=False):
    """
    This is the main lspi function
    :param seed: random seed for the run
    :param w_updates: how many w updates to do
    :param samples_to_collect: how many samples to collect
    :param evaluation_number_of_games: how many game evaluations to do
    :param evaluation_max_steps_per_game: how many steps to allow the evaluation game to run
    :param thresh: the threshold for the stopping condition
    :param only_final: run evaluation only at the end of the run
    :return: None
    """
    res_dir = './Results/'
    np.random.seed(seed)
    number_of_kernels_per_dim = [12, 10]
    gamma = 0.999
    env = MountainCarWithResetEnv()
    # collect data
    states, actions, rewards, next_states, done_flags = DataCollector(
        env).collect_data(samples_to_collect)
    # get data success rate
    data_success_rate = np.sum(rewards) / len(rewards)
    print('success rate: {}'.format(data_success_rate))
    # standardize data
    data_transformer = DataTransformer()
    data_transformer.set_using_states(
        np.concatenate((states, next_states), axis=0))
    states = data_transformer.transform_states(states)
    next_states = data_transformer.transform_states(next_states)
    # process with radial basis functions
    feature_extractor = RadialBasisFunctionExtractor(number_of_kernels_per_dim)
    # encode all states:
    encoded_states = feature_extractor.encode_states_with_radial_basis_functions(
        states)
    encoded_next_states = feature_extractor.encode_states_with_radial_basis_functions(
        next_states)
    # set a new linear policy
    linear_policy = LinearPolicy(feature_extractor.get_number_of_features(), 3,
                                 True)
    # but set the weights as random
    linear_policy.set_w(np.random.uniform(size=linear_policy.w.shape))
    # start an object that evaluates the success rate over time
    evaluator = GamePlayer(env, data_transformer, feature_extractor,
                           linear_policy)

    # success_rate = evaluator.play_games(evaluation_number_of_games, evaluation_max_steps_per_game)
    # print("Initial success rate: {}".format(success_rate))
    performances = []
    if not only_final:
        performances.append(
            evaluator.play_games(evaluation_number_of_games,
                                 evaluation_max_steps_per_game))
    read = False
    if read:
        with open(res_dir + 'weight.pickle', 'rb') as handle:
            new_w = pickle.load(handle)
            linear_policy.set_w(np.expand_dims(new_w, 1))
    for lspi_iteration in range(w_updates):
        print('starting lspi iteration {}'.format(lspi_iteration))

        new_w = compute_lspi_iteration(encoded_states, encoded_next_states,
                                       actions, rewards, done_flags,
                                       linear_policy, gamma)
        with open(res_dir + 'weight.pickle', 'wb') as handle:
            pickle.dump(new_w, handle, protocol=pickle.HIGHEST_PROTOCOL)

        norm_diff = linear_policy.set_w(new_w)
        if not only_final:
            performances.append(
                evaluator.play_games(evaluation_number_of_games,
                                     evaluation_max_steps_per_game))
        if norm_diff < thresh:
            break
    print('done lspi')
    if not only_final:
        with open(res_dir + 'perf' + str(seed) + '.pickle', 'wb') as handle:
            pickle.dump(performances, handle, protocol=pickle.HIGHEST_PROTOCOL)
    if only_final:
        score = evaluator.play_games(evaluation_number_of_games,
                                     evaluation_max_steps_per_game)
        with open(res_dir + 'final_perf' + str(samples_to_collect) + '.pickle',
                  'wb') as handle:
            pickle.dump(score, handle, protocol=pickle.HIGHEST_PROTOCOL)
    evaluator.play_game(evaluation_max_steps_per_game, render=True)
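
Given the per-seed performance pickles the function writes, a sweep over several seeds might be driven like this; the seed values are illustrative:

for seed in [123, 234, 345]:
    run_lspi(seed, w_updates=20, samples_to_collect=100000)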
Example #21
def collect_data_and_upload(config, options):
    """
    All the heavy lifting done here
    """
    pconn = InsightsConnection(config)
    try:
        branch_info = pconn.branch_info()
    except requests.ConnectionError:
        logger.error("ERROR: Could not connect to determine branch information")
        sys.exit()
    except LookupError:
        logger.error("ERROR: Could not determine branch information")
        sys.exit()
    pc = InsightsConfig(config, pconn)
    dc = DataCollector()
    start = time.clock()
    collection_rules, rm_conf = pc.get_conf(options.update)
    elapsed = (time.clock() - start)
    logger.debug("Collection Rules Elapsed Time: %s", elapsed)
    start = time.clock()
    logger.info('Starting to collect Insights data')
    dc.run_commands(collection_rules, rm_conf)
    elapsed = (time.clock() - start)
    logger.debug("Command Collection Elapsed Time: %s", elapsed)
    start = time.clock()
    dc.copy_files(collection_rules, rm_conf)
    elapsed = (time.clock() - start)
    logger.debug("File Collection Elapsed Time: %s", elapsed)
    dc.write_branch_info(branch_info)
    obfuscate = config.getboolean(APP_NAME, "obfuscate")

    if not options.no_tar_file:
        tar_file = dc.done(config, rm_conf)
        if not options.no_upload:
            logger.info('Uploading Insights data,'
                        ' this may take a few minutes')
            for tries in range(options.retries):
                status = pconn.upload_archive(tar_file)
                if status == 201:
                    logger.info("Upload completed successfully!")
                    break
                else:
                    logger.error("Upload attempt %d of %d failed! Status Code: %s",
                                tries+1, options.retries, status)
                    if tries +1 != options.retries:
                        logger.info("Waiting %d seconds then retrying", constants.sleep_time)
                        time.sleep(constants.sleep_time)
                    else:
                        logger.error("All attempts to upload have failed!")
                        logger.error("Please see %s for additional information", constants.default_log_file)

            if not obfuscate and not options.keep_archive:
                dc.archive.delete_tmp_dir()
            else:
                if obfuscate:
                    logger.info('Obfuscated Insights data retained in %s',
                                os.path.dirname(tar_file))
                else:
                    logger.info('Insights data retained in %s', tar_file)
        else:
            logger.info('See Insights data in %s', tar_file)
    else:
        logger.info('See Insights data in %s', dc.archive.archive_dir)
Example #22
    pos = tuple(frame.position[:3])
    rot = tfx.tb_angles(frame.rotation)
    rot = (rot.yaw_deg, rot.pitch_deg, rot.roll_deg)

    pickle.dump({'pos': pos, 'rot': rot, 'estimate': estimate}, f)

    f.close()


psm1 = robot("PSM1")

psm1.open_gripper(80)

time.sleep(2)

d = DataCollector()

time.sleep(2)

img = cv2.medianBlur(d.left_image[:, 850:], 13)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.bilateralFilter(img, 11, 17, 17)
output = cv2.Canny(img, 100, 200)

(cnts, _) = cv2.findContours(output.copy(), cv2.RETR_TREE,
                             cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)

for c in cnts:
    # approximate the contour
    peri = cv2.arcLength(c, True)
Example #23
            new_SD_list, new_not_SD = loop_through_ids(
                self.ids_social_distance)
            assert len(new_SD_list) == 0
            # Switch people
            for id in new_SD:
                self.ids_not_social_distance.remove(id)
                self.ids_social_distance.add(id)
            for id in new_not_SD:
                self.ids_social_distance.remove(id)
                self.ids_not_social_distance.add(id)
            if render:
                pygame.display.flip()
                screen.fill((0, 0, 0))
                # Frames per second
                if self.render_C['fps']: clock.tick(self.render_C['fps'])
        self.data_collect.reset(t + 1, last=True)


if __name__ == '__main__':
    constants = json.load(open('constants.json'))
    # Can save a run as an experiment, which saves the data, visualizations and constants in an experiments directory
    data_collect = DataCollector(constants,
                                 save_experiment=True,
                                 print_visualizations=True)
    # Can print data (look at `data_options` at top of `data_collector.py` for options) and how often to print
    data_collect.set_print_options(basic_to_print=['S', 'I', 'R', 'death'],
                                   frequency=1)
    CA = CellularAutomation(constants, data_collect)
    # Can render each timestep with pygame
    CA.run(render=True)
Example #24
from crawler import Crawler
from schemas import Page
from data_collector import DataCollector
import validators

dc = DataCollector('localhost', 27017, '20171013-nigth')
readLinks = []


class Utils:

    invalidWords = [
        'facebook', 'fb', 'google', 'instagram', 'twitter', 'youtube', 'bing',
        'yahoo', 'ecosia', '.pdf', '.doc', '.xls', '.png', '.jpg', '.gif',
        'ads', 'goo.gl', 'microsoft', 'gmail', 'skype', 'download', 'apple',
        'pinterest', '.ppt', 'mozilla', 'ebay', 'amazon',
        'newegg', 'soundcloud', 'advertise', 'advertising'
    ]

    @staticmethod
    def isValidURL(url):
        containsInvalidWord = False

        try:
            url = str(url)
            for word in Utils.invalidWords:
                if url.find(word) > -1:
                    containsInvalidWord = True
                    break

            if validators.url(url) and not containsInvalidWord:
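Example #25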
 def __init__(self):
     self.data_collector = DataCollector()
     self.new_location = DataCollector()
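Example #26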
	def setup_class(cls):
		print("Setting up CLASS {0}".format(cls.__name__))
		cls.d = DataCollector()
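Example #27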
 def __init__(self):
     self.di = DataCollector()
Example #28
    def run(self):
        """Main loop
        """
        self.startCapture()
        data_collector = DataCollector(self.dataset)

        keepLoop = True
        while keepLoop:
            pressed_key = cv2.waitKey(1)

            img = self.getCameraImage()
            face, left_eye, right_eye = img.detectEyes(self.bufferFace,
                                                       self.bufferLeftEye,
                                                       self.bufferRightEye)
            if face:
                face.draw(img)
            if left_eye:
                left_eye.draw(face)
            if right_eye:
                right_eye.draw(face)

            # Controls
            if pressed_key & 0xFF == ord('q'):
                keepLoop = False
            if pressed_key & 0xFF == ord('s'):
                self.dataset.save()
            if pressed_key & 0xFF == ord('l'):
                self.dataset.load()
            if pressed_key & 0xFF == ord('m'):
                self.showMoments = not self.showMoments
            if pressed_key & 0xFF == ord('e'):
                self.showEvaluation = not self.showEvaluation

            data_collector.step(img.canvas, pressed_key, left_eye, right_eye)

            txt = 'Dataset: {} (s)ave - (l)oad'.format(len(self.dataset))
            cv2.putText(img.canvas, txt, (21, img.canvas.shape[0] - 29),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (32, 32, 32), 2)
            cv2.putText(img.canvas, txt, (20, img.canvas.shape[0] - 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 126, 255), 2)

            if left_eye and right_eye:
                direction = self.dataset.estimateDirection(
                    left_eye.computeMomentVectors(),
                    right_eye.computeMomentVectors())
                txt = 'Estimated direction: {}'.format(direction.name)
                cv2.putText(img.canvas, txt, (21, img.canvas.shape[0] - 49),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (32, 32, 32), 2)
                cv2.putText(img.canvas, txt, (20, img.canvas.shape[0] - 50),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 126, 255), 2)

            img.show()

            if self.showEvaluation:
                fig = self.dataset.showValidationScoreEvolution()
                plt.show()
                self.showEvaluation = False

            if self.showMoments:
                fig = self.dataset.drawVectorizedMoments()
                plt.show()
                # cv2.imshow('moments', self.fig2cv(fig))
                # plt.close(fig)
                self.showMoments = False

        self.stopCapture()
Example #29
    parser.add_option("-k", action="store", dest="k", type="long", default=3)
    parser.add_option("--temp",
                      action="store",
                      dest="temp_rate",
                      type="float",
                      default=0.9)
    parser.add_option("--iters",
                      action="store",
                      dest="iters",
                      type="float",
                      default=100)
    parser.add_option("--max_t",
                      action="store",
                      dest="max_temp",
                      type="float",
                      default=100)
    parser.add_option("--min_t",
                      action="store",
                      dest="min_temp",
                      type="float",
                      default=0.5)
    parser.add_option("--hh", action="store", type="string", dest="hh")

    global options
    (options, _) = parser.parse_args()


# data collector configuration
global collector
collector = DataCollector()
Example #30
def collect_data_and_upload(rc=0):
    """
    All the heavy lifting done here
    Run through "targets" - could be just ONE (host, default) or ONE (container/image)
    """
    # initialize collection targets
    # for now we do either containers OR host -- not both at same time
    if InsightsClient.options.container_mode:
        logger.debug("Client running in container/image mode.")
        logger.debug("Scanning for matching container/image.")
        targets = get_targets()
    else:
        logger.debug("Host selected as scanning target.")
        targets = constants.default_target

    # if there are no targets to scan then bail
    if not len(targets):
        logger.debug("No targets were found. Exiting.")
        sys.exit(1)

    if InsightsClient.options.offline:
        logger.warning("Assuming remote branch and leaf value of -1")
        pconn = None
        branch_info = constants.default_branch_info
    else:
        pconn = InsightsConnection()

    # TODO: change these error messages to be more meaningful, i.e.
    # "could not determine login information"
    if pconn:
        try:
            branch_info = pconn.branch_info()
        except requests.ConnectionError:
            branch_info = handle_branch_info_error(
                "Could not connect to determine branch information")
        except LookupError:
            branch_info = handle_branch_info_error(
                "Could not determine branch information")
    pc = InsightsConfig(pconn)
    tar_file = None

    if InsightsClient.options.just_upload:
        if not os.path.exists(InsightsClient.options.just_upload):
            logger.error('No file %s', InsightsClient.options.just_upload)
            return 1
        tar_file = InsightsClient.options.just_upload
        rc = _do_upload(pconn, tar_file, 'dummy', 0)
        return rc

    # load config from stdin/file if specified
    try:
        stdin_config = {}
        if InsightsClient.options.from_file:
            with open(InsightsClient.options.from_file, 'r') as f:
                stdin_config = json.load(f)
        elif InsightsClient.options.from_stdin:
            stdin_config = json.load(sys.stdin)
        if ((InsightsClient.options.from_file
             or InsightsClient.options.from_stdin)
                and ('uploader.json' not in stdin_config
                     or 'sig' not in stdin_config)):
            raise ValueError
        if ((InsightsClient.options.from_file
             or InsightsClient.options.from_stdin)
                and 'branch_info' in stdin_config
                and stdin_config['branch_info'] is not None):
            branch_info = stdin_config['branch_info']
    except:
        logger.error('ERROR: Invalid config for %s! Exiting...',
                     ('--from-file'
                      if InsightsClient.options.from_file else '--from-stdin'))
        sys.exit(1)

    start = time.clock()
    collection_rules, rm_conf = pc.get_conf(InsightsClient.options.update,
                                            stdin_config)
    collection_elapsed = (time.clock() - start)
    logger.debug("Rules configuration loaded. Elapsed time: %s",
                 collection_elapsed)

    individual_archives = []

    for t in targets:
        # defaults
        archive = None
        container_connection = None
        mp = None
        obfuscate = None
        # archive metadata
        archive_meta = {}

        try:
            if t['type'] == 'docker_image':
                container_connection = open_image(t['name'])
                logging_name = 'Docker image ' + t['name']
                archive_meta['docker_id'] = t['name']
                archive_meta['display_name'] = docker_display_name(
                    t['name'], t['type'].replace('docker_', ''))
                logger.debug('Docker display_name: %s',
                             archive_meta['display_name'])
                logger.debug('Docker docker_id: %s', archive_meta['docker_id'])
                if container_connection:
                    mp = container_connection.get_fs()
                else:
                    logger.error('Could not open %s for analysis',
                                 logging_name)
                    sys.exit(1)
            elif t['type'] == 'docker_container':
                container_connection = open_container(t['name'])
                logging_name = 'Docker container ' + t['name']
                archive_meta['docker_id'] = t['name']
                archive_meta['display_name'] = docker_display_name(
                    t['name'], t['type'].replace('docker_', ''))
                logger.debug('Docker display_name: %s',
                             archive_meta['display_name'])
                logger.debug('Docker docker_id: %s', archive_meta['docker_id'])
                if container_connection:
                    mp = container_connection.get_fs()
                else:
                    logger.error('Could not open %s for analysis',
                                 logging_name)
                    sys.exit(1)
            elif t['type'] == 'host':
                logging_name = determine_hostname()
                archive_meta['display_name'] = determine_hostname(
                    InsightsClient.options.display_name)
            else:
                logger.error('Unexpected analysis target: %s', t['type'])
                sys.exit(1)

            archive_meta['type'] = t['type'].replace('docker_', '')
            archive_meta['product'] = 'Docker'
            archive_meta['system_id'] = generate_analysis_target_id(
                t['type'], t['name'])

            collection_start = time.clock()
            archive = InsightsArchive(
                compressor=InsightsClient.options.compressor
                if not InsightsClient.options.container_mode else "none",
                target_name=t['name'])
            atexit.register(_delete_archive, archive)
            dc = DataCollector(archive,
                               InsightsClient.config,
                               mountpoint=mp,
                               target_name=t['name'],
                               target_type=t['type'])

            logger.info('Starting to collect Insights data for %s',
                        logging_name)
            dc.run_collection(collection_rules, rm_conf, branch_info)
            elapsed = (time.clock() - start)
            logger.debug("Data collection complete. Elapsed time: %s", elapsed)

            obfuscate = InsightsClient.config.getboolean(APP_NAME, "obfuscate")

            # include rule refresh time in the duration
            collection_duration = (time.clock() -
                                   collection_start) + collection_elapsed

            # add custom metadata about a host if provided by from_file
            # use in the OSE case
            if InsightsClient.options.from_file:
                with open(InsightsClient.options.from_file, 'r') as f:
                    stdin_config = json.load(f)
                    if 'metadata' in stdin_config:
                        archive.add_metadata_to_archive(
                            json.dumps(stdin_config['metadata']),
                            'metadata.json')

            if InsightsClient.options.no_tar_file:
                logger.info('See Insights data in %s', dc.archive.archive_dir)
                return rc

            tar_file = dc.done(collection_rules, rm_conf)

            # add archives to list of individual uploads
            archive_meta['tar_file'] = tar_file
            individual_archives.append(archive_meta)

        finally:
            # called on loop iter end or unexpected exit
            if container_connection:
                container_connection.close()

    # if multiple targets (container mode), add all archives to single archive
    # if InsightsClient.options.container_mode:
    if False:  # we only run single collections now (not the uber archives), bypass this
        full_archive = InsightsArchive(
            compressor=InsightsClient.options.compressor)
        for a in individual_archives:
            shutil.copy(a['tar_file'], full_archive.archive_dir)
        # don't want insights_commands in meta archive
        shutil.rmtree(full_archive.cmd_dir)
        metadata = _create_metadata_json(individual_archives)
        full_archive.add_metadata_to_archive(json.dumps(metadata),
                                             'metadata.json')
        full_tar_file = full_archive.create_tar_file(full_archive=True)
    # if only one target (regular mode), just upload one
    else:
        full_archive = archive
        full_tar_file = tar_file

    if InsightsClient.options.offline or InsightsClient.options.no_upload:
        handle_file_output(full_tar_file, full_archive)
        return rc

    # do the upload
    rc = _do_upload(pconn, full_tar_file, logging_name, collection_duration)

    if InsightsClient.options.keep_archive:
        logger.info('Insights data retained in %s', full_tar_file)
        return rc
    if obfuscate:
        logger.info('Obfuscated Insights data retained in %s',
                    os.path.dirname(full_tar_file))
    full_archive.delete_archive_dir()
    return rc