Esempio n. 1
0
 def _create_bucket(self, bucket_name):
     """Create *bucket_name* on the MinIO server, tolerating pre-existing ownership.

     A bucket we already own is only worth a warning; any other server
     response error is logged as an error (neither is re-raised).
     """
     try:
         self.client.make_bucket(bucket_name)
     except minio.error.BucketAlreadyOwnedByYou as exc:
         # The bucket already exists and belongs to us: benign, note and move on.
         logger.warning(exc)
     except minio.error.ResponseError as exc:
         logger.error(exc)
    def get_country_and_continent_from_location(loc_string, user_name,
                                                website_name):
        """Resolve a free-text location into (country, continent).

        Strategy (the geocoding API is occasionally unavailable):
        1. first attempt - on failure, log a warning, sleep, and retry once
        2. second failure - log an ERROR with the user details so the
           location can be scraped individually later (milestone 3)

        :param loc_string: user location description (str)
        :param user_name: user identifier used in the log messages (str)
        :param website_name: website identifier used in the log messages (str)
        :return: country and continent (str, str) or (None, None)
        """
        country, continent = None, None
        # "GMT {-8:00}"-style time-zone strings carry no geographic
        # information, so there is nothing to resolve.
        if re.search(config.GMT_REGEX, loc_string):
            return country, continent

        try:
            country, continent = GeoLocation.geolocator_process(loc_string)
        except GeocoderUnavailable:
            logger.warning(
                config.GeocoderUnavailable_WARNING_STRING.format(
                    user_name, website_name, loc_string))

            # back off before retrying so we do not hammer the API again
            time.sleep(config.SLEEP_TIME_FOR_LOCATIONS_API)
            try:
                country, continent = GeoLocation.geolocator_process(
                    loc_string)
            except GeocoderUnavailable:
                logger.error(
                    config.GeocoderUnavailable_ERROR_STRING.format(
                        user_name, website_name, loc_string))

        return country, continent
Esempio n. 3
0
    def __init__(self, _id):
        """Initialise the run with the default "x2" function configuration.

        All simulation parameters are gathered in a single mapping and
        forwarded to the parent constructor as keyword arguments.
        """

        funct = Functs["x2"]

        params = dict(
            _id=_id,
            year=0,
            funct=funct,
            objective=funct["objective"],
            interval=funct["interval"],
            winning_threshold=0.005,
            seed_parents=12,
            kill_rate=0.333,
            demography=1,
            average_child_numb=0.5,
            kill_before_reproduce=0,
            social_system="normal",
            current_population=[],
            original_population=[],
            _round=7,
        )

        # Trace the full parameter set (WARNING level, as elsewhere in the file).
        logger.warning(str(params))

        super().__init__(**params)
Esempio n. 4
0
    def reputation_hist(self):
        """Return the user's reputation per year for [2017, 2018, 2019, 2020].

        Years are only evaluated for users registered before 2017.  When a
        user registered more than 4 years ago but the site serves a monthly
        plot instead (IndexError on the year indexes), a warning is logged
        for manual follow-up and the None-filled mapping is returned.

        :return: reputation for each year (dict) or a dict of None's
        """
        empty_hist = {
            f"reputation_{year}": None for year in config.REPUTATION_YEARS
        }
        if self._member_since >= UserScraper.threshold_date:
            # Registered too recently: no yearly history to extract.
            return empty_hist

        soup_activity = Website.create_soup(self._url + "?tab=topactivity")
        source_data = soup_activity.find("div", {
            "id": "top-cards"
        }).contents[3].string
        numbers = re.search(config.REPUTATION_REGEX, source_data).group(1)
        reputation_numbers = ast.literal_eval(numbers)
        try:
            return {
                f"reputation_{year}":
                reputation_numbers[UserScraper.year_indexes[i]]
                for i, year in enumerate(config.REPUTATION_YEARS)
            }
        except IndexError:
            logger.warning(
                f"website {self._website_name} user {self._name}"
                f" is member since more than 4 years but have reputation plot of month"
            )
        return empty_hist
Esempio n. 5
0
 def create_default_config_file(self):
     """Copy the bundled default config file to the config path, then exit.

     The user is warned to fill in a valid IAM role before running again;
     the process finishes as a successful execution afterwards.
     """
     shutil.copy(default_file_path + "/default_config_file.json",
                 config_file_path)
     warning = ("Config file '%s' created.\n" % config_file_path +
                "Please, set a valid iam role in the file field 'role' before the first execution.")
     logger.warning(warning)
     utils.finish_successful_execution()
Esempio n. 6
0
def _load():
    """Rebuild and return a _HandleAlgo object from the algo data stored in the session."""
    algo_data = session["algo_dict"][session["current_algo"]]
    # Trace the raw payload before reconstruction (WARNING level, as elsewhere).
    logger.warning(algo_data)
    return _HandleAlgo(algo_data)
Esempio n. 7
0
    def __init__(self, id):
        """Initialise the simulation with its full default parameter set.

        The parameters are gathered into a single mapping and forwarded to
        the parent constructor as keyword arguments.
        """

        params = dict(
            _id=id,
            year=0,
            funct=Functs["x2"],
            objective="min",
            interval=[-100, 100],
            winning_threshold=0.005,
            seed_parents=12,
            kill_rate=0.33,
            demography=1,
            dimension="1D",
            level="easy",
            average_child_numb=0.5,
            kill_before_reproduce=1,
            social_system="normal",
            current_population=[],
            learning_curve=[],
            original_population=[],
            _round=7,
            is_won=0,
            won_year=-1,
            kill_number=-1,
            saved_people=-1,
            new_people_number=-1,
            new_people_composition={},
        )

        # Trace the full parameter set (WARNING level, as elsewhere in the file).
        logger.warning(str(params))

        super().__init__(**params)
Esempio n. 8
0
    def get_action(self, board, temp=1e-3, return_prob=0):
        """Pick the next move from the MCTS move probabilities.

        In self-play mode the search tree root is advanced to the chosen
        move so the subtree is reused; otherwise the tree is reset.  When
        *return_prob* is truthy, the full probability vector (as in the
        AlphaGo Zero paper) is returned alongside the move.
        """
        sensible_moves = board.availables
        if len(sensible_moves) == 0:
            logger.warning("[!] Game board is full!")
            return

        # pi vector over the whole board, as in the AlphaGo Zero paper
        move_probs = np.zeros(board.width * board.height)
        acts, act_probs = self.mcts.get_move_probs(board, temp)
        move_probs[acts] = act_probs
        move = np.random.choice(acts, p=act_probs)

        if self._is_selfplay:
            # advance the root node and reuse the search tree
            self.mcts.update_with_move(move)
        else:
            # reset the root node
            self.mcts.update_with_move(-1)

        if return_prob:
            return move, move_probs
        return move
    def geolocator_process(loc_string):
        """Resolve a free-text location to a canonical (country, continent) pair.

        The geo-locator library first converts the user-written location to
        latitude/longitude, and a reverse lookup then yields a globally
        unique country name, from which the continent is derived.  Returns
        (None, None) when the location cannot be resolved.
        """
        country, continent = None, None
        location = geolocator.geocode(loc_string)
        if not location:
            return country, continent

        # throttle between the forward and reverse API calls
        time.sleep(config.SLEEP_TIME_FOR_LOCATIONS_API)
        try:
            reverse_loc = geolocator.reverse(
                [location.latitude, location.longitude], language='en')
            country = reverse_loc.raw["address"]["country"]
            continent = config.continents_dict[
                country_alpha2_to_continent_code(
                    country_name_to_country_alpha2(country))]

        except TypeError:
            logger.warning(
                config.USER_PROBLEMATIC_COUNTRY.format(loc_string))

        except KeyError:
            # Country name not recognised by the alpha2/continent mappings;
            # fall back to the hand-maintained table when possible.
            if country in config.KNOWN_COUNTRIES:
                country, continent = config.KNOWN_COUNTRIES[country]
        finally:
            # throttle before the caller's next API request
            time.sleep(config.SLEEP_TIME_FOR_LOCATIONS_API)
        return country, continent
Esempio n. 10
0
 def delete_log_group(self, log_group_name):
     """Delete the CloudWatch log group *log_group_name*.

     Returns the service response on success; a missing group is only a
     warning, any other client error is logged and None is returned.
     """
     try:
         # Delete the cloudwatch log group
         return self.get_client().delete_log_group(logGroupName=log_group_name)
     except ClientError as ce:
         if ce.response['Error']['Code'] == 'ResourceNotFoundException':
             logger.warning("Cannot delete log group '%s'. Group not found." % log_group_name)
         else:
             # Bug fix: logger.error was called with two positional messages;
             # logging treated the second as a %-argument for the first (which
             # has no placeholder), so the error detail never reached the log.
             logger.error("Error deleting the cloudwatch log: %s" % ce)
Esempio n. 11
0
 def on_epoch_end(self, epoch, logs=None):
     """Write every metric in *logs* to TensorBoard at step ``epoch * multiplier``."""
     with self.file_writer.as_default():
         for name, metric in logs.items():
             try:
                 tensorflow.summary.scalar(name,
                                           metric,
                                           step=epoch * self.multiplier)
             except Exception as exc:
                 # Best effort: one unloggable metric must not abort the hook.
                 logger.warning(str(exc))
         self.file_writer.flush()
Esempio n. 12
0
def subscriber_handler(data, context, board):
    """Pub/Sub entry point: decode the URL payload, scrape it, store results in GCS."""
    if "data" not in data:
        # Nothing to process in this message.
        return

    results_json = base64.b64decode(data["data"]).decode("utf-8")
    logger.warning("results data: {}".format(results_json))
    urls = json.loads(results_json)

    scraped = consume_urls_parallel(urls)
    insert_to_gcs(board.lower(), scraped)
Esempio n. 13
0
 def wrapper(*args, **kwargs):
     """Run mongo_op_func, retrying on AutoReconnect with exponential back-off.

     Bug fix: previously, when every attempt failed, the loop fell through
     and silently returned None, masking the persistent connection failure.
     The final AutoReconnect is now re-raised so callers can react.
     """
     for attempt in range(MAX_AUTO_RECONNECT_ATTEMPTS):
         try:
             return mongo_op_func(*args, **kwargs)
         except pymongo.errors.AutoReconnect as e:
             if attempt == MAX_AUTO_RECONNECT_ATTEMPTS - 1:
                 # Out of retries: surface the error instead of returning None.
                 raise
             wait_t = 0.5 * pow(2, attempt)  # exponential back off
             logger.warning(
                 "PyMongo auto-reconnecting... %s. Waiting %.1f seconds.",
                 str(e), wait_t)
             time.sleep(wait_t)
Esempio n. 14
0
 def get_bucket_files(self, bucket_name, prefix_key):
     """Return the object keys stored in *bucket_name* under *prefix_key*.

     An empty/None prefix lists the whole bucket; a missing bucket is logged
     as a warning and yields an empty list.
     """
     if not self.client.find_bucket(bucket_name):
         logger.warning("Bucket '{0}' not found".format(bucket_name))
         return []

     result = self.client.list_files(bucket_name,
                                     key='' if prefix_key is None else prefix_key)
     if 'Contents' not in result:
         return []
     return [info['Key'] for info in result['Contents']]
Esempio n. 15
0
 def create_log_group(self, log_group_name, tags):
     """Create the CloudWatch log group *log_group_name* with *tags*.

     Returns the service response; an already-existing group is reused with
     a warning, while any other client error is logged and the whole
     execution is terminated as failed.
     """
     try:
         logger.debug("Creating cloudwatch log group.")
         return self.get_client().create_log_group(logGroupName=log_group_name, tags=tags)
     except ClientError as ce:
         if ce.response['Error']['Code'] == 'ResourceAlreadyExistsException':
             logger.warning("Using existent log group '%s'" % log_group_name)
         else:
             # Bug fix: logger.error was called with two positional messages;
             # logging treated the second as a %-argument for the first (which
             # has no placeholder), so the error detail never reached the log.
             logger.error("Error creating log groups: %s" % ce)
             utils.finish_failed_execution()
Esempio n. 16
0
    def run(self):
        """Run the self-play training pipeline.

        Each iteration collects self-play data, updates the policy network
        once the replay buffer exceeds the batch size, periodically saves a
        checkpoint, and every ``check_freq`` iterations evaluates the policy
        against the pure-MCTS baseline (raising that baseline's strength
        whenever the current policy wins every game).  A keyboard interrupt
        saves the current policy before quitting.
        """

        times = deque(maxlen=10)  # rolling window for the mean iteration time
        schedule = lr_schedule()
        mean_iter_time = 0

        # run the training pipeline
        try:
            for i in range(self.game_batch_num):
                start = time()
                self.collect_selfplay_data(self.play_batch_size)
                mcts_time = time() - start
                logger.info("iter: {}, episode_len:{}, mcts time: {:.2f}, mean time: {:.2f}".format(i + 1,
                                                                                                    self.episode_len,
                                                                                                    mcts_time,
                                                                                                    mean_iter_time))

                # only train once enough positions have been collected
                if len(self.states_buffer) > self.batch_size:
                    loss, entropy = self.policy_update(learning_rate=schedule(i))

                times.append(time() - start)
                mean_iter_time = sum(times) / len(times)

                if (i + 1) % self.save_freq == 0 and not self.debug:
                    self.policy_value_net.save_model(os.path.join(self.save_dir, 'policy_{}.model'.format(i + 1)))

                # check the performance of the current model,
                if (i + 1) % self.check_freq == 0:
                    logger.info("current self-play batch: {}, evaluating...".format(i + 1))
                    win_ratio = self.policy_evaluate()

                    if win_ratio > self.best_win_ratio:
                        logger.info("Found new best policy, saving")
                        self.best_win_ratio = win_ratio
                        # update the best_policy
                        self.policy_value_net.save_model(os.path.join(self.save_dir, 'best_policy.model'))
                        # a perfect score: strengthen the pure-MCTS opponent
                        # and reset the bar for the next evaluation round
                        if (self.best_win_ratio == 1.0 and
                                self.pure_mcts_playout_num < 5000):
                            self.pure_mcts_playout_num += 1000
                            self.best_win_ratio = 0.0

        except KeyboardInterrupt:
            logger.warning('Got keyboard interrupt, saving and quiting')
            try:
                if not self.debug:
                    self.policy_value_net.save_model(os.path.join(self.save_dir, 'current_policy.model'))
            # Bug fix: the original bare "except:" also swallowed SystemExit
            # and a second KeyboardInterrupt during the save; narrowed.
            except Exception:
                logger.error("[!] Error while saving policy net on keyboard interrupt, quiting")
Esempio n. 17
0
 def insert(self, collection_name, documents):
     """Insert one document (or a list of documents) into *collection_name*.

     Returns the inserted id (or list of ids).  Duplicate-key bulk errors
     are logged as a warning; any other failure is logged with its
     traceback.  In both error cases None is returned.
     """
     try:
         collection = self.mongo_db[collection_name]
         if isinstance(documents, list):
             # unordered so one duplicate does not stop the rest of the batch
             return collection.insert_many(documents,
                                           ordered=False).inserted_ids
         return collection.insert_one(documents).inserted_id
     except pymongo.errors.BulkWriteError:
         logger.warning('Duplicated records.')
     except Exception as e:
         # Bug fix: the error used to be print()-ed to stdout and logged only
         # at DEBUG level, making real failures invisible in production.
         # logger.exception records it at ERROR level with the traceback.
         logger.exception(
             'Exception while inserting to MongoDB as exception: {}'.format(
                 e))
Esempio n. 18
0
 def folder_exists(self, folder_name):
     """Return True if *folder_name* is a child of the Onedata space, else False.

     Any request/parsing failure is logged as a warning and reported as
     "does not exist".
     """
     folder_name = '{0}/'.format(folder_name.strip('/ '))
     url = 'https://{0}{1}{2}?children'.format(self.oneprovider_host,
                                               self.cdmi_path,
                                               self.onedata_space)
     headers = {**self.cdmi_version_header, **self.onedata_auth_header}
     try:
         response = requests.get(url, headers=headers)
         # only a 200 answer carries a usable 'children' listing
         return (response.status_code == 200
                 and folder_name in response.json()['children'])
     except Exception as e:
         logger.warning(
             f'Cannot check if folder "{folder_name}" exists. Error: {e}')
         return False
Esempio n. 19
0
 def delete_folder(self, folder_name):
     """Delete *folder_name* from the Onedata space via the CDMI API.

     Success (HTTP 204) is logged at INFO; any other status or request
     failure is logged as a warning.  Nothing is returned.
     """
     url = 'https://{0}{1}{2}/{3}/'.format(self.oneprovider_host,
                                           self.cdmi_path,
                                           self.onedata_space, folder_name)
     headers = {**self.cdmi_version_header, **self.onedata_auth_header}
     try:
         response = requests.delete(url, headers=headers)
         if response.status_code != 204:
             # CDMI answers 204 No Content on a successful delete.
             raise Exception(response.status_code)
         logger.info(
             f'Folder "{folder_name}" deleted successfully in space "{self.onedata_space}"'
         )
     except Exception as e:
         logger.warning(
             f'Unable to delete folder "{folder_name}". Error: {e}')
Esempio n. 20
0
 def time_interval_limit(self, event_times, event_list, seconds_limit, tag):
     """Flag consecutive event pairs that occur less than *seconds_limit* apart.

     For every adjacent pair in *event_list* whose occurrence times are
     closer than the limit, the pair is counted, its dump strings are
     appended to ``self.unqualified_event_str`` and collected locally, and
     a warning is logged with *tag*.
     """
     unqualified_time = 0
     unqualified_event_list = []
     if event_times > 1:
         for index in range(0, event_times - 1):
             job1_occure_time = event_list[index].occur_seconds
             job2_occure_time = event_list[index + 1].occur_seconds
             if (job2_occure_time - job1_occure_time) < seconds_limit:
                 unqualified_time += 1
                 first_dump = event_list[index].start_dump_str
                 second_dump = event_list[index + 1].start_dump_str
                 # Bug fix: logger.warning was given extra positional args
                 # although the message has no %-placeholders, so logging
                 # raised a formatting error; build one message instead.
                 logger.warning("(不合格)" + tag + ", 两次事件 dump分别为:\n" +
                                first_dump + "\n" + second_dump)
                 # Bug fix: a stray comma previously turned this assignment
                 # into a tuple; concatenate into a single string as intended.
                 self.unqualified_event_str = (self.unqualified_event_str +
                                               "(不合格)" + tag +
                                               " 两次事件 dump分别为:\n" +
                                               first_dump + "\n" +
                                               second_dump)
                 unqualified_event_list.append(first_dump)
                 unqualified_event_list.append(second_dump)
Esempio n. 21
0
 def get_deployment_envvars(self, name, namespace):
     """Return the env-var list of container 0 of deployment *name* in *namespace*.

     Any HTTP or parsing failure is logged and an empty list is returned.
     """
     deployments_path = self.deployments_path.format(namespace)
     url = 'https://{0}:{1}{2}/{3}'.format(self.kubernetes_service_host,
                                           self.kubernetes_service_port,
                                           deployments_path,
                                           name)
     try:
         response = requests.get(url,
                                 verify=self.cert_verify,
                                 headers=self.auth_header)
         if response.status_code != 200:
             raise Exception(f'Error reading deployment {name} - {str(response.status_code)}\n{str(response.content)}')
         deploy = response.json()
         containers = deploy['spec']['template']['spec']['containers']
         if len(containers) > 1:
             logger.warning('The function have more than one container. Getting environment variables from container 0')
         first_container = containers[0]
         return first_container['env'] if 'env' in first_container else []
     except Exception as e:
         logger.error(e)
         return []