def calculate_feature(self, game_list, ignore_cache=False):

        feature_cache_path = URLHelper.cache_folder_path(
        ) + "features/" + self.get_printable_name() + ".dat"
        cached_game_list = FileHelper.read_object_from_disk(
            file_path=feature_cache_path)

        if cached_game_list is not None and not ignore_cache:
            game_list.games_df[self.get_feature_names()] = cached_game_list
            print('Feature ' + self.get_printable_name() +
                  ' loaded from cache')
        else:
            if game_list is None:
                return game_list
            elif game_list.games_df is None:
                return game_list
            elif game_list.games_df.empty:
                return game_list
            start_time = datetime.datetime.now()
            game_list = self.inner_calculate_feature(game_list)
            end_time = datetime.datetime.now()
            print('Feature ' + self.get_printable_name() + ' took ' +
                  str((end_time - start_time).total_seconds()) + ' seconds')

            if not ignore_cache:
                FileHelper.save_object_to_disk(
                    game_list.games_df[self.get_feature_names()],
                    feature_cache_path)

        return game_list
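
A minimal, self-contained sketch of the same cache-or-compute pattern, using pickle directly; the cache directory, feature name, and compute callable are placeholders for illustration, not the original URLHelper/FileHelper API.

import datetime
import os
import pickle


def cached_compute(name, compute, cache_dir="cache/features", ignore_cache=False):
    """Return compute()'s result, reusing a pickled copy on disk when allowed."""
    path = os.path.join(cache_dir, name + ".dat")
    if not ignore_cache and os.path.exists(path):
        with open(path, "rb") as handle:
            return pickle.load(handle)

    start = datetime.datetime.now()
    result = compute()
    elapsed = (datetime.datetime.now() - start).total_seconds()
    print("Feature " + name + " took " + str(elapsed) + " seconds")

    if not ignore_cache:
        os.makedirs(cache_dir, exist_ok=True)
        with open(path, "wb") as handle:
            pickle.dump(result, handle)
    return result
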
 def replace(self) -> dict:
     """
     Replaces data in the target file, dispatching on the file extension.
     Output:
         - dict
     """
     data = {
         'message': self.args['data'],
         'replace_message': self.args['replace_data'],
         'row': '',
         'column': '',
         'filename': self.filename
     }
     file_ext = FileHelper.get_ext(self.filename)
     if file_ext == 'csv':
         status = FileHelper.replace_in_file(
             self.filename,
             self.args['data'],
             self.args['replace_data'],
             file_ext,
             self.args['row'],
             self.args['column'],
         )
         data['row'] = self.args['row']
         data['column'] = self.args['column']
     elif file_ext == 'txt':
         status = FileHelper.replace_in_file(
             self.filename,
             self.args['data'],
             self.args['replace_data'],
             file_ext
         )
     else:
         # Unsupported extension: keep 'status' defined so the response can still be built.
         status = False
     return ServiceHelper.construct_response(status, data)
    def validate_args(self):
        if self.parsed_args.command not in ['file_manager', 'network_manager']:
            self.parser.error(
                "Command invalid. Must use file manager or network manager")

        elif self.parsed_args.command == 'file_manager':
            if self.parsed_args.action == 'send' and not self.parsed_args.data:
                self.parser.error("Must specify data to send.")
            if self.parsed_args.action != "send" and self.parsed_args.new_line:
                self.parser.error("Cannot enter new line using this action.")

            if self.parsed_args.action == "replace" and FileHelper.get_ext(
                    self.parsed_args.file) == 'csv':
                if not self.parsed_args.replace_data or not self.parsed_args.data or not self.parsed_args.row or not self.parsed_args.column:
                    self.parser.error(
                        "Must specify row, column, data to replace with, and data being replaced in csv file."
                    )
            elif self.parsed_args.action == "replace" and FileHelper.get_ext(
                    self.parsed_args.file) == 'txt':
                if not self.parsed_args.replace_data or not self.parsed_args.data:
                    self.parser.error(
                        "Must specify data to replace with and data being replaced in txt file."
                    )

        elif self.parsed_args.command == 'network_manager':
            if not self.parsed_args.host or not self.parsed_args.port:
                self.parser.error("Must specify host and port.")

            if self.parsed_args.action == 'send' and not self.parsed_args.data:
                self.parser.error("Must specify data to send.")
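
For context, a minimal argparse sketch of a command/action layout that the validation above could sit on top of; the flag and action names here are assumptions for illustration, not the original CLI definition.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("command", choices=["file_manager", "network_manager"])
parser.add_argument("action", choices=["send", "read", "replace"])
parser.add_argument("--file")
parser.add_argument("--data")
parser.add_argument("--replace_data")
parser.add_argument("--row")
parser.add_argument("--column")
parser.add_argument("--new_line", action="store_true")
parser.add_argument("--host")
parser.add_argument("--port")

parsed_args = parser.parse_args(
    ["file_manager", "send", "--file", "notes.txt", "--data", "hello"])
print(parsed_args.command, parsed_args.action, parsed_args.data)
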
Example #4
 def __init__(self, vocab_size):
     self.vocab_size = vocab_size
     self.file_helper = FileHelper()
     self.file_path = self.file_helper.get_vocabulary_path(self.vocab_size)
     if self.does_vocab_exist():
         self.load_vocab()
     else:
         self.build_vocab()
 def __send_to_file(self):
     """
     This function sends the data to the output file.
     """
     file_ext = FileHelper.get_ext(self.output_file)
     response = FileHelper.send_to_file(self.output_file, self.message,
                                        file_ext, True)
     if not response:  # TODO: quickfix.. write better error message/ use better structure.
         sys.stdout.write("Unhandled log filename specified.")
Example #6
    def game_list_by_url(url, league_name, season=""):
        def read_game_list_from_csv(_url, _division, _season, _league_name):

            data_frame = pd.read_csv(_url,
                                     usecols=[
                                         "HomeTeam", "AwayTeam", "Div", "Date",
                                         "FTAG", "FTHG"
                                     ]).dropna(how='all')
            data_frame["LeagueName"] = _league_name  # new feature
            data_frame["Season"] = _season  # new feature
            data_frame["SeasonId"] = data_frame["LeagueName"] + \
                                     data_frame["Div"] + \
                                     data_frame["Season"]

            # setting the draw field (.loc replaces the removed DataFrame.ix indexer)
            data_frame.loc[data_frame.FTAG == data_frame.FTHG, "Draw"] = True
            data_frame.loc[data_frame.FTAG != data_frame.FTHG, "Draw"] = False

            # Modeling
            data_frame["HomeTeam"] = data_frame["HomeTeam"].astype("category")
            data_frame["AwayTeam"] = data_frame["AwayTeam"].astype("category")
            data_frame["Div"] = data_frame["Div"].astype("category")
            data_frame["Season"] = data_frame["Season"].astype("category")
            data_frame["LeagueName"] = data_frame["LeagueName"].astype(
                "category")
            data_frame["Date"] = pd.to_datetime(data_frame["Date"],
                                                format="%d/%m/%y")

            current_game_list = GameList(_division, data_frame)

            return current_game_list

        # extracting the division from url
        division = url.split('/')[-1].split('.')[0]

        # extracting the season from url
        if not season:
            season = url.split('/')[-2]

        # define cache folder and file paths
        cache_folder_path = URLHelper.cache_folder_path() + league_name
        cache_file_path = cache_folder_path + "/" + division + season + ".dat"

        cached_game_list = FileHelper.read_object_from_disk(cache_file_path)

        if cached_game_list:  # if cache existed
            return cached_game_list
        else:
            game_list = read_game_list_from_csv(url, division, season,
                                                league_name)
            FileHelper.save_object_to_disk(game_list, cache_file_path)
            return game_list
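
A small, self-contained pandas sketch of the Draw flag and categorical conversions above, run on an in-memory frame; .loc is the current replacement for the removed DataFrame.ix indexer.

import pandas as pd

df = pd.DataFrame({
    "HomeTeam": ["A", "B"],
    "AwayTeam": ["B", "A"],
    "FTHG": [1, 2],
    "FTAG": [1, 0],
})

# Boolean draw flag derived from the full-time goal columns.
df.loc[df.FTAG == df.FTHG, "Draw"] = True
df.loc[df.FTAG != df.FTHG, "Draw"] = False

# Categorical dtypes keep memory down for repeated team names.
df["HomeTeam"] = df["HomeTeam"].astype("category")
df["AwayTeam"] = df["AwayTeam"].astype("category")

print(df)
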
Example #7
    def __init__(self, unique_name: str, dataset_type: DatasetType):
        super().__init__()

        self._file_helper = FileHelper()
        self._messages = self._get_message_data(unique_name, dataset_type)

        indices = self._get_indices_data(unique_name, dataset_type)
        self._raw_data = np.load(
            self._file_helper.get_input_path(dataset_type))[indices]

        self.transforms = torchvision.transforms.Compose([
            torchvision.transforms.ToPILImage(),
            torchvision.transforms.ToTensor()
        ])
 def __init__(self, cluster_name, namespace, database_name, collection_name,
              dynamo_helper):
     self.__cluster_name = cluster_name
     self.__namespace = namespace
     self.__database_name = database_name
     self.__collection_name = collection_name
     self.__current_change = None
     self.__previous_change = None
     self.__resume_token = None
     self.__batch_id = 0
     self.__batch = []
     self.__timer = None
     self.__event = threading.Event()
     self.__fh = FileHelper()
     self.__dh = dynamo_helper
 def send(self) -> dict:
     """
     This function sends data to a file.
     Output:
         - dict
     """
     # NOTE: potentially move 'try' block to this method
     data = {'message': self.args['data'], 'filename': self.filename}
     file_ext = FileHelper.get_ext(self.filename)
     status = FileHelper.send_to_file(
         self.filename,
         self.args['data'],
         file_ext,
         self.args['new_line']
     )
     return ServiceHelper.construct_response(status, data)
    def format_message(self, action:str, log_file:str) -> str:
        """
        This function formats the log message.
        Input:
            - str
            - str
        Output:
            - str
        """
        log = getattr(self, "_{}_log".format(action))()
        logfile_ext = FileHelper.get_ext(log_file)
        if logfile_ext == 'txt':
            base = "{} - {} {}:{} [{}] - {}: {} (Source - {}:{}) (Destination - {}:{}), Size: {}, Protocol: {}"
        elif logfile_ext == 'csv':
            base = "{},{},{},{},{},{},{},{},{},{},{},{},{}"
        else:
            # Fall back to the plain-text layout for unrecognised log extensions.
            base = "{} - {} {}:{} [{}] - {}: {} (Source - {}:{}) (Destination - {}:{}), Size: {}, Protocol: {}"
        message = base.format(
            self.user_name,
            self.timestamp,
            self.process_name,
            self.process_id,
            self.command,
            self.status.upper(),
            log,
            self.data['sock']['source']['host'],
            self.data['sock']['source']['port'],
            self.data['sock']['destination']['host'],
            self.data['sock']['destination']['port'],
            self.data['size'],
            'TCP' # TODO: Get protocol from socket.
        )

        return message
 def format_message(self, action: str, log_file: str) -> str:
     """
     This function formats the log message.
     Input:
         - str
         - str
     Output:
         - str
     """
     log = getattr(self, "_{}_log".format(action))()
     logfile_ext = FileHelper.get_ext(log_file)
     if logfile_ext == 'txt':
         base = "{} - {} {}:{} [{}] - {}: {} ({})"
     elif logfile_ext == 'csv':
         base = "{},{},{},{},{},{},{},{}"
     else:
         # Fall back to the plain-text layout for unrecognised log extensions.
         base = "{} - {} {}:{} [{}] - {}: {} ({})"
     message = base.format(
         self.user_name,
         self.timestamp,
         self.process_name,
         self.process_id,
         self.command,
         self.status.upper(),
         log,
         self.abs_file_path,
     )
     return message
Example #12
	def get_model_content_from_file(file_name: str, model_type: str, params={}):
		if 'inputs' in params and 'outputs' in params:
			return {
				SerializationHelper.__model_file_name_map.get(model_type, 'model'): open(file_name, 'rb')
			}
		return FileHelper.get_compressed_tar_file_content(
			file_name,
			SerializationHelper.get_list_of_model_file_content(model_type, params)
		)
Example #13
	def get_stored_options(file_name: str)->dict:
		settings = FileHelper.get_text_file_content(file_name, True)
		if not settings:
			return {}
		options = {}
		try:
			options = json.loads(settings)
		except Exception:
			# Malformed settings content: fall back to empty options.
			pass
		return options
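
The same tolerant-parse idea in a self-contained form: empty or malformed JSON falls back to an empty dict instead of raising (json.JSONDecodeError is a subclass of ValueError).

import json


def parse_options(settings: str) -> dict:
    if not settings:
        return {}
    try:
        return json.loads(settings)
    except ValueError:
        return {}


print(parse_options('{"verbose": true}'))  # {'verbose': True}
print(parse_options('not json'))           # {}
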
Example #14
    def __init__(self, unique_name: str, dataset_type: DatasetType):
        super().__init__()

        self._unique_name = unique_name
        self._dataset_type = dataset_type

        self._file_helper = FileHelper()
        self._messages = self._get_message_data()
        
        indices = self._get_indices_data()
        self._properties = get_metadata_properties(dataset_type)[indices]
Example #15
	def build_caffe2_model_file(model, file_name, inputs_spec: dict, output_spec: dict):
		with open(CAFFE2_MODEL_PREDICT_FILE_NAME, 'wb') as f:
			f.write(model.net._net.SerializeToString())
		init_net = caffe2_pb2.NetDef()
		for param in model.params:
			op = core.CreateOperator(
				"GivenTensorFill",
				[],
				[param],
				arg=[
					utils.MakeArgument("shape", workspace.FetchBlob(param).shape),
					utils.MakeArgument("values", workspace.FetchBlob(param))
				]
			)
			init_net.op.extend([op])
		init_net.op.extend([core.CreateOperator(
				"ConstantFill",
				[],
				[inputs_spec['inputs']['0']['name']],
				shape=tuple(inputs_spec['inputs']['0']['shape'])
			)]
		)
		with open(CAFFE2_MODEL_INIT_FILE_NAME, 'wb') as f:
			f.write(init_net.SerializeToString())

		FileHelper.write_to_file(INPUT_SPEC_FILE_NAME, json.dumps(inputs_spec))
		FileHelper.write_to_file(OUTPUT_SPEC_FILE_NAME, json.dumps(output_spec))
		FileHelper.write_files_to_tar(
			file_name + '.caffe2',
			SerializationHelper.get_list_of_model_file_content('caffe2')
		)
		os.remove(CAFFE2_MODEL_INIT_FILE_NAME)
		os.remove(CAFFE2_MODEL_PREDICT_FILE_NAME)
		os.remove(INPUT_SPEC_FILE_NAME)
		os.remove(OUTPUT_SPEC_FILE_NAME)
 def _replace_log(self) -> str:
     """
     This function formats a message when replacing data in a file.
     Output:
         - str
     """
     file_ext = FileHelper.get_ext(self.data['filename'])
     if file_ext == 'csv':
         logger_msg = "Replacing {} with {} at ({}, {}) in {}".format(
             self.data['replace_message'], self.data['message'],
             self.data['row'], self.data['column'], self.data['filename'])
     elif file_ext == 'txt':
         logger_msg = "Replacing {} with {} in {}".format(
             self.data['replace_message'], self.data['message'],
             self.data['filename'])
     else:
         logger_msg = "Replacing data in {}".format(self.data['filename'])
     return logger_msg
Example #17
	def build_sklearn_model_file(model, file_name, inputs_spec: dict, output_spec: dict):
		joblib.dump(model, SKLEARN_MODEL_FILE_NAME)
		FileHelper.write_to_file(INPUT_SPEC_FILE_NAME, json.dumps(inputs_spec))
		FileHelper.write_to_file(OUTPUT_SPEC_FILE_NAME, json.dumps(output_spec))
		FileHelper.write_files_to_tar(
			file_name + '.sklearn',
			SerializationHelper.get_list_of_model_file_content('sklearn')
		)
		os.remove(SKLEARN_MODEL_FILE_NAME)
		os.remove(INPUT_SPEC_FILE_NAME)
		os.remove(OUTPUT_SPEC_FILE_NAME)
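
A self-contained sketch of the same dump-then-tar packaging idea using joblib and the standard tarfile module; the file names are placeholders and this does not reproduce the original FileHelper/SerializationHelper behaviour (scikit-learn and joblib are assumed to be installed).

import json
import os
import tarfile

import joblib
from sklearn.linear_model import LogisticRegression

model = LogisticRegression().fit([[0.0], [1.0]], [0, 1])

# Serialise the estimator and a minimal input spec next to each other.
joblib.dump(model, "model.joblib")
with open("input_spec.json", "w") as spec:
    json.dump({"inputs": {"0": {"name": "x", "shape": [1]}}}, spec)

# Bundle both files into a single archive, then clean up the loose files.
with tarfile.open("model.sklearn.tar.gz", "w:gz") as tar:
    tar.add("model.joblib")
    tar.add("input_spec.json")

os.remove("model.joblib")
os.remove("input_spec.json")
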
    def get_screen_shot_log_text(log_name):
        my_file = Path(
            str.format(r'{}\{}.png', TestRun.screenshots_unc_path, log_name))
        if my_file.is_file():
            return '\n\nSCREENSHOT ----->\n ![View Screenshot](' + str.format(
                '{}/{}.png', TestRun.screenshots_test_rail_virtual_directory,
                log_name) + ')'

        else:
            # Take screenshot
            pic = pyautogui.screenshot()

            # Save the image
            pic.save(
                FileHelper.resolveAgnosticPath(TestRun.screenshots_unc_path,
                                               log_name + '.png'))
            return '\n\nSCREENSHOT ----->\n ![View Screenshot](' + str.format(
                '{}/{}.png', TestRun.screenshots_test_rail_virtual_directory,
                log_name) + ')'
Example #19
class AgentVocab(object):
    """
    Vocab object to create vocabulary and load if exists
    """

    START_TOKEN = "<S>"

    def __init__(self, vocab_size):
        self.vocab_size = vocab_size
        self.file_helper = FileHelper()
        self.file_path = self.file_helper.get_vocabulary_path(self.vocab_size)
        if self.does_vocab_exist():
            self.load_vocab()
        else:
            self.build_vocab()

    def does_vocab_exist(self):
        return os.path.exists(self.file_path)

    def load_vocab(self):
        with open(self.file_path, "rb") as f:
            d = pickle.load(f)
            self.stoi = d["stoi"]  # dictionary w->i
            self.itos = d["itos"]  # list of words
            self.bound_idx = self.stoi[self.START_TOKEN]  # last word in vocab

    def save_vocab(self):
        with open(self.file_path, "wb") as f:
            pickle.dump({"stoi": self.stoi, "itos": self.itos}, f)

    def build_vocab(self):
        self.stoi = {}
        self.itos = []

        for i in range(self.vocab_size - 1):
            self.itos.append(str(i))
            self.stoi[str(i)] = i

        self.itos.append(self.START_TOKEN)
        self.stoi[self.START_TOKEN] = len(self.itos) - 1
        self.bound_idx = self.stoi[self.START_TOKEN]
        self.save_vocab()
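
A small round-trip sketch of the pickle persistence used by save_vocab/load_vocab, with a temporary file standing in for FileHelper's vocabulary path.

import os
import pickle
import tempfile

stoi = {"0": 0, "1": 1, "<S>": 2}  # word -> index
itos = ["0", "1", "<S>"]           # index -> word

with tempfile.NamedTemporaryFile(suffix=".pkl", delete=False) as f:
    pickle.dump({"stoi": stoi, "itos": itos}, f)
    path = f.name

with open(path, "rb") as f:
    d = pickle.load(f)

assert d["itos"][d["stoi"]["<S>"]] == "<S>"
os.remove(path)
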
Example #20
class MessageDataset(data.Dataset):
    def __init__(self, unique_name: str, dataset_type: DatasetType):
        super().__init__()

        self._file_helper = FileHelper()
        self._messages = self._get_message_data(unique_name, dataset_type)

        indices = self._get_indices_data(unique_name, dataset_type)
        self._raw_data = np.load(
            self._file_helper.get_input_path(dataset_type))[indices]

        self.transforms = torchvision.transforms.Compose([
            torchvision.transforms.ToPILImage(),
            torchvision.transforms.ToTensor()
        ])

    def __getitem__(self, index):
        message = self._messages[index, :]
        raw_data = self._raw_data[index, :]

        raw_data = self.transforms(raw_data)

        return message, raw_data

    def __len__(self):
        return len(self._messages)

    def _get_message_data(self, unique_name, dataset_type):
        messages_filename = f'{unique_name}.{dataset_type}.messages.npy'
        messages_data = np.load(
            os.path.join(self._file_helper.messages_folder_path,
                         messages_filename))

        return messages_data

    def _get_indices_data(self, unique_name, dataset_type):
        indices_filename = f'{unique_name}.{dataset_type}.indices.npy'
        indices_data = np.load(
            os.path.join(self._file_helper.messages_folder_path,
                         indices_filename))

        return indices_data
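
A usage sketch, assuming the corresponding .messages.npy and .indices.npy files already exist for the chosen name and split; the unique_name and DatasetType member below are made up for illustration.

from torch.utils import data

dataset = MessageDataset("example_run", DatasetType.VALID)  # hypothetical arguments
loader = data.DataLoader(dataset, batch_size=32, shuffle=True)

for messages, raw_data in loader:
    print(messages.shape, raw_data.shape)
    break
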
Example #21
 def _load_model_from_file(self) -> bool:
     is_file_reachable, error_message = FileHelper.is_file_reachable(
         self.get_model_file(), True)
     if not is_file_reachable:
         self.set_error(
             MindError(ERROR_CODE_MODEL_FILE_IS_UNREACHABLE, error_message,
                       [self.__class__.__name__]))
         return False
     try:
         self._set_model(self.get_model_from_file(self.get_model_file()))
     except MindException as ex:
         self.set_errors(ex.get_errors())
         # Propagate failure after recording the errors.
         return False
     except Exception as ex:
         errors = list(ex.args)
         errors.insert(0, self.get_model_file())
         errors.insert(0, self.__class__.__name__)
         self.set_error(
             MindError(ERROR_CODE_MODEL_LOAD_FAIL,
                       "{} Can't load model from file [{}]", errors))
         return False
     return True
 def format_message(self, log_file: str) -> str:
     """
     This function formats the log message.
     Input:
         - str
     Output:
         - str
     """
     logfile_ext = FileHelper.get_ext(log_file)
     if logfile_ext == 'txt':
         base = "{} - {} {}:{} [{}] - {}: {}"
     elif logfile_ext == 'csv':
         base = "{},{},{},{},{},{},{}"
     else:
         # Fall back to the plain-text layout for unrecognised log extensions.
         base = "{} - {} {}:{} [{}] - {}: {}"
     message = base.format(
         self.user_name,
         self.timestamp,
         self.process_name,
         self.process_id,
         self.command,
         self.status.upper(),
         self.set_message(),
     )
     return message
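
A compact sketch of extension-based template selection with an explicit default, so an unrecognised log extension never leaves the format string undefined; the template fields are illustrative, not the original log schema.

import os

TEMPLATES = {
    ".txt": "{user} - {timestamp} [{command}] - {status}: {message}",
    ".csv": "{user},{timestamp},{command},{status},{message}",
}


def render(log_file: str, **fields) -> str:
    ext = os.path.splitext(log_file)[1].lower()
    template = TEMPLATES.get(ext, TEMPLATES[".txt"])  # default to plain text
    return template.format(**fields)


print(render("audit.csv", user="alice", timestamp="2020-01-01", command="send",
             status="SUCCESS", message="ok"))
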
class DocumentBatcher:
    def __init__(self, cluster_name, namespace, database_name, collection_name,
                 dynamo_helper):
        self.__cluster_name = cluster_name
        self.__namespace = namespace
        self.__database_name = database_name
        self.__collection_name = collection_name
        self.__current_change = None
        self.__previous_change = None
        self.__resume_token = None
        self.__batch_id = 0
        self.__batch = []
        self.__timer = None
        self.__event = threading.Event()
        self.__fh = FileHelper()
        self.__dh = dynamo_helper

    def initialize(self, token):
        if token is not None:
            logger.info("Initializing the document batcher with token: %s",
                        json.dumps(token, cls=JSONFriendlyEncoder))
            self.__batch_id = token["batch_id"] + 1  # use the next batch id
            self.__previous_change = json.loads(token["validation_document"])
            self.__resume_token = json.loads(token["resume_token"])
        self.__timer = RepeatedTimer(10, self.__on_time_elapsed)
        self.__timer.start()
        self.__event.set()

    def on_change_event(self, cluster_name, database_name, collection_name,
                        change):
        # full_document = change["fullDocument"]
        # TODO: What are you doing with the cluster_name and other input parameters?
        self.__event.wait()
        self.__previous_change = self.__current_change
        self.__current_change = change
        self.__batch.append(change)

    def __on_time_elapsed(self):
        self.__event.clear()
        # TODO: control passed wait in on_change_event, but not appended yet.
        # Poor man's hack for the above scenario: sleep for up to 0.1 seconds.
        time.sleep(random.uniform(0.01, 0.1))
        # TODO: Allow saving empty batch even to help track the heartbeats
        s3_key_name = "null"
        if len(self.__batch) > 0:
            s3_key_name = "{}/{}/{}/{}-batch-{:06.0f}.json".format(
                self.__cluster_name, self.__database_name,
                self.__collection_name, self.__namespace, self.__batch_id)
            self.__write_to_s3(s3_key_name)
        self.__update_dynamodb(s3_key_name)
        self.__batch_id = self.__batch_id + 1
        self.__batch[:] = []
        self.__event.set()

    def __write_to_s3(self, s3_key_name):
        # TODO: handle any failures
        file_path = self.__create_local_batch_file()
        self.__upload_to_s3(file_path, s3_key_name)
        self.__fh.delete_file(file_path)

    def __update_dynamodb(self, s3_key_name):
        # TODO: handle any failures
        # TODO: do it in transactions
        # update watchers with namespace and current batch id, last token etc
        # insert change_events with namespace
        timestamp = datetime.utcnow().isoformat()
        watcher_item = self.__get_watcher_item(timestamp)
        change_event_item = self.__get_change_event_item(
            s3_key_name, timestamp)
        self.__dh.save_watcher(watcher_item)
        self.__dh.save_change_event(change_event_item)

    def __get_watcher_item(self, timestamp):
        token = None
        if self.__previous_change is not None:
            token = self.__previous_change["_id"]
        else:
            token = self.__resume_token
        item = {
            "watcher_id": "{}::{}".format(self.__cluster_name,
                                          self.__namespace),
            "cluster_name": self.__cluster_name,
            "namespace": self.__namespace,
            "resume_token": dumps(token),
            "validation_document": dumps(self.__current_change),
            "batch_id": self.__batch_id,
            "document_count": len(self.__batch),
            "created_timestamp": timestamp
        }
        return item

    def __get_change_event_item(self, s3_link, timestamp):
        token = None
        if self.__previous_change is not None:
            # TODO: possibly ["_id"] even on resume token
            token = self.__previous_change["_id"]
        else:
            token = self.__resume_token
        item = {
            "watcher_id": "{}::{}".format(self.__cluster_name,
                                          self.__namespace),
            "batch_status": "{}::{:06.0f}".format("false", self.__batch_id),
            "cluster_name": self.__cluster_name,
            "namespace": self.__namespace,
            "batch_id": self.__batch_id,
            "s3_link": s3_link,
            "created_timestamp": timestamp,
            "document_count": len(self.__batch),
            "is_processed": False,
            "resume_token": dumps(token),
            "processed_timestamp": "9999-12-31T00:00:00.000000"
        }
        return item

    def __create_local_batch_file(self):
        lines = []
        for item in self.__batch:
            lines.append("{}\n".format(dumps(item["fullDocument"])))
        temp_file = self.__fh.create_file()
        with open(temp_file.name, 'w') as stream:
            stream.writelines(lines)
        return temp_file.name

    def __upload_to_s3(self, file_path, key_name):
        s3h = S3Helper()
        bucket_name = os.environ['S3_CHANGE_FEED_BUCKET_NAME']
        s3h.upload(file_path, bucket_name, key_name)

    def close(self):
        logger.info("Cleaning up the Document Batcher for namespace: %s",
                    self.__namespace)
        if self.__timer is not None:
            self.__timer.stop()
            # wait until writing to s3/dynamo is done
            self.__event.wait()
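
A stripped-down, self-contained sketch of the Event-gated batching pattern above: producers append only while the event is set, and a periodic flush clears the event while it drains the batch. The timer re-arming below stands in for the RepeatedTimer helper, which is not shown in the example.

import threading
import time


class MiniBatcher:
    def __init__(self, interval=0.5):
        self._batch = []
        self._event = threading.Event()
        self._event.set()
        self._interval = interval
        self._timer = None

    def start(self):
        self._timer = threading.Timer(self._interval, self._flush)
        self._timer.start()

    def add(self, item):
        self._event.wait()   # block while a flush is in progress
        self._batch.append(item)

    def _flush(self):
        self._event.clear()            # pause producers
        time.sleep(0.05)               # let in-flight add() calls settle (same hack as above)
        print("flushing", len(self._batch), "items")
        self._batch.clear()
        self._event.set()
        self.start()                   # re-arm the timer

    def stop(self):
        if self._timer is not None:
            self._timer.cancel()


batcher = MiniBatcher()
batcher.start()
batcher.add({"doc": 1})
time.sleep(1.0)
batcher.stop()
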
    def tearDown(self):
        print("Test script: " + self._testMethodName +
              " is complete. See script logs for additional details.")

        # get the test case id decorator and log it
        errors_result_log = ''
        failures_result_log = ''
        # get the actual test outcome
        result = self.defaultTestResult(
        )  # these 2 methods have no side effects
        self._feedErrorsToResult(result, self._outcome.errors)

        error = self._list2reason(result.errors)
        failure = self._list2reason(result.failures)
        if error:
            errors_result_log = self._get_all_test_method_exceptions_result_text(
                error)
        if failure:
            failures_result_log = self._get_all_test_method_failures_result_text(
                failure)

        # status: [1= passed, 2= blocked,  4= retest, 5= failed ]
        screen_shot_text = self._testMethodName + str(random.randint(9, 99999))
        screen_shot = FileHelper.resolveAgnosticPath(
            TestRun.screenshots_unc_path, screen_shot_text + '.png')
        # str.format(
        #     r'{}\{}.png', TestRun.screenshots_unc_path, screen_shot_text)
        try:
            self.driver.save_screenshot(str(screen_shot))
        except:
            print('There may have been an issue with taking the screenshot')

        # job_url = ''
        # if TestRun.use_sauce_labs == True:
        #     sauce_status = True
        #     if failure:
        #         sauce_status = False
        #     if error:
        #         sauce_status = False
        #     try:
        #         self.sauce = SauceClient(SauceLabs.user, SauceLabs.access_key)
        #         self.sauce.jobs.update_job(self.driver.session_id, passed=sauce_status)
        #         job = self.sauce.jobs.get_job(self.driver.session_id)
        #         import hmac
        #         from hashlib import md5
        #         a = hmac.new(
        #             bytes("{}:{}".format(
        #                 SauceLabs.user,
        #                 SauceLabs.access_key),
        #                 'latin-1'),
        #             bytes(job['id'], 'latin-1'),
        #             md5)
        #
        #         auth_token = a.hexdigest()
        #         video_url = 'https://assets.saucelabs.com/jobs/{}/video.mp4?auth={}'.format(job['id'], auth_token)
        #         # video_name = job['id'] + '.mp4'
        #         # video_path = FileHelper.resolveAgnosticPath(TestRun.screenshots_unc_path, video_name)
        #         #
        #         # try:
        #         #     import urllib.request
        #         #     urllib.request.urlretrieve(video_url, video_path)
        #         # except:
        #         #     pass
        #
        #         job_url = '[CLICK TO VIEW VIDEO]({})'.format(video_url)
        #
        #         # job_url = '<script src = "https://saucelabs.com/video-embed/{}.js?auth={}"></script>'.format(
        #         #     job['id'],
        #         #     str(auth_token)
        #         # )
        #
        #         #                 # Logger.log_and_debug(99, 'info', str(assets))
        #     except Exception as e:
        #         print('SAUCE EXCEPTION: ' + str(e))
        #         pass

        if TestRun.toggle_test_rail == True:
            test_rail = TestRailApi()
            status = 1
            if failure:
                Logger.log_exception(failure)
                status = 5
            elif error:
                Logger.log_exception(error)
                status = 4
            file = ''

            try:
                file = open(Logger.current_log_file)

            except:
                print('unable to open log file!\nLOG FILE: ' +
                      Logger.current_log_file)

            with file as f:
                print('adding test result')
                result_comment = '\n'.join(f.readlines(
                )) + self.get_screen_shot_log_text(screen_shot_text)
                current_test_results = []
                for line in result_comment.split('\n'):
                    # test logs
                    if self._testMethodName in line:
                        if line == self._get_exception_identifier():
                            continue
                        current_test_results.append(line)
                result = '\n'.join(current_test_results)
                result = result + errors_result_log
                result = result + failures_result_log

                # if TestRun.use_sauce_labs == True:
                #     result = result + '\n\n#' + job_url + '#\n\n'

                # This assumes since you have test rail toggled - you've created a test run
                test_case_id = self.test_case_id
                if test_case_id is None:
                    test_case_id = test_rail.get_test_id_by_title(
                        self._testMethodName, self.test_type.test_section_id)

                try:
                    test_rail.add_test_case_result_for_test_run(
                        TestRun.test_run_id, test_case_id, status, str(result))
                except:
                    test_case_id = test_rail.get_test_id_by_title(
                        self._testMethodName, self.test_type.test_section_id)

                    test_rail.add_test_case_result_for_test_run(
                        TestRun.test_run_id, test_case_id, status, str(result))

        try:
            self.driver.close()
            self.driver.quit()
        except:
            print(
                'there may have been an issue closing or quitting the driver.  Killing all driver processes'
            )
Example #25
def baseline(args):
    args = parse_arguments(args)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    file_helper = FileHelper()
    train_helper = TrainHelper(device)
    train_helper.seed_torch(seed=args.seed)

    model_name = train_helper.get_filename_from_baseline_params(args)
    run_folder = file_helper.get_run_folder(args.folder, model_name)

    logger = Logger(run_folder, print_logs=(not args.disable_print))
    logger.log_args(args)

    # get sender and receiver models and save them
    sender, receiver, diagnostic_receiver = get_sender_receiver(device, args)

    sender_file = file_helper.get_sender_path(run_folder)
    receiver_file = file_helper.get_receiver_path(run_folder)
    # torch.save(sender, sender_file)

    if receiver:
        torch.save(receiver, receiver_file)

    model = get_trainer(
        sender,
        device,
        args.dataset_type,
        receiver=receiver,
        diagnostic_receiver=diagnostic_receiver,
        vqvae=args.vqvae,
        rl=args.rl,
        entropy_coefficient=args.entropy_coefficient,
        myopic=args.myopic,
        myopic_coefficient=args.myopic_coefficient,
    )

    model_path = file_helper.create_unique_model_path(model_name)

    best_accuracy = -1.0
    epoch = 0
    iteration = 0

    if args.resume_training or args.test_mode:
        epoch, iteration, best_accuracy = load_model_state(model, model_path)
        print(
            f"Loaded model. Resuming from - epoch: {epoch} | iteration: {iteration} | best accuracy: {best_accuracy}"
        )

    if not os.path.exists(file_helper.model_checkpoint_path):
        print("No checkpoint exists. Saving model...\r")
        torch.save(model.visual_module, file_helper.model_checkpoint_path)
        print("No checkpoint exists. Saving model...Done")

    train_data, valid_data, test_data, valid_meta_data, _ = get_training_data(
        device=device,
        batch_size=args.batch_size,
        k=args.k,
        debugging=args.debugging,
        dataset_type=args.dataset_type,
    )

    train_meta_data, valid_meta_data, test_meta_data = get_meta_data()

    pytorch_total_params = sum(p.numel() for p in model.parameters())

    if not args.disable_print:
        # Print info
        print("----------------------------------------")
        print("Model name: {} \n|V|: {}\nL: {}".format(model_name,
                                                       args.vocab_size,
                                                       args.max_length))
        print(sender)
        if receiver:
            print(receiver)

        if diagnostic_receiver:
            print(diagnostic_receiver)

        print("Total number of parameters: {}".format(pytorch_total_params))

    model.to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    # Train
    current_patience = args.patience
    best_accuracy = -1.0
    converged = False

    start_time = time.time()

    if args.test_mode:
        test_loss_meter, test_acc_meter, _ = train_helper.evaluate(
            model, test_data, test_meta_data, device, args.rl)

        average_test_accuracy = test_acc_meter.avg
        average_test_loss = test_loss_meter.avg

        print(
            f"TEST results: loss: {average_test_loss} | accuracy: {average_test_accuracy}"
        )
        return

    while iteration < args.iterations:
        for train_batch in train_data:
            print(f"{iteration}/{args.iterations}       \r", end="")

            # !!! This is the complete training procedure. Rest is only logging!
            _, _ = train_helper.train_one_batch(model, train_batch, optimizer,
                                                train_meta_data, device)

            if iteration % args.log_interval == 0:

                if not args.rl:
                    valid_loss_meter, valid_acc_meter, _, = train_helper.evaluate(
                        model, valid_data, valid_meta_data, device, args.rl)
                else:
                    valid_loss_meter, hinge_loss_meter, rl_loss_meter, entropy_meter, valid_acc_meter, _ = train_helper.evaluate(
                        model, valid_data, valid_meta_data, device, args.rl)

                new_best = False

                average_valid_accuracy = valid_acc_meter.avg

                if (average_valid_accuracy < best_accuracy
                    ):  # No new best found. May lead to early stopping
                    current_patience -= 1

                    if current_patience <= 0:
                        print("Model has converged. Stopping training...")
                        converged = True
                        break
                else:  # new best found. Is saved.
                    new_best = True
                    best_accuracy = average_valid_accuracy
                    current_patience = args.patience
                    save_model_state(model, model_path, epoch, iteration,
                                     best_accuracy)

                metrics = {
                    'loss': valid_loss_meter.avg,
                    'accuracy': valid_acc_meter.avg,
                }
                if args.rl:
                    metrics['hinge loss'] = hinge_loss_meter.avg
                    metrics['rl loss'] = rl_loss_meter.avg
                    metrics['entropy'] = entropy_meter.avg

                logger.log_metrics(iteration, metrics)

            iteration += 1
            if iteration >= args.iterations:
                break

        epoch += 1

        if converged:
            break

    return run_folder
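
The validation block above implements patience-based early stopping; here is a reduced, self-contained version of that loop with a made-up accuracy sequence.

patience = 3
current_patience = patience
best_accuracy = -1.0

for iteration, accuracy in enumerate([0.52, 0.61, 0.60, 0.59, 0.58, 0.70]):
    if accuracy < best_accuracy:
        current_patience -= 1        # no new best: burn one unit of patience
        if current_patience <= 0:
            print("Model has converged. Stopping training...")
            break
    else:
        best_accuracy = accuracy     # new best: this is where a checkpoint would be saved
        current_patience = patience
    print(iteration, accuracy, best_accuracy, current_patience)
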
Example #26
	def get_model_from_file(self, file_name: str):
		self.__model_file_content = FileHelper.get_compressed_tar_file_content(
			file_name,
			['init_net.pb', 'predict_net.pb', 'input_spec.json', 'output_spec.json']
		)
		return workspace.Predictor(self.__model_file_content['init_net.pb'], self.__model_file_content['predict_net.pb'])
Example #27
def baseline(args):
    args = parse_arguments(args)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    file_helper = FileHelper()
    train_helper = TrainHelper(device)
    train_helper.seed_torch(seed=args.seed)

    model_name = train_helper.get_filename_from_baseline_params(args)
    run_folder = file_helper.get_run_folder(args.folder, model_name)

    metrics_helper = MetricsHelper(run_folder, args.seed)

    # get sender and receiver models and save them
    sender, receiver, diagnostic_receiver = get_sender_receiver(device, args)

    sender_file = file_helper.get_sender_path(run_folder)
    receiver_file = file_helper.get_receiver_path(run_folder)
    # torch.save(sender, sender_file)

    if receiver:
        torch.save(receiver, receiver_file)

    model = get_trainer(
        sender,
        device,
        args.dataset_type,
        receiver=receiver,
        diagnostic_receiver=diagnostic_receiver,
        vqvae=args.vqvae,
        rl=args.rl,
        entropy_coefficient=args.entropy_coefficient,
        myopic=args.myopic,
        myopic_coefficient=args.myopic_coefficient,
    )

    model_path = file_helper.create_unique_model_path(model_name)

    best_accuracy = -1.0
    epoch = 0
    iteration = 0

    if args.resume_training or args.test_mode:
        epoch, iteration, best_accuracy = load_model_state(model, model_path)
        print(
            f"Loaded model. Resuming from - epoch: {epoch} | iteration: {iteration} | best accuracy: {best_accuracy}"
        )

    if not os.path.exists(file_helper.model_checkpoint_path):
        print("No checkpoint exists. Saving model...\r")
        torch.save(model.visual_module, file_helper.model_checkpoint_path)
        print("No checkpoint exists. Saving model...Done")

    train_data, valid_data, test_data, valid_meta_data, _ = get_training_data(
        device=device,
        batch_size=args.batch_size,
        k=args.k,
        debugging=args.debugging,
        dataset_type=args.dataset_type,
    )

    train_meta_data, valid_meta_data, test_meta_data = get_meta_data()

    # dump arguments
    pickle.dump(args, open(f"{run_folder}/experiment_params.p", "wb"))

    pytorch_total_params = sum(p.numel() for p in model.parameters())

    if not args.disable_print:
        # Print info
        print("----------------------------------------")
        print(
            "Model name: {} \n|V|: {}\nL: {}".format(
                model_name, args.vocab_size, args.max_length
            )
        )
        print(sender)
        if receiver:
            print(receiver)

        if diagnostic_receiver:
            print(diagnostic_receiver)

        print("Total number of parameters: {}".format(pytorch_total_params))

    model.to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    # Train
    current_patience = args.patience
    best_accuracy = -1.0
    converged = False

    start_time = time.time()

    if args.test_mode:
        test_loss_meter, test_acc_meter, _ = train_helper.evaluate(
            model, test_data, test_meta_data, device, args.rl
        )

        average_test_accuracy = test_acc_meter.avg
        average_test_loss = test_loss_meter.avg

        print(
            f"TEST results: loss: {average_test_loss} | accuracy: {average_test_accuracy}"
        )
        return

    iterations = []
    losses = []
    hinge_losses = []
    rl_losses = []
    entropies = []
    accuracies = []

    while iteration < args.iterations:
        for train_batch in train_data:
            print(f"{iteration}/{args.iterations}       \r", end="")

            ### !!! This is the complete training procedure. Rest is only logging!
            _, _ = train_helper.train_one_batch(
                model, train_batch, optimizer, train_meta_data, device
            )

            if iteration % args.log_interval == 0:

                if not args.rl:
                    valid_loss_meter, valid_acc_meter, _, = train_helper.evaluate(
                        model, valid_data, valid_meta_data, device, args.rl
                    )
                else:
                    valid_loss_meter, hinge_loss_meter, rl_loss_meter, entropy_meter, valid_acc_meter, _ = train_helper.evaluate(
                        model, valid_data, valid_meta_data, device, args.rl
                    )

                new_best = False

                average_valid_accuracy = valid_acc_meter.avg

                if (
                    average_valid_accuracy < best_accuracy
                ):  # No new best found. May lead to early stopping
                    current_patience -= 1

                    if current_patience <= 0:
                        print("Model has converged. Stopping training...")
                        converged = True
                        break
                else:  # new best found. Is saved.
                    new_best = True
                    best_accuracy = average_valid_accuracy
                    current_patience = args.patience
                    save_model_state(model, model_path, epoch, iteration, best_accuracy)

                # Skip for now  <--- What does this comment mean? printing is not disabled, so this will be shown, right?
                if not args.disable_print:

                    if not args.rl:
                        print(
                            "{}/{} Iterations: val loss: {}, val accuracy: {}".format(
                                iteration,
                                args.iterations,
                                valid_loss_meter.avg,
                                valid_acc_meter.avg,
                            )
                        )
                    else:
                        print(
                            "{}/{} Iterations: val loss: {}, val hinge loss: {}, val rl loss: {}, val entropy: {}, val accuracy: {}".format(
                                iteration,
                                args.iterations,
                                valid_loss_meter.avg,
                                hinge_loss_meter.avg,
                                rl_loss_meter.avg,
                                entropy_meter.avg,
                                valid_acc_meter.avg,
                            )
                        )

                iterations.append(iteration)
                losses.append(valid_loss_meter.avg)
                if args.rl:
                    hinge_losses.append(hinge_loss_meter.avg)
                    rl_losses.append(rl_loss_meter.avg)
                    entropies.append(entropy_meter.avg)
                accuracies.append(valid_acc_meter.avg)

            iteration += 1
            if iteration >= args.iterations:
                break

        epoch += 1

        if converged:
            break

    # prepare writing of data
    dir_path = os.path.dirname(os.path.realpath(__file__))
    dir_path = dir_path.replace("/baseline", "")
    timestamp = str(datetime.datetime.now())
    filename = "output_data/vqvae_{}_rl_{}_dc_{}_gs_{}_dln_{}_dld_{}_beta_{}_entropy_coefficient_{}_myopic_{}_mc_{}_seed_{}_{}.csv".format(
        args.vqvae,
        args.rl,
        args.discrete_communication,
        args.gumbel_softmax,
        args.discrete_latent_number,
        args.discrete_latent_dimension,
        args.beta,
        args.entropy_coefficient,
        args.myopic,
        args.myopic_coefficient,
        args.seed,
        timestamp,
    )
    full_filename = os.path.join(dir_path, filename)

    # write data
    d = [iterations, losses, hinge_losses, rl_losses, entropies, accuracies]
    export_data = zip_longest(*d, fillvalue="")
    with open(full_filename, "w", encoding="ISO-8859-1", newline="") as myfile:
        wr = csv.writer(myfile)
        wr.writerow(
            ("iteration", "loss", "hinge loss", "rl loss", "entropy", "accuracy")
        )
        wr.writerows(export_data)
    myfile.close()

    # plotting
    print(filename)
    plot_data(filename, args)

    return run_folder
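
The CSV export above pads metric lists of unequal length with zip_longest; a self-contained sketch of just that step, using short made-up lists.

import csv
from itertools import zip_longest

iterations = [0, 100, 200]
losses = [0.9, 0.7, 0.5]
hinge_losses = []  # stays empty when the RL path is disabled

rows = zip_longest(iterations, losses, hinge_losses, fillvalue="")
with open("metrics.csv", "w", newline="") as out:
    writer = csv.writer(out)
    writer.writerow(("iteration", "loss", "hinge loss"))
    writer.writerows(rows)
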
Example #28
	def __get_model_class_by_file_name(self, file_name: str, model_type=None)->type:
		_model_type = model_type if model_type else FileHelper.get_file_extension(file_name)
		_model_type_map = self.get_model_to_file_extension_dictionary().get(_model_type, None)
		if not _model_type_map:
			return None
		return ModuleHelper.get_class_for(_model_type_map['module'], _model_type_map['model'])
Example #29
 def __read_content_config_file(self):
     json_data = FileHelper.read_data_from_file(self.path)
     self.check_game_map_json(json_data)
     return json.loads(json_data)