def build_output_response(recordId, label, error, cluster_labels):
    """

    :param inputs: The inputs gathered from the extraction process
    :param outputs: The outputs object - power skill output
    :param cluster_labels: The provided labels for the clusters
    :return: The json response object
    """
    values = ObjDict()
    values.values = []
    entity_values = {}

    if len(cluster_labels) > 0:
        entity_values['label'] = label
    else:
        if len(label) > 0:
            entity_values['label'] = int(label[0])
        else:
            entity_values['label'] = ''

    if len(error) > 0:
        errors = [error]
    else:
        errors = ""

    values.values.append({"recordId": recordId, \
                          "errors": errors,
                          "data": entity_values,
                          "warnings": ""})

    return values
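
A minimal usage sketch (not from the original project; the arguments are invented) showing the shape of the response this helper builds, assuming the objdict package:

# Hypothetical call with an empty error and no cluster labels
response = build_output_response("r1", ["3"], "", [])
print(response.dumps())
# Roughly: {"values": [{"recordId": "r1", "errors": "", "data": {"label": 3}, "warnings": ""}]}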
Example No. 2
    def json_data_for_trade(self):

        rows = list()

        # meta
        ticker = self.ticker
        date = self.prediction_date
        total_score = self.total_score
        id = "%s-%s-%s" % (ticker, date, total_score)

        meta = {
            "index": {
                "_index": TRADE_INDEX_NAME,
                "_type": TRADE_TYPE_NAME,
                "_id": id
            }
        }
        rows.append(json.dumps(meta))

        # data
        row = ObjDict()
        row.total_score = total_score
        row.timestamp = self.prediction_date
        row.ticker = self.ticker
        rows.append(json.dumps(row))

        return rows
Example No. 3
    def fill_meta(self):
        super(DescriptionsRemaker, self).fill_meta()

        self.meta_remaked.descriptions = ObjDict()

        for description_index, description in self.meta_decompiled.data.descriptions.items(
        ):
            if description.content:
                description_title = ""
                with open(
                        description.content.data.title.replace(
                            "decompiled://", self.PATH_PHASE_DECOMPILED),
                        "rb") as f:
                    description_title_temp = f.read()

                for char_index, char in enumerate(description_title_temp):
                    if char < 32:
                        break
                    elif char < 128:
                        description_title += chr(char)
                    else:
                        description_title += self.CHARTABLE[char - 128]

                data_description = ObjDict()
                data_description.title = description_title

                self.meta_remaked.descriptions[
                    description_index] = data_description
Example No. 4
def build_output_response(inputs, outputs, error=None):
    """

    :param inputs: The inputs gathered from the extraction process
    :param outputs: The outputs object - power skill output
    :return: The json response object
    """
    values = ObjDict()
    values.values = []
    entity_values = {}
    entities = []

    entity_values['modelName'] = 'Your model'
    entity_values['language'] = 'EN'
    entity_values['text'] = 'Your prediction'
    entities.append(entity_values)
    entity_values = {}
    errors = ''
    values.values.append({'recordId': inputs['values'][0]['recordId'],
                          'correlationId': inputs['values'][0]['data']['correlationId'],
                          'batch': inputs['values'][0]['data']['batch'],
                          "errors": errors,
                          "data": entity_values,
                          "warnings": ""})

    return values
Example No. 5
    def json_data_for_outcome(self, day, outcome, score):

        rows = list()

        # meta
        ticker = day['_source.ticker']
        date = day['_source.timestamp']
        vector = outcome
        id = "%s-%s-%s" % (ticker, date, vector)

        meta = {"index": {"_index": INDEX_NAME, "_type": TYPE_NAME, "_id": id}}
        rows.append(json.dumps(meta))

        # data
        row = ObjDict()
        row.frac_change = outcome[0]
        row.frac_high_range = outcome[1]
        row.frac_low_range = outcome[2]
        open_price = day['_source.open'].values[0]
        predicted_close = open_price * (1 + outcome[0])
        expected_value = outcome[0] * score
        row.predicted_close = predicted_close
        row.expected_value = expected_value
        row.timestamp = day['_source.timestamp'].values[0]
        row.score = score
        row.ticker = day['_source.ticker'].values[0]
        rows.append(json.dumps(row))

        return rows
Example No. 6
    def __init__(self, name, current=None):
        ObjDict.__init__(self)
        del self["__type__"]  # Delete __type__ field
        self["name"] = name
        if current:
            self["current"] = current
        return
Example No. 7
class Config:
    def __init__(self, filename):
        self.filename = self.get_path(filename)
        self.bot = ObjDict()
        self.config = ObjDict()
        self.check()

        # for modules
        self.messages = {}
        self.mentionLastFind = datetime.datetime.now()
        self.LastMyMessage = {}

    @staticmethod
    def get_path(p):
        return f"{os.getcwd()}/configs/{p}"

    def load(self):
        with codecs.open(self.filename, "r", "utf-8-sig") as file:
            self.config = ObjDict(json.load(file))

    def save(self):
        with codecs.open(self.filename, "w", "utf-8-sig") as file:
            json.dump(self.config, file, ensure_ascii=False, indent=4)

    def check(self):
        if not isfile(self.get_path('config.json')):
            try:
                shutil.copy(self.get_path('config.json.sample'),
                            self.get_path('config.json'))
                exit("Настрой файл config.json")
            except Exception as s:
                print("Проверьте ваши права на данную папку!")
                print(s)
                exit()

        else:
            self.load()
            for c, v in self.config.items():
                try:
                    if v == "":
                        raise ValueError
                except AttributeError:
                    print(
                        "У тебя неправильно настроен конфиг. Перезапусти скрипт и настрой config.json"
                    )  # "Your config is set up incorrectly; restart the script and configure config.json"
                    exit()
                except Exception as s:
                    print(f"[config.json] {c} is empty.")
                    print(s)
                    exit()

    def add_value(self, attr, value):
        self.config.update({attr: value})

    # def del_value(self, attr):
    #     self.config

    def __repr__(self):
        return str(self.config.items())
Example No. 8
    def __init__(self, issue, source, source_index):
        super(ScreensDecompiler, self).__init__(issue, source, source_index)

        self.PATTERN_FILE_SCREEN = "%s%03d.json"

        self.PATTERN_DECOMPILED_SCREEN = "decompiled://%s/%s/%s/%03d.json"

        self.counts = ObjDict()
Example No. 9
    def __init__(self, issue, source, source_index):
        super(TextsRemaker, self).__init__(issue, source, source_index)

        self.CHARTABLE = u"ČüéďäĎŤčěĚĹÍľĺÄÁÉžŽôöÓůÚýÖÜŠĽÝŘťáíóúňŇŮÔšřŕŔ¼§▴▾                           Ë   Ï                 ß         ë   ï ±  ®©  °   ™   "
        self.fonts = ObjDict()

        print("Loading fonts...")

        #if self.issue.number == "quo-1999-05":
        #index_max = 2
        #elif self.issue.number >= "28":
        if self.issue.number >= "28":
            index_max = 4
        else:
            index_max = 2

        for index in tqdm(
                range(0, index_max),
                desc="fonts",
                ascii=True,
                leave=False,
                bar_format=
                "{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}]"):
            if index == 0 or self.source.version > 1:
                with open(
                        "%s%s/%s/%s.json" %
                    (self.PATH_PHASE_REMAKED, self.issue.number, "fonts",
                     index), "r") as f:
                    #content = f.read()
                    lines = f.readlines()  # TODO
                    content = ''.join(lines)  # TODO
                    self.fonts[str(index)] = ObjDict(content)

                for font_index, font in tqdm(
                        self.fonts[str(index)].fonts.items(),
                        total=len(self.fonts[str(index)].fonts),
                        desc="characters",
                        ascii=True,
                        leave=False,
                        bar_format=
                        "{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}]"
                ):
                    for font_variant_index, font_variant in font.items():
                        for character_index, character in font_variant.characters.items(
                        ):
                            with Image.open(
                                    character.asset.replace(
                                        "remaked://",
                                        self.PATH_PHASE_REMAKED)) as i:
                                character.image = i.copy()

        self.PATTERN_PATH_TEXT = "%s%s" % (self.PATH_DATA_REMAKED, "%03d/")

        self.PATTERN_FILE_TEXT_ASSET = "%s%d.png"
        self.PATTERN_FILE_TEXT_ASSET_INVERSE = "%s%d_inverse.png"
        self.PATTERN_FILE_TEXT_PLAIN = "%s%d.txt"
        self.PATTERN_FILE_TEXT_LINKS = "%s%d.json"
Example No. 10
    def __init__(self, filename):
        self.filename = self.get_path(filename)
        self.bot = ObjDict()
        self.config = ObjDict()
        self.check()

        # for modules
        self.messages = {}
        self.mentionLastFind = datetime.datetime.now()
        self.LastMyMessage = {}
Example No. 11
def measuresComponent():
    connection = MongoClient(MONGODB_HOST, MONGODB_PORT)
    collProject = connection[DB_NAME][COLLECTION_PROJECTS]
    collMetrics = connection[DB_NAME][COLLECTION_METRICS]
    
    
    url = 'https://sonarcloud.io/api/measures/component'
    query = {'component': 'monica', 'metricKeys': 'ncloc,complexity,violations'}
    r = requests.get(url, params=query)
    measures_dict = r.json()
    print(measures_dict)
    
    #extract, create and insert a dict with project information
    data = ObjDict()
    data.id = measures_dict['component'].get('id') 
    data.key = measures_dict['component'].get('key')
    data.name = measures_dict['component'].get('name')
    data.description = measures_dict['component'].get('description')
    data.qualifier = measures_dict['component'].get('qualifier')
    data.language = measures_dict['component'].get('language', '')
    data.path = measures_dict['component'].get('path', '')
    collProject.insert_one(data)
    print(data)
    
    #insert measures by adding project id    
    for measure in measures_dict['component']['measures']:
        measure['projectId'] = data.id
        collMetrics.insert_one(measure)
        print(measure)
    
    return measures_dict
Example No. 12
    def parse(self, response):
        domain = response.meta['domain']
        filename = response.meta['filename']
        for _item in response.css('item'):
            item = ObjDict()
            item.reference = _item.css('link::text').extract_first()
            if domain == self.get_domain(item.reference):
                # Parameterized query (assuming a sqlite3 connection) to avoid SQL injection
                result = self.conn.execute(
                    "SELECT COUNT(*) FROM store WHERE hash=?",
                    (item.reference,)).fetchone()
                if result[0] == 0:
                    self.conn.execute("INSERT INTO store VALUES (?)",
                                      (item.reference,))
                    item.title = _item.css('title::text').extract_first()
                    item.abstract = _item.css(
                        'description::text').extract_first()
                    timestamp = parse(
                        _item.css('pubDate::text').extract_first()).strftime(
                            '%Y-%m-%d %H:%M:%S')
                    item['timestamp'] = timestamp
                    item.language = detect(item.abstract)
                    item.source = domain
                    item.filename = filename
                    item.contentType = 'text/html'

                    parser = ArticleParser(
                        contentCss=response.meta['contentCss'])
                    request = scrapy.Request(item.reference, parser.process)
                    request.meta['item'] = item
                    yield request
Example No. 13
	def _variant_content_init(self):
		self.data_variant.content.offset_linktable = self.variant_content.offset_linktable
		self.data_variant.content.count_linktable = self.variant_content.count_linktable
		self.data_variant.content.linktable_meta = ObjDict()
		self.data_variant.content.linktable = ObjDict()
		self.data_variant.content.count_linetable_meta = self.variant_content.count_linetable_meta
		self.data_variant.content.offset_linetable_meta = self.variant_content.offset_linetable_meta
		self.data_variant.content.linetable_meta = ObjDict()
		self.data_variant.content.count_palettetable = self.variant_content.count_palettetable
		self.data_variant.content.offset_palettetable = self.variant_content.offset_palettetable
		self.data_variant.content.palettetable = ObjDict()
		self.data_variant.content.linetable = ObjDict()
Example No. 14
	def fill_scheme(self):
		super(MusicRemaker, self).fill_scheme()

		self.scheme.mods = ObjDict()

		for mod_index, mod in self.meta.data.mods.items():
			if mod.content:
				data_mod = ObjDict()
				#data_mod.width = mod.content.width
				#data_mod.height = mod.content.height
				data_mod.asset = "assets://%s/%s/%s/%04d.mod" % (self.issue.number, self.source.library, self.source_index, int(mod_index))

				self.scheme.mods[mod_index] = data_mod
Example No. 15
def convert_to_attachment(attachment, attachment_type=None):
    if "type" in attachment and attachment["type"] in attachment:
        body = attachment[attachment["type"]]
        attachment_type = attachment["type"]
    else:
        body = attachment

    if "sizes" in body:
        m_s_ind = -1
        m_s_wid = 0

        for i, size in enumerate(body["sizes"]):
            if size["width"] > m_s_wid:
                m_s_wid = size["width"]
                m_s_ind = i

        link = body["sizes"][m_s_ind]["url"]  # src

    elif "url" in body:
        link = body["url"]

    else:
        link = None

    Attachment = ObjDict()

    Attachment.type = attachment_type
    Attachment.id = body.get("id")
    Attachment.owner_id = body.get("owner_id")
    Attachment.access_key = body.get("access_key")
    Attachment.link = link
    Attachment.rawattach = attachment
    return Attachment
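
A hedged usage sketch for convert_to_attachment; the dictionary below only mimics the fields the function reads (type, sizes, url, id, owner_id, access_key), and every value is invented:

photo = {
    "type": "photo",
    "photo": {
        "id": 1, "owner_id": 2, "access_key": "abc",
        "sizes": [
            {"width": 130, "url": "https://example.com/small.jpg"},
            {"width": 604, "url": "https://example.com/large.jpg"},
        ],
    },
}
att = convert_to_attachment(photo)
print(att.type, att.link)  # photo https://example.com/large.jpg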
Example No. 16
    def fill_meta_fat(self):
        self.meta.fat = ObjDict()
        self.meta.fat.offsets = ObjDict()

        for offset_index, offset in enumerate(
                tqdm(
                    self.library.fat.offsets,
                    desc="fat.offsets",
                    ascii=True,
                    leave=False,
                    bar_format=
                    "{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}]"
                )):
            self.meta.fat.offsets[str(offset_index + 1)] = offset
Example No. 17
	def _variant_content_linetable_meta(self):
		if self.variant_content.linetable_meta:
			for linetable_meta_index, linetable_meta in enumerate(tqdm(self.variant_content.linetable_meta, desc="linetable_meta", ascii=True, leave=False, bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}]")):
				data_linetable_meta = ObjDict()
				data_linetable_meta.param_offset = linetable_meta.param_offset
				data_linetable_meta.content = ObjDict()

				data_linetable_meta.content.offset = linetable_meta.content.offset
				data_linetable_meta.content.height = linetable_meta.content.height
				data_linetable_meta.content.top = linetable_meta.content.top
				#data_linetable_meta.content.foo = linetable_meta.content.foo # TODO
				data_linetable_meta.content.foo = ''

				self.data_variant.content.linetable_meta[str(linetable_meta_index)] = data_linetable_meta
Example No. 18
    def get(self):
        number_of_files = 100

        result = service.get_files_as_regularsurfaces(number_of_files)

        l = len(result)
        result = None
        gc.collect()

        data = ObjDict()
        data.number_of_thread = l
        data.cpu_count = os.cpu_count()
        data.gc_get_count = list(gc.get_count())

        return jsonify(data)
Example No. 19
    def get(self):
        number_of_files = 400

        result = service.get_files_as_streams_thread(number_of_files)
        #result = service.get_files_as_bytes_thread(number_of_files)
        l = len(result)
        result = None
        gc.collect()

        data = ObjDict()
        data.number_of_thread = l
        data.cpu_count = os.cpu_count()
        data.gc_get_count = list(gc.get_count())

        return jsonify(data)
Example No. 20
    def __init__(self, issue, source, source_index):
        super(ImagesRemaker, self).__init__(issue, source, source_index)

        self.descriptions = ObjDict()

        print("Loading descriptions...")

        if self.issue.number >= "06":
            with open(
                    "%s%s/%s/0.json" %
                (self.PATH_PHASE_REMAKED, self.issue.number, "descriptions"),
                    "r") as f:
                lines = f.readlines()  # TODO
                content = ''.join(lines)  # TODO
                self.descriptions = ObjDict(content)
Example No. 21
def write_to_json(to_be_compared):
    """
    This function is used to create json output of the table that is being processed.
    :param to_be_compared: the content of the table, a list of lines, each line contains several cells
    """

    # decide the file path and write into json_dict
    check_file = FILE_TO_READ.split(".")[0][-1]
    if int(check_file) > 1:
        # read previous json file and continuous write
        path = OUTPUT_JSON_PATH + '/' + FILE_TO_READ.split(
            ".")[0][0:-1] + '1' + '.json'
        with open(path) as f:
            json_dict = json.load(f)
        prev_row_list = [int(x) for x in list(json_dict.keys())]
        start_row_num = max(prev_row_list) + 1
        json_dict = parse_json(json_dict, start_row_num, to_be_compared)
    else:
        # set the PATH and create a new ObjDict
        path = OUTPUT_JSON_PATH + '/' + FILE_TO_READ.split(".")[0] + '.json'
        json_dict = ObjDict()
        json_dict = parse_json(json_dict, 0, to_be_compared)

    # write into json
    json_object = json.dumps(json_dict, indent=4)
    with open(path, "w") as outfile:
        outfile.write(json_object)
Example No. 22
    def fill_meta(self):
        self.meta_remaked.header = ObjDict()
        self.meta_remaked.header.issue = self.issue.number
        self.meta_remaked.header.path = self.source.path
        self.meta_remaked.header.library = self.source.library
        self.meta_remaked.header.version = self.source.version
        self.meta_remaked.header.index = self.source_index

        if hasattr(self.meta_decompiled.header, "filedate") and hasattr(
                self.meta_decompiled.header, "filetime"):
            # Decode the DOS/FAT packed date (bits 15-9: year-1980, 8-5: month, 4-0: day)
            year = ((self.meta_decompiled.header.filedate & 0b1111111000000000)
                    >> 9) + 1980
            month = (self.meta_decompiled.header.filedate
                     & 0b0000000111100000) >> 5
            day = self.meta_decompiled.header.filedate & 0b0000000000011111
            # Decode the DOS/FAT packed time (bits 15-11: hour, 10-5: minute, 4-0: seconds/2)
            hour = (self.meta_decompiled.header.filetime
                    & 0b1111100000000000) >> 11
            minute = (self.meta_decompiled.header.filetime
                      & 0b0000011111100000) >> 5
            sec = (self.meta_decompiled.header.filetime
                   & 0b0000000000011111) * 2

            try:
                self.meta_remaked.header.created = datetime.datetime(
                    year, month, day, hour, minute, sec).isoformat()
            except ValueError:
                self.meta_remaked.header.created = ""
        else:
            self.meta_remaked.header.created = ""

        self.meta_remaked.header.remaked = datetime.datetime.now().isoformat()
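
The bit masks above follow the DOS/FAT packed date-time layout. A self-contained round-trip sketch (not part of the original class) that packs and unpacks a timestamp the same way:

import datetime

def pack_dos_datetime(dt):
    # bits 15-9: year-1980, 8-5: month, 4-0: day; 15-11: hour, 10-5: minute, 4-0: seconds//2
    filedate = ((dt.year - 1980) << 9) | (dt.month << 5) | dt.day
    filetime = (dt.hour << 11) | (dt.minute << 5) | (dt.second // 2)
    return filedate, filetime

def unpack_dos_datetime(filedate, filetime):
    year = ((filedate & 0b1111111000000000) >> 9) + 1980
    month = (filedate & 0b0000000111100000) >> 5
    day = filedate & 0b0000000000011111
    hour = (filetime & 0b1111100000000000) >> 11
    minute = (filetime & 0b0000011111100000) >> 5
    sec = (filetime & 0b0000000000011111) * 2
    return datetime.datetime(year, month, day, hour, minute, sec)

fd, ft = pack_dos_datetime(datetime.datetime(1997, 8, 26, 14, 30, 58))
print(unpack_dos_datetime(fd, ft))  # 1997-08-26 14:30:58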
Example No. 23
	def fill_meta_header(self):
		super(MusicDecompiler, self).fill_meta_header()

		self.meta.header2 = ObjDict()
		self.meta.header2.count_mods = self.library.header2.count_mods
		self.meta.header2.count_samples = self.library.header2.count_samples
		self.meta.header2.foo = self.library.header2.foo
Example No. 24
    def get(self):
        logger.info("****************************************")
        number_of_files = 100

        result = service.get_blob_files_as_regularsurfaces(number_of_files)

        l = len(result)
        del result
        gc.collect()

        data = ObjDict()
        data.number_of_thread = l
        data.cpu_count = os.cpu_count()
        data.gc_get_count = list(gc.get_count())

        return jsonify(data)
Example No. 25
def config_load(config_path):
    with open(config_path) as f:
        config_yaml = yaml_load(f)

    config = ObjDict(json.dumps(config_yaml))

    return config
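
A small usage sketch, assuming yaml_load is PyYAML's safe_load (imported elsewhere) and a hypothetical flat config file:

# config.yaml (hypothetical):
#   host: localhost
#   port: 5432
config = config_load("config.yaml")
print(config.host, config.port)  # localhost 5432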
Example No. 26
    def get(self):
        number_of_files = 100

        bs = service.get_blobe_as_streams_thread(number_of_files)
        result = service.get_file_streams_as_regularsurfaces_thread(bs)
        l = len(result)
        del result
        del bs
        gc.collect()

        data = ObjDict()
        data.number_of_thread = l
        data.cpu_count = os.cpu_count()
        data.gc_get_count = list(gc.get_count())

        return jsonify(data)
Example No. 27
def createPredictionJsonFile(fileConfig, fileName, co_pred, rss_pred, co_bin,
                             co_type, original_esi, rss):
    data = ObjDict()
    data.co_pred = np.float64(co_pred).tolist()
    data.rss_pred = rss_pred.tolist()
    data.ESI = original_esi.tolist()
    data.rss = rss.tolist()
    data.co_bin = co_bin.tolist()
    data.co_type = co_type.tolist()
    json_data = data.dumps()
    fileLocation = fileConfig['dataDirectory'] + fileConfig[
        'resultDirectory'] + fileName
    with open(fileLocation, 'w') as outfile:
        outfile.write(json_data)
Example No. 28
    def json_data_for_accuracy(self):

        rows = list()

        # meta
        ticker = self.ticker
        date = self.prediction_date
        prediction = self.prediction
        id = "%s-%s-%s" % (ticker, date, prediction)

        meta = {
            "index": {
                "_index": TRADE_INDEX_NAME,
                "_type": TRADE_TYPE_NAME,
                "_id": id
            }
        }
        rows.append(json.dumps(meta))

        # data
        row = ObjDict()
        row.result = self.result
        row.prediction = self.prediction
        row.prediction_date = self.prediction_date
        row.ticker = self.ticker
        row.accuracy = self.accuracy
        rows.append(json.dumps(row))

        return rows
Example No. 29
def generate_flights():
    napis = "DEF"
    liczba = 5678
    departure_date = datetime.datetime(2000, 1, 1, 12, 15)
    arrival_date = datetime.datetime(2000, 1, 1, 14, 35)
    global json_string3
    json_string3 = "["
    for i in range(0, 50):
        for j in range(0, 50):
            flight = ObjDict()
            flight.model = "loty2.flight"
            flight.pk = i * 50 + j + 1
            flight.fields = ObjDict()
            flight.fields.flight_number = napis + str(liczba)
            liczba += 1
            flight.fields.plane = j + 1
            miasto1 = randint(0, 149)  # random airport/city indices ("miasto" is Polish for "city")
            miasto2 = randint(0, 149)
            while miasto2 == miasto1:
                miasto2 = randint(0, 149)
            flight.fields.departure_airport = miasto1 + 1
            flight.fields.arrival_airport = miasto2 + 1
            flight.fields.date_of_departure = departure_date
            flight.fields.date_of_arrival = arrival_date
            flight.fields.flight_crew = j % 10 + 1
            departure_date += datetime.timedelta(hours=6)
            arrival_date += datetime.timedelta(hours=6, minutes=1)
            json_string3 += flight.dumps()
            if i != 49 or j != 49:
                json_string3 += ","
Example No. 30
def build_output_response(inputs, image_label, error=None):
    """

    :param inputs: The inputs gathered from the extraction process
    :param image_label: The label of the image we have predicted
    :return: The json response object
    """
    values = ObjDict()
    values.values = []
    image_labels = {'amllabel': image_label}

    errors = ''
    values.values.append({'recordId': inputs['values'][0]['recordId'],
                          "errors": errors,
                          "data": image_labels,
                          "warnings": ""})

    return values