Exemple #1
0
def removeDevice(update: Update, context: CallbackContext):
    """Telegram handler: remove the device named in the message from the db file."""
    chat_id = update.message.chat_id

    # Normalize the command text: drop the "rimuovi" keyword and every space.
    device_name = update.message.text.lower().replace("rimuovi", "").replace(" ", "")

    try:
        devices = retrieveDevicesInfo()
    except Exception:
        context.bot.send_message(chat_id=chat_id, text="Errore nel db")
        return

    for position, device in enumerate(devices):
        if device.name != device_name:
            continue
        del devices[position]
        # Rewrite the db file with the remaining devices, one JSON object per line.
        with open("./db", "w") as file:
            for remaining in devices:
                file.write(
                    str(JSONSerializer.serialize(remaining)).replace(
                        "\'", "\"") + "\n")
        context.bot.send_message(chat_id=chat_id, text="Device eliminato")
        return

    context.bot.send_message(chat_id=chat_id, text="Device non trovato")
Exemple #2
0
    def suggestions():
        """Serve city suggestions for the query-string parameters; 400 on bad coordinates."""
        raw_q = request.args.get(Q_PARAMETER)
        q = raw_q if raw_q else ""

        try:
            raw_lat = request.args.get(LATITUDE_PARAMETER)
            raw_lon = request.args.get(LONGITUDE_PARAMETER)
            # Coordinates are optional; float() on garbage input falls through
            # to the BAD_REQUEST branch below.
            latitude = float(raw_lat) if raw_lat else None
            longitude = float(raw_lon) if raw_lon else None
            city_query = CityQuery(suffix_tree)

        except ValueError:
            return abort(HTTPStatus.BAD_REQUEST)

        matches = city_query.get(normalize_input(q), latitude, longitude)
        body = json.dumps(
            JSONSerializer.serialize(matches),
            ensure_ascii=False,
            sort_keys=False,
            indent=2,
        )
        response = Response(
            body,
            mimetype='application/json',
            content_type="application/json; charset=utf-8",
        )
        response.status_code = HTTPStatus.OK
        return response
Exemple #3
0
def save_jobs(jobs):
    """Serialize *jobs* and dump them to a timestamped JSON file.

    The file lands in tools/recorded_jobs/ and embeds the current time in
    its name so successive runs never overwrite each other.

    :param jobs: object graph that JSONSerializer knows how to serialize.
    """
    date_str = datetime.datetime.now().isoformat()
    # NOTE(review): ISO timestamps contain ':' which is illegal in Windows
    # file names — confirm this only runs on POSIX systems.
    name = os.path.join('tools', 'recorded_jobs', f"jobs-{date_str}.json")
    # Explicit utf-8 keeps the dump portable regardless of locale default.
    with open(name, 'w', encoding='utf-8') as f:
        serialized_jobs = JSONSerializer.serialize(jobs)
        json.dump(serialized_jobs, f)
        # Lazy %-args: the message is only formatted if INFO is enabled.
        logging.info('[%s] dumped %d jobs.', name, len(serialized_jobs))
    def test_json_serialization_nested(self):
        """Round-trip a dataclass that nests another dataclass."""
        song = Song(Person("Fred"))
        expected_json = {'artist': {'name': "Fred"}}

        with self.subTest("Serialize nested dataclass -> JSON"):
            self.assertEqual(expected_json, JSONSerializer.serialize(song))

        with self.subTest("Deserialize JSON -> nested dataclass"):
            self.assertEqual(song, JSONSerializer.deserialize(Song, expected_json))
    def test_json_serialization_basic(self):
        """Round-trip a flat dataclass through the JSON serializer."""
        person = Person("Fred")
        expected_json = {'name': "Fred"}

        with self.subTest("Serialize dataclass -> JSON"):
            self.assertEqual(expected_json, JSONSerializer.serialize(person))

        with self.subTest("Deserialize JSON -> dataclass"):
            self.assertEqual(person, JSONSerializer.deserialize(Person, expected_json))
def get_repositories_by_organization_and_team(organization_name: str,
                                              team_name: str,
                                              include_repo_detail: str):
    """Fetch public original and forked repo info from GitHub (by organization
    name) and Bitbucket (by team name).

    Pass include_repo_detail='false' for top-level stats only; any other value
    keeps the per-repository details in the response.
    """
    service = RepositoryService()
    info = service.get_repositories_by_organization_and_team(
        organization_name, team_name)

    # The flag arrives as query-string text, hence the string comparison.
    if include_repo_detail == 'false':
        info.repositories = {}
    return JSONSerializer.serialize(info), 200
def get_similar_images(dest_image, feature_extraction_method):
    """Return the 10 database images most similar to *dest_image*.

    Similarity is the sum of Bhattacharyya distances between HSV color
    histograms and/or texture histograms, as selected by
    feature_extraction_method; lower scores rank first. Side effect: the
    matched files are copied into MEDIA_ROOT. Returns a list of
    JSONSerializer-serialized ImageItem objects.
    """
    import os  # stdlib; the original reached it accidentally via `cv2.os`

    start = time.time()
    print(feature_extraction_method)
    source_image = cv2.imread(
        os.path.join(settings.MEDIA_ROOT, str(dest_image)))

    use_color = isColorFeature(feature_extraction_method)
    use_texture = isTextureFeature(feature_extraction_method)

    # Describe (and round) the query image once, outside the DB loop — the
    # original re-rounded the same array on every iteration.
    if use_color:
        hsv_histogram = HSVColorHistogram([8, 8, 8])
        source_color_features = np.around(
            np.array(hsv_histogram.describe(source_image), dtype=np.float32),
            decimals=8)
    if use_texture:
        texture_histogram = TextureHistogram(radius=3)
        source_texture_features = np.around(
            np.array(texture_histogram.describe(source_image),
                     dtype=np.float32),
            decimals=8)

    results = []
    db_images = Images.objects.all().filter(status='1')
    for temp_image in db_images:
        score_color = 0
        if use_color:
            db_color_features = np.around(
                np.array(get_histogram_from_string(temp_image.color_histogram,
                                                   512),
                         dtype=np.float32),
                decimals=8)
            score_color = cv2.compareHist(source_color_features,
                                          db_color_features,
                                          cv2.HISTCMP_BHATTACHARYYA)

        score_texture = 0
        if use_texture:
            db_texture_features = np.array(
                get_histogram_from_string(temp_image.texture_histogram, 26),
                dtype=np.float32)
            score_texture = cv2.compareHist(source_texture_features,
                                            db_texture_features,
                                            cv2.HISTCMP_BHATTACHARYYA)

        results.append(ImageItem(
            temp_image.name, "http://localhost:8000/media/" + temp_image.name,
            score_color + score_texture, temp_image.width, temp_image.height))

    # Keep only the 10 best (lowest) combined distances.
    results = sorted(results, key=lambda item: item.score)[:10]
    print(results)
    # NOTE(review): hard-coded source directory — should come from settings.
    for result in results:
        copyfile('/home/gorkem/Desktop/flickr-test-data/' + result.name,
                 os.path.join(settings.MEDIA_ROOT, result.name))
    end = time.time()
    print(end - start)  # dropped the leftover "asdsadsadsa" debug print
    return [JSONSerializer.serialize(item) for item in results]
 def _convert_to_string(input):
     """json.dumps the serialized *input*, renaming snake_case keys to the
     hyphenated names the manifest format expects.

     Longer keys are replaced before their substrings (bounding_box_metadata
     before bounding_box), which removes the patch-up replace the original
     needed to repair the half-renamed 'bounding-box_metadata'.
     """
     f_output = json.dumps(
                 JSONSerializer.serialize(input)
             )
     # TODO: need to find a better way to do this (rename during
     # serialization instead of string surgery on the JSON text).
     for snake, hyphen in (
             ('source_ref', 'source-ref'),
             ('bounding_box_metadata', 'bounding-box-metadata'),
             ('bounding_box', 'bounding-box'),
             ('class_map', 'class-map'),
             ('human_annotated', 'human-annotated'),
             ('creation_date', 'creation-date'),
             ('job_name', 'job-name'),
     ):
         f_output = f_output.replace(snake, hyphen)
     return f_output
Exemple #9
0
def get_lib(lib_name):
    """Build a JSON summary for a PyPI library, shaped like:

    {
      "name": "flask",
      "latest": "X.Y.Z",
      "count": 34,
      "versions": [...]
    }
    """
    versions = PypiRetriever.get_versions(lib_name)
    library = LibraryData(name=lib_name,
                          latest=versions[-1],
                          versions=versions,
                          counter=len(versions))
    return JSONSerializer.serialize(library)
Exemple #10
0
def process_image():
    """Decode the uploaded 'photo' as grayscale, run the receipt processor,
    and return the serialized results as a JSON response."""
    # Raw upload bytes -> uint8 array. np.fromstring is deprecated for binary
    # data (removed in recent NumPy); frombuffer is the supported equivalent.
    photo = request.files['photo'].read()
    nparr = np.frombuffer(photo, np.uint8)
    # decode image
    img = cv2.imdecode(nparr, cv2.IMREAD_GRAYSCALE)

    # do processing here....
    res = receipt_image_processor.process_image(img)
    print(res)
    js = [JSONSerializer.serialize(item) for item in res]
    # build a response dict to send back to client
    return Response(json.dumps(js, ensure_ascii=False).encode('utf8'),
                    mimetype='application/json')
Exemple #11
0
    def test_json_serialization_types(self):
        """Check serialize/deserialize round-trips across primitive and generic types."""
        # (declared type, Python value, expected JSON form)
        test_cases = [
            (int, 1, 1),
            (float, 1.0, 1.0),
            (str, "Fred", "Fred"),
            (bool, True, True),
            (dict, {'name': "Fred"}, {'name': "Fred"}),
            (Dict, {'name': "Fred"}, {'name': "Fred"}),
            (Dict[str, Person], {'abc123': Person("Fred")}, {'abc123': {'name': "Fred"}}),
            (list, [{'name': "Fred"}], [{'name': "Fred"}]),
            (List, [{'name': "Fred"}], [{'name': "Fred"}]),
            (List[Person], [Person("Fred")], [{'name': "Fred"}]),
            (type(None), None, None),
        ]

        for declared_type, value, expected_json in test_cases:
            with self.subTest("Serialize object", obj=value):
                self.assertEqual(expected_json, JSONSerializer.serialize(value))

            with self.subTest("Deserialize object", obj=value):
                self.assertEqual(value, JSONSerializer.deserialize(declared_type, expected_json))
Exemple #12
0
def apply_to_db(band: Band, db_handle, is_detailed):
    """Persist a crawled Band, its releases, and its lineup to the database.

    The serialized band dict is massaged first: lineup and releases are
    written through their own tables, and string dates become date objects.

    :param band: crawled band object to persist.
    :param db_handle: DB facade exposing add_band/add_release/add_member/etc.
    :param is_detailed: when True, label data would also be written (TODO).
    """
    logger = logging.getLogger('Crawler')
    logger.debug("Apply to DB...")

    # Serialize a Band object and massage it so that the DB model understands it.
    temp_band_dict = JSONSerializer.serialize(band)
    # TODO: Find out if these deletions are necessary.
    del temp_band_dict['lineup']
    del temp_band_dict['releases']
    # DB expects date objects instead of strings.
    temp_band_dict['active'] = make_active_list(band.active)
    temp_band_dict['visited'] = datetime.strptime(band.visited,
                                                  "%Y-%m-%d").date()

    # Only the founding year is known, so pin the date to January 1st.
    if band.formed != 'N/A':
        temp_band_dict['formed'] = date(int(band.formed), 1, 1)
    else:
        temp_band_dict['formed'] = None

    logger.debug(f'  Writing data for band {band.link}.')
    db_handle.add_band(temp_band_dict)

    for emid, release in band.releases.items():
        # Serialize a fresh copy because the release date must become a date object.
        release_copy = JSONSerializer.serialize(release)
        # This is not the accurate date, only the year.
        release_copy['release_date'] = date(int(release_copy['release_date']),
                                            1, 1)
        logger.debug(f'  Writing data for release {release_copy["name"]}.')
        db_handle.add_release(release_copy)
        db_handle.band_recorded_release(band.emid, emid)

    for status, members in band.lineup.items():
        for member in members:
            temp_member_dict = JSONSerializer.serialize(member)
            temp_member_dict['visited'] = datetime.strptime(
                member.visited, "%Y-%m-%d").date()
            logger.debug(
                f"  Writing data for artist {temp_member_dict['link']}.")

            try:
                db_handle.add_member(temp_member_dict)
            except Exception:
                # logger.exception records the traceback itself; the stray
                # exception argument it used to get broke %-formatting.
                logger.exception(
                    f'Adding the band member was unsuccessful: {member.link}')

            for instrument in member.instruments:
                try:
                    db_handle.member_played_in_band(
                        member.emid, band.emid, instrument[0],
                        member.pseudonym, make_time_spans(instrument[1]),
                        get_dict_key(MEMBER_STATUS, status))
                except Exception:
                    logger.exception("Making member connection failed.")
                    logger.error(member)
                    logger.error(band.emid)
                    logger.error(instrument)
                    logger.error(member.pseudonym)
                    logger.error(get_dict_key(MEMBER_STATUS, status))

    # Add labels if mode is detailed.
    if is_detailed:
        # TODO: label handling not implemented yet.
        pass
Exemple #13
0
    def run(self):
        """Runs crawling as long as band links are retrieved from the links queue.

        :return: -1 as soon as the queue runs out of links.
        """
        self.logger.debug("Running " + self.name)

        # `!=` instead of the old `is not`: identity comparison against a
        # string literal depends on interning and raises a SyntaxWarning
        # on modern Python.
        while stop_crawl_user_input != "Q":
            try:
                link_band_temp = self.bandLinks.get_nowait()
            except queue.Empty:
                return -1

            # TODO: Implement revisiting mechanism based on date.
            # No need to visit if the band is already in the database.
            if link_band_temp in self.visited_entities['bands']:
                self.logger.debug(f"  Skipping {link_band_temp}.")
                self.update_bar(link_band_temp)
                self.band_errors[STATUS_SKIPPED][link_band_temp] = ""
                continue

            try:
                crawl_result = self.crawl_band(link_band_temp)
            except Exception:
                # logger.exception records the traceback itself; the stray
                # exception argument it used to get broke %-formatting.
                self.logger.exception('Something bad happened while crawling.')
                crawl_result = None

            # Error case: putting the link back into circulation.
            if crawl_result is None:
                if link_band_temp not in self.band_errors[STATUS_ERROR]:
                    self.band_errors[STATUS_ERROR][link_band_temp] = 1
                else:
                    self.band_errors[STATUS_ERROR][link_band_temp] += 1

                if self.band_errors[STATUS_ERROR][
                        link_band_temp] < self.retries_max:
                    self.bandLinks.put(link_band_temp)
                else:
                    self.logger.error(
                        f'Too many retries for {link_band_temp}.')
                    self.update_bar(link_band_temp)
                continue
            else:
                self.visited_entities['bands'][link_band_temp] = ''

            self.lock.acquire()

            try:
                apply_to_db(crawl_result, self.db_handle, self.is_detailed)
                self.band_errors[STATUS_ADDED][link_band_temp] = ''
            except Exception:
                self.logger.exception(
                    'Writing artists failed! This is bad. Expect loss of data for the above band.')
                self.band_errors[STATUS_ERROR][link_band_temp] = ''
            finally:
                self.lock.release()
                self.update_bar(link_band_temp)

            # Saving the data to disk will later enable us to limit getting live data if it is not needed.
            actual_band_path = f"databases/{crawl_result.country}"
            os.makedirs(actual_band_path, exist_ok=True)
            # We take the band link because it always uses escaped sequences. This way we have the highest
            # compatibility for writing files in underlying filesystems. The slash must be replaced of course.
            db_path = Path(
                f"{actual_band_path}/{crawl_result.link.replace('/', '_')}.json"
            )
            # TODO: Add try block for the dump. It crashed once because it found a Tag object.
            band_data = JSONSerializer.serialize(crawl_result)
            # `with` guarantees the handle is closed even if the dump raises.
            with open(db_path, "w", encoding="utf-8") as actual_band_file:
                actual_band_file.write(json.dumps(band_data))
from dataclasses_serialization.json import JSONSerializer

from services.models.job import BridgeTxInfo, JobTxInfo

# Demo: build a sample cross-chain job (inbound BNB tx, outbound ETH tx)
# and round-trip it through the JSON serializer.
job = JobTxInfo('123', JobTxInfo.STATUS_ACTIVE, chain='BNB', intx_time=0,
                in_tx=BridgeTxInfo(
                    'BNB', 66312, 100.4, 'bnb123', '',
                    '49394034930434093409343', 2302303),
                out_tx=BridgeTxInfo(
                    'ETH', 5454343, 100.1, '', '0xC349309232ABC',
                    '3432423292832983', 2302305))


print(JSONSerializer.serialize(job))

serialized = JSONSerializer.serialize(job)
restored = JSONSerializer.deserialize(JobTxInfo, serialized)
print(restored)
# The deserialized job must compare equal to the original.
assert job == restored