Example #1
def save(url, uri=None, info=None):
    uri = uri or url

    key = create_key(uri, 'full', 'max', '0', 'default',
                     get_setting('cache_format', 'jpg'))

    info = info or get_info(url, uri, download=False)

    if not info:
        i = get_image(uri, url)
        info = {'width': i.width, 'height': i.height, 'format': i.format}

    i = get_image(url, uri)

    # quick hack to avoid tiling of JPEG2000 images when cached
    if get_setting('cache_original', False) and get_setting(
            'opj_decompress', None) and i.format == 'JPEG2000':
        # do nothing for now
        ...
    else:
        # save full size?
        if get_setting('cache_full',
                       False) and not get_setting('save_original', False):
            save_to_cache(
                create_key(uri, 'full', 'max', '0', 'default',
                           get_setting('cache_format', 'jpg')), i)

        ingest(i, url, uri)

    # write info
    info = {'width': i.width, 'height': i.height, 'format': i.format}
    cache.set(uri, dumps(info))

    return info
Example #2
def ingest(i, url, uri=None):
    tile_size = get_setting('tile_size', 512)
    min_n = int(log2(tile_size))
    max_n = int(min(log2(i.width), log2(i.height)))
    uri = uri or url

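    # Build a tile pyramid from the coarsest level down: at level n each tile covers
    # 2**n source pixels per side and is downscaled to the base tile size (2**min_n).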
    for n in reversed(range(min_n, max_n + 1)):
        size = 2**n

        for x in range(0, int((i.width - 1) / size) + 1):
            for y in range(0, int((i.height - 1) / size) + 1):
                offset_x = size * x
                offset_y = size * y
                width = min(size, i.width - offset_x)
                height = min(size, i.height - offset_y)

                i2 = i.crop((offset_x, offset_y, offset_x + width, offset_y + height)) \
                      .resize((round((2**min_n * width)/size), round((2**min_n*height)/size)), Image.LANCZOS)

                if n != min_n:
                    i2 = i2.filter(
                        ImageFilter.UnsharpMask(radius=0.8,
                                                percent=90,
                                                threshold=3))

                save_to_cache(
                    create_key(
                        uri,
                        #','.join( [ str(size*x), str(size*y), str(size*(x+1)-1), str(size*(y+1)-1) ],
                        ','.join([
                            str(offset_x),
                            str(offset_y),
                            str(width),
                            str(height)
                        ]),
                        #'!512,512',
                        ','.join([
                            str(round((2**min_n * width) / size)),
                            str(round((2**min_n * height) / size))
                        ]),
                        '0',
                        'default',
                        get_setting('cache_format', 'jpg'),
                        width=i.width,
                        height=i.height,
                        normalize=True),
                    i2)

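    # additionally pre-render any derivatives configured under the 'prerender' setting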
    for extra in get_setting('prerender', []):
        save_to_cache(create_key(uri, **extra), create_image(i, **extra))
Example #3
def get_image(url, uri=None):
    uri = uri or url

    print(f'get image: {url}', flush=True)

    if url.startswith('file:///'):
        s = open(url[7:], mode='rb')
    else:
        req = hget(url, stream=True)
        req.raw.decode_content = True
        s = req.raw

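    # when caching originals, stream the raw bytes into the cache and open the image
    # from its on-disk location; otherwise read the whole stream into memory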
    if get_setting('cache_original', False) and not url.startswith('file:///'):
        print(f'caching original ({uri})', flush=True)
        key = uri + ':original'
        cache.set(key, s)
        i = Image.open(join(*cache.get_location(key)))
    else:
        b = s.read()
        i = Image.open(BytesIO(b))

    if get_setting('cache_full') and not url.startswith('file:///'):
        # TODO file size sanity check
        key = create_key(uri, 'full', 'max', '0', 'default',
                         get_setting('cache_format', 'jpg'))
        print(f'caching full size ({key})', flush=True)
        save_to_cache(key, i)

    return i
Example #4
def scrape_before_2016(soup):
    all_org_data = {}
    orgs = soup.findAll(
        'li', attrs={'class': 'mdl-list__item mdl-list__item--one-line'})
    for org in orgs:
        link = org.find('a')
        org_name = link.text
        all_org_data[create_key(org_name)] = {
            "name": org_name,
        }
    return all_org_data
Example #5
    def get_random(valid: bool = True) -> dict:
        """Returns a random ID with it's corresponding private key"""

        key = create_key()
        pubkey = parse_key(key.publickey())
        username = random_word(random.randint(1, USERNAME_LIMIT))

        if valid is False:
            username = "******" * (USERNAME_LIMIT + 1)

        return {"private_key": parse_key(key), "id": ID(username, pubkey)}
Example #6
def newUser():
    student_login = ''

    while student_login == '':
        student_login = input('login insper: ').strip()

    student_key = create_key(f'students/{student_login}.key')
    student_name = input('nome completo: ')
    ghname = input('usuário do github: ')
    s = Student(student_login, student_name, ghname, [])
    write_string_to_file(f'students/{student_login}', s.toJSON())
    
    save_encrypted(f'students/{student_login}-achievements', student_key, '[]')
Example #7
def new_user():
    student_login = ''

    while student_login == '':
        student_login = input('login insper: ').strip()

    student_key = create_key(f'students/{student_login}.key')
    student_name = input('nome completo: ')
    student_avatar = input('imagem de avatar: ')
    s = Student(student_login, student_name, student_avatar, [])
    write_string_to_file(f'students/{student_login}', s.toJSON())

    save_encrypted(f'students/{student_login}-achievements', student_key, '[]')
Example #8
def get_num_unique_combos(data_dir, keys):
    """
    Get number of occurrences for a property combination.
    """
    for input_file in glob.glob('%s/*.*' % data_dir):
        with open(input_file, 'rb') as f:
            data=pickle.load(f)
            grouped_by_props=defaultdict(list)
            for doc_id, doc_data in data.items():
                for part_id, part_props in doc_data.items():
                    k=utils.create_key(part_props, keys)
                    grouped_by_props[k].append(part_id)
        print(input_file)
        print('Number of unique combinations:', len(grouped_by_props.keys()))
        print('Distribution', json.dumps(utils.get_distribution(grouped_by_props), sort_keys=True))
Example #9
    def create_keys(self):
        """Create a new pair of private and public keys"""
        try:
            # If we've already created a private key before, import it
            # Otherwise, create it

            with open(self.key_path, "r") as f:
                private_key = RSA.import_key(f.read(),
                                             passphrase=self.__password)
                public_key = private_key.publickey()
        except IOError:
            private_key = create_key()
            public_key = private_key.publickey()

            with open(self.key_path, "wb") as f:
                f.write(
                    private_key.export_key("PEM", passphrase=self.__password))

        self.__private_key = parse_key(private_key)
        self.__public_key = parse_key(public_key)
Example #10
def analyze_distinguishability_of_props(data_dir, keys):
    """
    Analyze how often can two entity pairs be distinguished based on properties, and splits this into: distinguishable, indistinguishable, and sufficient to distinguish.
    """
    input_sets={}
    for input_file in glob.glob('%s/*.*' % data_dir):
        with open(input_file, 'rb') as f:
            data=pickle.load(f)
            grouped_by_name=defaultdict(dict)
            for doc_id, doc_data in data.items():
                for part_id, part_props in doc_data.items():
                    if 'Name' not in part_props.keys() or not part_props['Name'].strip(): continue
                    name=part_props['Name'].strip()
                    k=utils.create_key(part_props, keys)
                    grouped_by_name[name][part_id]=k
        #print(input_file)
        filename=utils.get_filename(input_file)
        input_sets[filename]=utils.obtain_sets(grouped_by_name)
        
    return input_sets
Example #11
def scrape_from_2016(soup):
    all_org_data = {}
    orgs = soup.findAll('li', attrs={'class': 'organization-card__container'})
    for ind, org in enumerate(orgs):
        print("DEBUG: Scraped details for {}/{} orgs".format(ind, len(orgs)),
              end="\r")
        link = org.find('a', attrs={'class': 'organization-card__link'})
        org_name = org['aria-label']
        org_link = ROOT_URL + link['href']
        response = requests.get(org_link)
        html = response.content
        soup = BeautifulSoup(html, 'html.parser')

        tech_tags = soup.findAll(
            'li',
            attrs={'class': 'organization__tag organization__tag--technology'})
        technologies = []
        for tag in tech_tags:
            technologies.append(tag.text.replace(" ", ''))

        topic_tags = soup.findAll(
            'li',
            attrs={'class': 'organization__tag organization__tag--category'})
        category = topic_tags[0].text.replace('\n', '')

        topic_tags = soup.findAll(
            'li',
            attrs={'class': 'organization__tag organization__tag--topic'})
        topics = []
        for tag in topic_tags:
            topics.append(tag.text)

        all_org_data[create_key(org_name)] = {
            "name": org_name,
            "technologies": ' | '.join(technologies),
            "topics": ' | '.join(topics),
            "category": category
        }
    return all_org_data
Example #12
    def normalize(self, **other_params):
        cherrypy.response.headers['Content-Type'] = "application/json"
        params = {}
        if cherrypy.request.method == "POST":
            params = cherrypy.request.json
        error_message = str()
        error_flag = False
        job_description = ""
        batch_size = 0
        total_time = time.time()

        if norm_job.JOBS_PARAMETER not in params:
            error_flag = True
            error_message = configurator.commons.MALFORMED_REQUEST_ERROR_MESSAGE
        else:
            jobs = params[norm_job.JOBS_PARAMETER]
            job_array = []
            skill_array = []
            responses = []
            bypass_array = []
            batch_size = len(jobs)
            for job in jobs:
                try:
                    filtered_title = job[norm_job.JOB_TITLE_PARAMETER]
                    if "instead of" in filtered_title.lower():
                        filtered_title = filtered_title[:filtered_title.lower(
                        ).find("instead of")].strip()
                    filtered_title = create_key(filtered_title, self.city_list,
                                                self.state_list,
                                                self.state_codes)
                    job[norm_job.JOB_TITLE_PARAMETER] = filtered_title
                except:
                    filtered_title = ""
                job_description = ""
                if norm_job.JOB_DESCRIPTION_PARAMETER in job:
                    job_description = job[norm_job.JOB_DESCRIPTION_PARAMETER]
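                # titles containing driver-related n-grams bypass the classifier and
                # later receive the fixed driver SOC codes instead of a model prediction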
                title_ngrams = find_all_ngrams_upto(filtered_title.lower(), 4)
                if title_ngrams.intersection(self.driver_ngrams_set):
                    bypass_array.append(1)
                else:
                    job_array.append((filtered_title, job_description))
                    bypass_array.append(0)
                imp_skills = set()

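                # collect skills by intersecting the description's n-grams with the
                # universal skill set, sentence by sentence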
                if job_description:
                    sentences = norm_job.sent_detector.tokenize(
                        job_description)
                    for sentence in sentences:
                        lower_sentence = sentence.lower()
                        sentence_n_grams = find_all_ngrams_upto(
                            lower_sentence, norm_job.ngram_limit)
                        imp_skills.update(
                            sentence_n_grams.intersection(
                                norm_job.universal_skill_set))
                skill_array.append(imp_skills)

            start_time = time.time()
            prediction_array = self.model.predict(job_array)
            root.info(
                "Context Free classification for {0} points done in {1}s".
                format(len(prediction_array),
                       time.time() - start_time))
            del job_array
            #             root.info(prediction_array)

            start_time = time.time()
            for point_index, selector_value in enumerate(bypass_array):
                if selector_value:
                    soc_codes_with_conf = self.driver_soc_codes
                else:
                    soc_codes_with_conf = prediction_array.pop(0)
                soc_codes = [
                    soc[0] for soc in sorted(
                        soc_codes_with_conf, key=lambda k: k[1], reverse=True)
                ]
                try:
                    job_title = jobs[point_index][norm_job.JOB_TITLE_PARAMETER]
                    if "instead of" in job_title.lower():
                        job_title = job_title[:job_title.lower().
                                              find("instead of")].strip()
                except:
                    error_flag = True
                    error_message = configurator.commons.MALFORMED_REQUEST_ERROR_MESSAGE
                if not error_flag:
                    response_json = {}
                    response_json["index"] = point_index
                    response_json["clean_original_title"] = format_skills(
                        jobs[point_index][norm_job.JOB_TITLE_PARAMETER])
                    response_json["soc_code"] = ''
                    response_json["confidence"] = 0
                    response_json["closest_lay_title"] = ''
                    response_json["major_group_string"] = ''
                    response_json["skills"] = list(skill_array[point_index])

                    if not soc_codes:
                        ''' The given job posting could not be normalized using our standard algorithm.
                        We should use the soc_hint parameter present here to see if we can find a nearby
                        title in the given hint SOC code.'''
                        if norm_job.SOC_HINT_PARAMETER in jobs[point_index]:
                            soc_hint = jobs[point_index][
                                norm_job.SOC_HINT_PARAMETER]
                            if soc_hint in self.soc_mapping:
                                ''' This is a valid SOC Code '''
                                associated_soc_codes = self.soc_mapping[
                                    soc_hint]
                                soc_codes = list(associated_soc_codes)
                                root.info(
                                    "Hinted {} hence, Comparing Against Codes {}"
                                    .format(soc_hint, soc_codes))
                            else:
                                ''' This is an invalid SOC Code and we can't do much about it. '''
                                root.info(
                                    "No matching SOC Code found in soc_hint {}. Cannot normalize."
                                    .format(soc_hint))
                    if soc_codes:
                        key_string = filter_chain.apply(
                            convert_encoding(job_title), is_title=True)[1]
                        closest_lay_title_tuple = norm_job.fetch_closest_lay_title(
                            self.lay_title_list, self.soc_lay_title_token_list,
                            soc_codes, key_string, "")
                        major_group_string = configurator.commons.DEFAULT_MAJOR_GROUP_STRING
                        if closest_lay_title_tuple[1] in self.soc_master_dict:
                            major_group_string = self.soc_master_dict[
                                closest_lay_title_tuple[1]][
                                    'major_group_string']
                        lay_title = convert_encoding(
                            closest_lay_title_tuple[0])
                        if lay_title in self.lay_title_dict:
                            lay_title = self.lay_title_dict[lay_title]
                            if lay_title in self.similar_title_dict:
                                lay_title = self.similar_title_dict[lay_title]
                        response_json["soc_code"] = closest_lay_title_tuple[1]
                        response_json["confidence"] = int(
                            dict(soc_codes_with_conf)[
                                closest_lay_title_tuple[1]])
                        response_json['top_soc'] = closest_lay_title_tuple[2]
                        response_json["closest_lay_title"] = lay_title
                        response_json[
                            "major_group_string"] = major_group_string
                else:
                    response_json = {
                        "error_code":
                        configurator.commons.MALFORMED_REQUEST_ERROR_STATUS,
                        "message": error_message
                    }
                responses.append(response_json)
                error_flag = False
                if (point_index + 1) % 1000 == 0:
                    root.info("{0} points done in {1}s".format(
                        point_index,
                        time.time() - start_time))
                    start_time = time.time()
            responses_object = {"normalized_jobs": responses}
        if error_flag:
            cherrypy.response.status = configurator.commons.MALFORMED_REQUEST_ERROR_STATUS
            responses_object = {
                "error_code":
                configurator.commons.MALFORMED_REQUEST_ERROR_STATUS,
                "message": error_message
            }

        root.info("{0} points done in {1}s".format(batch_size,
                                                   time.time() - total_time))

        return responses_object
Example #13
def image_iterator(url, uri, region, size, rotation, quality, format,
                   oversample):
    tile_size = get_setting('tile_size', 512)

    # optimistic first attempt at avoiding lookup by fixing size with trailing comma
    if match(r'^\d+,$', size) and match(r'^\d+,\d+,\d+,\d+$', region):
        x, y, w, h = [
            int(x) for x in match(r'(\d+),(\d+),(\d+),(\d+)', region).groups()
        ]
        s = int(match(r'(\d+),', size).group(1))
        size = size + str(int(s * h / w))

    # optimistic attempt at avoiding lookup by fixing max size
    if match(r'^\d+,\d+,\d+,\d+$', region) and match(r'^\d+,\d+$', size):
        x, y, w, h = [
            int(x) for x in match(r'(\d+),(\d+),(\d+),(\d+)', region).groups()
        ]
        sx, sy = [int(x) for x in match(r'(\d+),(\d+)', size).groups()]

        if (w, h) == (sx, sy):
            size = 'max'

    # short-circuit full-size files that resolve to disk
    if url and url.startswith(
            'file:///'
    ) and size == 'max' and region == 'full' and rotation == '0' and quality == 'default' and url.endswith(
            format):
        with open(url[7:], mode='rb') as f:
            b = f.read(100 * 1024)
            while len(b) != 0:
                yield b
                b = f.read(100 * 1024)

        return

    # exact match?
    key = create_key(uri, region, size, rotation, quality, format)
    if key in cache:
        yield from cache.iter_get(key)
        return

    # get info and cache / tile file if necessary
    if uri in cache:
        i = loads(cache.get(uri))
    else:
        with cache.lock(uri + ':worker'):
            # check again after receiving the lock
            if uri in cache:
                i = loads(cache.get(uri))
            else:
                i = get_info(url, uri)

                # tile everything except JPEG2000
                if i['format'] != 'JPEG2000':
                    im = get_image(url, uri)
                    ingest(im, url, uri)

    # match for normalized key?
    nkey = create_key(uri,
                      region,
                      size,
                      rotation,
                      quality,
                      format,
                      width=i['width'],
                      height=i['height'],
                      tile_size=tile_size,
                      normalize=True)
    if nkey in cache:
        yield from cache.iter_get(nkey)

        return

    # quick hack for JPEG2000 files that resolve to disk
    if get_setting('opj_decompress') and url.startswith(
            'file:///') and i['format'] == 'JPEG2000':
        image = opj_decompress(i,
                               url[7:],
                               region,
                               size,
                               tile_size=tile_size,
                               oversample=oversample)

        image = rotate(image, float(rotation))
        image = do_quality(image, quality)

        b = BytesIO()
        icc_profile = image.info.get("icc_profile")
        image.save(b,
                   quality=90,
                   icc_profile=icc_profile,
                   progressive=True,
                   format='jpeg' if format == 'jpg' else format)

        # this can get expensive!
        if get_setting('cache_all', False):
            print('warning: caching arbitrary sized image (%s)' % nkey,
                  flush=True)
            save_to_cache(nkey, image)
        elif is_tile(i, image):
            save_to_cache(nkey, image)

        yield b.getvalue()

        return

    # quick hack for JPEG2000 when cached originals allowed
    if get_setting('opj_decompress') and get_setting('cache_original'):
        okey = uri + ':original'

        if okey not in cache:
            with cache.lock(uri + ':worker'):
                if okey not in cache:
                    # calling get_image() will cache the original
                    get_image(url, uri)

        loc = join(*cache.get_location(okey))

        image = opj_decompress(i,
                               loc,
                               region,
                               size,
                               tile_size=tile_size,
                               oversample=oversample)

        image = rotate(image, float(rotation))
        image = do_quality(image, quality)

        b = BytesIO()
        icc_profile = image.info.get("icc_profile")
        image.save(b,
                   quality=90,
                   icc_profile=icc_profile,
                   progressive=True,
                   format='jpeg' if format == 'jpg' else format)

        # this can get expensive!
        if get_setting('cache_all', False) or is_tile(i, image):
            print('warning: caching arbitrary sized image (%s)' % nkey,
                  flush=True)
            save_to_cache(nkey, image)

        yield b.getvalue()

        return

    # image is cached, just not in the right rotation, quality or format?
    key = create_key(uri, region, size, '0', 'default',
                     config['settings']['cache_format'])
    if key in cache:
        image = Image.open(BytesIO(cache.get(key)))
    elif url.startswith('file:///'):
        image = get_image(url, uri)
        image = crop(image, region)
        image = resize(image, size)
    else:
        print('doing actual work for uri: ' + uri, flush=True)
        # image is cached, but size is wrong?
        # TODO: use optimal size rather than 'max'
        key = create_key(uri, region, 'max', '0', 'default',
                         config['settings']['cache_format'])
        if key in cache:
            image = resize(Image.open(BytesIO(cache.get(key))), size)
        else:
            # requested image is also cropped
            # TODO: use optimal size rather than 'max'
            key = create_key(uri, 'full', 'max', '0', 'default',
                             config['settings']['cache_format'])

            if key in cache:
                image = Image.open(BytesIO(cache.get(key)))
                image = crop(image, region)
                image = resize(image, size)
            else:
                raise Exception('Stitching image from tiles not supported yet')

    image = rotate(image, float(rotation))
    image = do_quality(image, quality)

    b = BytesIO()
    icc_profile = image.info.get("icc_profile")
    image.save(b,
               quality=90,
               icc_profile=icc_profile,
               progressive=True,
               format='jpeg' if format == 'jpg' else format)

    # this can get expensive!
    if get_setting('cache_all', False):
        print('warning: caching arbitrary sized image (%s)' % nkey, flush=True)
        save_to_cache(nkey, image)

    yield b.getvalue()
Example #14
def test_accusation():

    # Valid random accusation
    a = Accusation.get_random()["accusation"]
    assert a.is_valid() == True

    # Invalid random accusation
    a = Accusation.get_random(valid=False)["accusation"]
    assert a.is_valid() == False

    judges = [ID.get_random()["id"] for _ in range(MIN_JUDGES)]
    rule = {
        "content": "You shall not kill",
        "sentence": "Some really bad thing",
        "reward": {
            "value": 1000,
            "currency": "Nano"
        }
    }
    rules = [rule for _ in range(MIN_RULES)]
    expire = datetime.datetime.now() + datetime.timedelta(days=1)

    contract = Contract(userid, rules, judges, expire)
    contract.sign(key)
    accused = ID("Murray Rothbard", parsed_pubkey)

    # Should be valid as well
    a = Accusation(userid, accused, contract)
    a.sign(key)
    assert a.is_valid() == True

    with pytest.raises(TypeError):
        a = Accusation("User", accused, contract)
    with pytest.raises(TypeError):
        a = Accusation(userid, "Accused", contract)
    with pytest.raises(TypeError):
        a = Accusation(userid, accused, {"content": "..."})

    content = hash_content(
        {
            "username": "******",
            "public_key": parsed_pubkey
        }, HASH_DIFFICULTY - 1, NONCE_LIMIT)

    content["hash_value"] = content["hash_value"].replace("0", "a")
    invalid_id = ID(**content)

    # Invalid sender's ID
    a = Accusation(invalid_id, accused, contract)
    a.sign(key)
    assert a.is_valid() == False

    # Invalid accused's ID
    a = Accusation(userid, invalid_id, contract)
    a.sign(key)
    assert a.is_valid() == False

    # Unmatching key
    a = Accusation(userid, accused, contract)
    a.sign(create_key())
    assert a.is_valid() == False

    # Invalid contract
    a = Accusation(userid, accused,
                   Contract.get_random(valid=False)["contract"])
    a.sign(key)
    assert a.is_valid() == False
Example #15
def test_verdict():

    # Valid random verdict
    v = Verdict.get_random()["verdict"]
    assert v.is_valid() == True

    # Invalid random verdict
    v = Verdict.get_random(valid=False)["verdict"]
    assert v.is_valid() == False

    judges = [ID.get_random()["id"] for _ in range(MIN_JUDGES)]
    rule = {
        "content": "You shall not kill",
        "sentence": "Some really bad thing",
        "reward": {
            "value": 1000,
            "currency": "Nano"
        }
    }
    rules = [rule for _ in range(MIN_RULES)]
    expire = datetime.datetime.now() + datetime.timedelta(days=1)
    contract = Contract(userid, rules, judges, expire)
    contract.sign(key)
    accused = ID("Hercule Poirot", parsed_pubkey)

    accusation = Accusation(userid, accused, contract)
    accusation.sign(key)
    sentence = "Must be executed"
    description = "Because I say so"

    # Should be valid as well
    v = Verdict(userid, accusation, sentence, description)
    v.sign(key)
    assert v.is_valid() == True

    with pytest.raises(TypeError):
        v = Verdict("User", accusation, sentence, description)
    with pytest.raises(TypeError):
        v = Verdict(userid, {"content": "..."}, sentence, description)
    with pytest.raises(TypeError):
        v = Verdict(userid, accusation, False, description)
    with pytest.raises(TypeError):
        v = Verdict(userid, accusation, sentence, 76)

    content = hash_content(
        {
            "username": "******",
            "public_key": parsed_pubkey
        }, HASH_DIFFICULTY - 1, NONCE_LIMIT)

    content["hash_value"] = content["hash_value"].replace("0", "a")
    invalid_id = ID(**content)

    # Invalid sender's ID
    v = Verdict(invalid_id, accusation, sentence, description)
    v.sign(key)
    assert v.is_valid() == False

    # Unmatching key
    v = Verdict(userid, accusation, sentence, description)
    v.sign(create_key())
    assert v.is_valid() == False

    # Invalid accusation
    v = Verdict(userid,
                Accusation.get_random(valid=False)["accusation"], sentence,
                description)
    v.sign(key)
    assert v.is_valid() == False

    # No sentence at all
    v = Verdict(userid, accusation, "", description)
    v.sign(key)
    assert v.is_valid() == False

    # Sentence with more chars than the allowed
    v = Verdict(userid, accusation, "c" * (SENTECE_CHAR_LIMIT + 1),
                description)
    v.sign(key)
    assert v.is_valid() == False

    # Description with more chars than the allowed
    v = Verdict(userid, accusation, sentence,
                "c" * (DESCRIPTION_CHAR_LIMIT + 1))
    v.sign(key)
    assert v.is_valid() == False
Example #16
def test_appeal():

    # Valid random appeal
    a = Appeal.get_random()["appeal"]
    assert a.is_valid() == True

    # Invalid random appeal
    a = Appeal.get_random(valid=False)["appeal"]
    assert a.is_valid() == False

    judges = [ID.get_random()["id"] for _ in range(MIN_JUDGES)]
    rule = {
        "content": "You shall not kill",
        "sentence": "Some really bad thing",
        "reward": {
            "value": 1000,
            "currency": "Nano"
        }
    }
    rules = [rule for _ in range(MIN_RULES)]
    expire = datetime.datetime.now() + datetime.timedelta(days=1)

    contract = Contract(userid, rules, judges, expire)
    contract.sign(key)

    accused = ID("Hercule Poirot", parsed_pubkey)
    accusation = Accusation(userid, accused, contract)
    accusation.sign(key)

    sentence = "Must be executed"
    description = "Because I say so"

    verdict = Verdict(userid, accusation, sentence, description)
    verdict.sign(key)

    # Valid hardcoded appeal
    a = Appeal(userid, verdict)
    a.sign(key)
    assert a.is_valid() == True

    with pytest.raises(TypeError):
        a = Appeal("User", verdict)
    with pytest.raises(TypeError):
        a = Appeal(userid, {"content": "..."})

    content = hash_content(
        {
            "username": "******",
            "public_key": parsed_pubkey
        }, HASH_DIFFICULTY - 1, NONCE_LIMIT)

    content["hash_value"] = content["hash_value"].replace("0", "a")
    invalid_id = ID(**content)

    # Invalid sender's ID
    a = Appeal(invalid_id, verdict)
    a.sign(key)
    assert a.is_valid() == False

    # Unmatching key
    a = Appeal(userid, verdict)
    a.sign(create_key())
    assert a.is_valid() == False

    invalid_verdict = Verdict(userid, accusation, "", description)
    invalid_verdict.sign(key)

    # Invalid verdict
    a = Appeal(userid, invalid_verdict)
    a.sign(key)
    assert a.is_valid() == False
Example #17
def test_contract():

    # Valid random contract
    c = Contract.get_random()["contract"]
    assert c.is_valid() == True

    # Invalid random contract
    c = Contract.get_random(valid=False)["contract"]
    assert c.is_valid() == False

    judges = [ID("Agatha Christie", parsed_pubkey) for _ in range(MIN_JUDGES)]
    rule = {
        "content": "You shall not kill",
        "sentence": "Some really bad thing",
        "reward": {
            "value": 1000,
            "currency": "Nano"
        }
    }
    rules = [rule for _ in range(MIN_RULES)]
    expire = datetime.datetime.now() + datetime.timedelta(days=1)

    # Should be valid as well
    c = Contract(userid, rules, judges, expire)
    c.sign(key)
    assert c.is_valid() == True

    content = hash_content(
        {
            "username": "******",
            "public_key": parsed_pubkey
        }, HASH_DIFFICULTY - 1, NONCE_LIMIT)

    content["hash_value"] = content["hash_value"].replace("0", "a")
    invalid_id = ID(**content)

    # Invalid ID
    c = Contract(invalid_id, rules, judges, expire)
    c.sign(key)
    assert c.is_valid() == False

    # Unmatching key
    c = Contract(userid, rules, judges, expire)
    c.sign(create_key())
    assert c.is_valid() == False

    # No judges
    c = Contract(userid, rules, [], expire)
    c.sign(key)
    assert c.is_valid() == False

    # No rules
    c = Contract(userid, [], judges, expire)
    c.sign(key)
    assert c.is_valid() == False

    # Sender as judge
    c = Contract(userid, rules, [userid], expire)
    c.sign(key)
    assert c.is_valid() == SENDER_CAN_JUDGE

    # Repeated judges
    c = Contract(userid, rules, [judges[0] for _ in range(MAX_JUDGES)], expire)
    c.sign(key)
    assert c.is_valid() == False

    expired = datetime.datetime.now() - datetime.timedelta(days=1)
    c = Contract(userid, rules, judges, expired)
    c.sign(key)
    assert c.is_valid() == False
Example #18
    def make_key(self):
        """ Create a key. """
        hashable = json.dumps(self.key_basis())
        self.redis_key = create_key(hashable, self.cache_type)
Example #19
import sys
import os

sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../src")

from config import id_config
from utils import create_key, parse_key, hash_content
from identity import ID

HASH_DIFFICULTY = id_config["hash_difficulty"]
NONCE_LIMIT = id_config["nonce_limit"]
USERNAME_LIMIT = id_config["username_char_limit"]

key = create_key()
public_key = key.publickey()
parsed_pubkey = parse_key(public_key)


def test_ids():

    # Verify if the random contract is valid
    assert ID.get_random()["id"].is_valid() == True
    assert ID.is_id_valid(ID.get_random()["id"].to_dict()) == True

    # Verify if the random contract is not valid (when valid == False)
    assert ID.get_random(valid=False)["id"].is_valid() == False
    assert ID.is_id_valid(ID.get_random(valid=False)["id"].to_dict()) == False

    # Should be valid
    content = hash_content(
        {