Ejemplo n.º 1 (0 votos)
def generate_hits(i_file_name_1, i_file_name_2, o_file_name):
    """Merge-join two pickled, hash-sorted dictionaries into a list of Hits.

    Both input files hold pickled sequences of entries sorted by ``k_hash``,
    each entry carrying a ``position`` iterable.  For every k_hash present in
    both inputs, one Hit is created per (position_1, position_2) pair, and the
    resulting list is pickled to ``o_file_name``.

    Parameters:
        i_file_name_1: path to the first pickled, sorted dictionary.
        i_file_name_2: path to the second pickled, sorted dictionary.
        o_file_name: path where the pickled list of Hit objects is written.
    """
    print('\n\n~ generate_hits ~ dic_1: ' + i_file_name_1 + ', dic_2: ' +
          i_file_name_2 + ', output: ' + o_file_name + '\n')
    hits = []
    # The `with` block owns all three file handles; they are closed
    # automatically on exit (the original code re-closed them redundantly).
    with open(i_file_name_1, 'rb') as dic_file_1, \
            open(i_file_name_2, 'rb') as dic_file_2, \
            open(o_file_name, 'wb') as hits_file:

        # Load both sorted dictionaries
        dic_1 = pickle.load(dic_file_1)
        dic_2 = pickle.load(dic_file_2)
        i = 0
        j = 0
        # Classic sorted-merge join on k_hash.
        while i < len(dic_1) and j < len(dic_2):
            if dic_1[i].k_hash == dic_2[j].k_hash:
                # Cartesian product of the two position lists: one Hit per pair.
                for p_1 in dic_1[i].position:
                    for p_2 in dic_2[j].position:
                        hit = Hit(dic_1[i].k_hash, p_1, p_2)
                        hits.append(hit)
                i += 1
            elif dic_1[i].k_hash < dic_2[j].k_hash:
                i += 1
            else:  # dic_1[i].k_hash > dic_2[j].k_hash is the only case left
                j += 1
        # Dump array into file using pickle
        pickle.dump(hits, hits_file)
Ejemplo n.º 2 (0 votos)
def nb_redirect(request, obj_id):
    """Record a hit on a Notebook and render its <body> in a redirect page.

    Increments the notebook's aggregate counter, logs an individual Hit with
    the client IP and a timestamp, then renders ``web/redirect.html`` with the
    notebook's body HTML, its external url, and its title.

    Parameters:
        request: the incoming HTTP request.
        obj_id: primary key of the Notebook being viewed.
    """
    # Increment the aggregate view counter on the notebook itself.
    nb = Notebook.objects.get(pk=obj_id)  # TODO: get_object_or_404
    nb.hits_total += 1
    nb.save()
    # Log one Hit row per view. .get() avoids a KeyError in the unlikely
    # case REMOTE_ADDR is absent (e.g. some test clients / proxies).
    ip = request.META.get('REMOTE_ADDR', '')
    h = Hit(model=nb, created=timezone.now(), ip=ip)
    h.save()
    from bs4 import BeautifulSoup
    # Pin the parser explicitly: without it BeautifulSoup picks whichever
    # parser happens to be installed, which can change output across
    # environments and emits a warning.
    soup = BeautifulSoup(nb.full_html, 'html.parser')
    context = {
        'html': str(soup.find('body')),
        'redirect': nb.html_url,
        'title': nb.title
    }
    return render(request, 'web/redirect.html', context)
Ejemplo n.º 3 (0 votos)
    def get(self):
        """Serve an application/metalink+xml document for a requested file.

        The file may be identified by a digest embedded in the request path
        (``<digest>.metalink``), by a ``url`` query parameter, or both is an
        error.  All known mirror urls and digests for the file are merged
        into one LinkElement; unknown urls fall back to a link-only metalink,
        and malformed digests get a 404.  Optional query parameters:
        ``coral`` (add a Coral CDN mirror) and ``name`` (override file name).
        """
        self.response.headers['Content-Type'] = 'text/plain'
        devhelp_url = '\n\nPlease visit http://www.dynmirror.net/help/developers/ for more information.'
        # A digest given in the path must carry the '.metalink' extension.
        digest = os.path.basename(self.request.path)
        if len(digest) and not digest.endswith('.metalink'):
            return self.not_found(
                '404 not found: If you do file look-ups by digest, it needs to end in ".metalink".'
                + devhelp_url)
        digest = digest[:-len('.metalink')]
        url = self.request.get('url')
        fileLink = LinkElement()

        if len(url):
            # Normalize the url the same way File keys are built, so the
            # datastore look-up below matches.
            url = File.urlparse(url).geturl()

            # Add a Coral CDN (.nyud.net) mirror link if wanted.
            if self.request.get('coral', default_value='not_set') != 'not_set':
                cu = urlparse.urlparse(url)  # Parse for the coral url
                if cu.scheme == 'http' and not cu.netloc.endswith('.nyud.net'):
                    fileLink.urls.append({
                        'v':
                        'http://' + cu.hostname + '.nyud.net' + cu.path,
                        'a': {
                            'type': File.urlparse(url).scheme
                        }
                    })
            # Log the request before answering.
            Hit(path=self.request.path,
                remote_addr=self.request.remote_addr,
                referer=self.request.headers.get('referer', ''),
                comment=url).save()
            f = File.get_by_key_name(url)
            if f is None:  # fixed: identity test instead of `== None`
                # Unknown url: host a link-only metalink with just this url.
                fileLink.name = os.path.basename(url)
                fileLink.urls.insert(0, {
                    'v': url,
                    'a': {
                        'type': File.urlparse(url).scheme
                    }
                })
                self.response.headers[
                    'Content-Type'] = 'application/metalink+xml'
                self.response.headers[
                    'Content-Disposition'] = 'attachment; filename="%s.metalink"' % os.path.basename(
                        url).replace('"', '\\"')
                return self.render_to_response(
                    'metalink.xml', {
                        'files': [fileLink],
                        'comment':
                        'Link only, because no validated link information could be found in the database. Make sure you use the _exact_ url you used to add the metadata.'
                    })
            # Inherit all the digest information from the file in the db.
            fileLink.name = f.name
            fileLink.size = f.size
            fileLink.digests.update(f.digests())
            if len(digest) > 0:
                # Refuse ambiguous requests that supply both digest and url.
                return self.response.out.write(
                    'For security reasons, you can not combine digest and url, because we can not determine which is authorative and what to do in case they do not match in the database.'
                    + devhelp_url)

        if len(digest) > 0:
            # Malformed digest: its length must match a known hash size.
            # fixed: dict.has_key() is Python-2-only; `in` works everywhere.
            if len(digest) not in hash_sizes:
                return self.not_found(
                    '404 Not Found, the digest is considered malformed. Make sure it is lowercase hex representing an MD5, SHA1, SHA256 or SHA512.'
                    + devhelp_url)
            if not lower_hex_regex.match(digest):
                return self.not_found(
                    '404 Not Found, the digest is considered malformed because it did not match /[0-9a-f]/.'
                    + devhelp_url)
            # OK, a digest is given; record it under its hash-type name.
            fileLink.digests[hash_sizes[len(digest)]] = digest

        #TODO Unique hosts; hosts = set
        # Explode the file using all known digests, both url and digest.
        names = {}
        sizes = {}
        # Two-letter country-code TLD matcher, used to tag mirror location.
        cntry_regex = re.compile(
            '.*\.(a[cdefgilmnoqrstuwxz]|c[acdfghiklmnoruvxyz]|b[abdefghijmnorstvwyz]|e[ceghrstu]|d[ejkmoz]|g[abdefghilmnpqrstuwy]|f[ijkmor]|i[delmnoqrst]|h[kmnrtu]|k[eghimnprwyz]|j[emop]|m[acdeghklmnopqrstuvwxyz]|l[abcikrstuvy]|o[m]|n[acefgilopruz]|q[a]|p[aefghklmnrstwy]|s[abcdeghijklmnortuvyz]|r[eosuw]|u[agkmsyz]|t[cdfghjklmnoprtvwz]|w[fs]|v[aceginu]|y[etu]|z[amw])$'
        )
        for digest_type in fileLink.digests:
            files = File.all().filter('%s = ' % digest_type,
                                      fileLink.digests[digest_type]).fetch(20)
            for f in files:
                attr = {
                    'type': 'http'
                }  #Optimization, currently only HTTP supported f.scheme
                cntry = cntry_regex.match(f.hostname)
                if cntry:
                    attr['location'] = cntry.group(1)
                fileLink.urls.append({'v': f.url(), 'a': attr})
                # Tally names/sizes so the most common value can win below.
                names.setdefault(f.name, 0)
                names[f.name] += 1
                sizes.setdefault(f.size, 0)
                sizes[f.size] += 1

        # If a name is given, just rename the file.
        name = self.request.get('name')
        if name:
            fileLink.name = name
        if not fileLink.name:
            # Democratic naming: most frequently seen name wins.
            # NOTE(review): raises IndexError if no mirror was found at all;
            # presumably unreachable in practice -- confirm.
            names = [(names[k], k) for k in names]
            names.sort()
            fileLink.name = names[-1][1]
        if not fileLink.size:
            # Democratic size; seed 0 so the list is never empty.
            sizes[0] = 0
            sizes = [(sizes[k], k) for k in sizes]
            sizes.sort()
            fileLink.size = sizes[-1][1]
        # Shuffle so clients do not all hit the same mirror first.
        random.shuffle(fileLink.urls)
        self.response.headers['Content-Type'] = 'application/metalink+xml'
        self.response.headers[
            'Content-Disposition'] = 'attachment; filename="%s.metalink"' % os.path.basename(
                fileLink.name).replace('"', '\\"')
        return self.render_to_response('metalink.xml', {'files': [fileLink]})