def variance_tex(self):
    orig_variance = []
    filtered_variance = []

    print "tex counts:", Counter(
        [
            shape.rectified_normals.filter(admin_score__gt=0).count()
            for shape in progress.bar(MaterialShape.objects.filter(photo__synthetic=False))
        ]
    ).most_common()

        for shape in progress.bar(MaterialShape.objects.filter(photo__synthetic=False)):
        normals = shape.rectified_normals.all()
        if not normals.exists():
            continue

        phi_filtered = [json.loads(n.uvnb) for n in normals.filter(admin_score__gt=0)]
        if len(phi_filtered) >= 2:
            filtered_variance.append(normal_variance(phi_filtered))

            phi_orig = [json.loads(n.uvnb) for n in normals]
            orig_variance.append(normal_variance(phi_orig))

        print "num:", len(orig_variance)
        print "orig_variance:", math.sqrt(mean(orig_variance))
        print "filtered_variance:", math.sqrt(mean(filtered_variance))
Example #2
File: init.py Project: paultag/lucy
def main():
    if not args.files:
        raise Exception("WTF - need a config")

    config = args.files.pop(0)
    obj = json.load(open(config, 'r'))

    machines = obj['machines']
    configs = obj['configs']
    users = obj['users']

    puts("Loading users:")
    for conf in progress.bar(users):
        u = User(**conf)
        u.save()

    puts("Loading machines:")
    for conf in progress.bar(machines):
        m = Machine(**conf)
        m.save()

    puts("Loading configs:")
    for conf in progress.bar(configs):
        c = Config(**conf)
        c.save()
Example #3
    def handle(self, *args, **options):
        print ('Recomputing the rectified image for all shapes')

        shapes = MaterialShape.objects \
            .filter(planar=True, planar_score__isnull=False) \
            .order_by('-planar_score', '-area')

        # delete existing rectified image
        print 'deleting old rectified textures'
        for shape in progress.bar(shapes):
            if shape.image_rectified:
                shape.image_rectified.delete(save=True)
            if shape.thumb_rectified_span3:
                shape.thumb_rectified_span3.delete(save=True)

        print 'computing new rectified textures'
        for shape in progress.bar(shapes):
            if shape.normals.count() < 1:
                continue

            print ''
            rectify_shape(shape)
            print ''
Example #4
def run():
    stemmer = Stemmer("english")
    pages = db.en.find()
    print colored.yellow("computing word statistics")
    wordstatistic = {}
    for page in progress.bar(pages, expected_size=db.en.count()):
        data = page.get("data")
        if not data:
            continue
        content = data.get("content")
        if not content:
            db.en.remove({"_id": page["_id"]})
            continue
        words = EN_WORD_CUT.split(content)
        for word in words:
            w = stemmer.stemWord(word.strip()).lower()
            if w and len(w) < 20 and w not in EN_IGNORE:
                wordstatistic[w] = wordstatistic.get(w, 0) + 1

    print colored.yellow("save to en_words_freq")
    save_queue = []
    for k, v in progress.bar(wordstatistic.iteritems(), expected_size=len(wordstatistic)):
        save_queue.append({"_id": k, "freq": v})
        if len(save_queue) >= 1000:
            db.en_words_freq.insert(save_queue)
            save_queue = []

    if save_queue:
        db.en_words_freq.insert(save_queue)
    print colored.cyan(
        "count of en_words_freq: %d" % db.en_words_freq.count())
Example #5
def filescanner(topdir, jottapath, jfs, errorfile, exclude=None, dry_run=False, prune_files=True, prune_folders=True ):

    errors = {}
    def saferun(cmd, *args):
        log.debug('running %s with args %s', cmd, args)
        try:
            return cmd(*args)
        except Exception as e:
            puts(colored.red('Ouch. Something\'s wrong with "%s":' % args[0]))
            log.exception('SAFERUN: Got exception when processing %s', args)
            errors[args[0]] = e
            return False

    _files = 0

    try:
        for dirpath, onlylocal, onlyremote, bothplaces, onlyremotefolders in jottacloud.compare(topdir, jottapath, jfs, exclude_patterns=exclude):
            puts(colored.green("Entering dir: %s" % dirpath))
            if len(onlylocal):
                _start = time.time()
                _uploadedbytes = 0
                for f in progress.bar(onlylocal, label="uploading %s new files: " % len(onlylocal)):
                    if os.path.islink(f.localpath):
                        log.debug("skipping symlink: %s", f)
                        continue
                    log.debug("uploading new file: %s", f)
                    if not dry_run:
                        if saferun(jottacloud.new, f.localpath, f.jottapath, jfs) is not False:
                            _uploadedbytes += os.path.getsize(f.localpath)
                            _files += 1
                _end = time.time()
                puts(colored.magenta("Network upload speed %s/sec" % ( humanizeFileSize( (_uploadedbytes / (_end-_start)) ) )))

            if prune_files and len(onlyremote):
                puts(colored.red("Deleting %s files from JottaCloud because they no longer exist locally " % len(onlyremote)))
                for f in progress.bar(onlyremote, label="deleting JottaCloud file: "):
                    log.debug("deleting cloud file that has disappeared locally: %s", f)
                    if not dry_run:
                        if saferun(jottacloud.delete, f.jottapath, jfs) is not False:
                            _files += 1
            if len(bothplaces):
                for f in progress.bar(bothplaces, label="comparing %s existing files: " % len(bothplaces)):
                    log.debug("checking whether file contents has changed: %s", f)
                    if not dry_run:
                        if saferun(jottacloud.replace_if_changed, f.localpath, f.jottapath, jfs) is not False:
                            _files += 1
            if prune_folders and len(onlyremotefolders):
                puts(colored.red("Deleting %s folders from JottaCloud because they no longer exist locally " % len(onlyremotefolders)))
                for f in onlyremotefolders:
                    if not dry_run:
                        if saferun(jottacloud.deleteDir, f.jottapath, jfs) is not False:
                            logging.debug("Deleted remote folder %s", f.jottapath)
    except KeyboardInterrupt:
        # Ctrl-c pressed, cleaning up
        pass
    if len(errors) == 0:
        puts('Finished syncing %s files to JottaCloud, no errors. yay!' % _files)
    else:
        puts(('Finished syncing %s files, ' % _files )+
             colored.red('with %s errors (read %s for details)' % (len(errors), errorfile, )))
Example #6
    def rip(self,
            session,     # the current session
            frames,      # the audio data
            frame_size,  # bytes per frame
            num_frames,  # number of frames in this delivery
            sample_type, # currently this is always 0, which means 16-bit
                         # signed native endian integer samples
            sample_rate, # audio sample rate, in samples per second
            channels):   # number of audio channels, currently 1 or 2

        self._downloaded += float(frame_size) * float(num_frames)

        if self._ripping:
            # 320 kilobits per second
            # 40 kilobytes per second
            # duration in milliseconds
            # 40 bytes per millisecond
            if not self._pipe:
                self.end_of_track(session)
                return

            total_bytes = float(self._duration) * 40.0
            # 100 = 4.41 (don't ask me why)
            progress_perc = self._downloaded / total_bytes
            progress_perc = progress_perc * (100.0 / 4.41)
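            # note: progress.bar() returns a lazy iterator, so this bare call draws nothing by itself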
            progress.bar(range(100))
            sys.stdout.write('\r > Progress:     %.2f%%' % progress_perc)

            try:
                self._pipe.write(frames)
            except IOError as e:
                print colored.red("ERROR: %s" %e)
                os.kill(os.getpid(), 9)
Example #7
def reregister_vouchers(username=None, args=(), simulate=False):
    """Called by :manage:`reregister`. See there."""
    Journal = rt.models.ledger.Journal
    VoucherStates = rt.models.ledger.VoucherStates
    if len(args):
        journals = [Journal.get_by_ref(a) for a in args]
    else:
        journals = Journal.objects.order_by('seqno')
    count = 0
    clear_afterwards = True
    for jnl in journals:
        msg = "Re-register all vouchers in journal {0}".format(jnl)
        puts(msg)
        cl = jnl.get_doc_model()
        qs = cl.objects.filter(journal=jnl, state=VoucherStates.registered)
        qs = qs.order_by('entry_date')
        for obj in progress.bar(qs):
            ses = BaseRequest(user=obj.user)
            obj.register_voucher(ses, not clear_afterwards)
            count += 1

    msg = "{0} vouchers have been re-registered."
    puts(msg.format(count))

    if clear_afterwards:
        msg = "Check clearings for all partners"
        puts(msg)
        qs = rt.models.contacts.Partner.objects.all()
        for obj in progress.bar(qs):
            check_clearings(obj)
Example #8
def main():
    args = docopt(__doc__)
    feature_name = args['<feature_name>']
    assert feature_name == 'words'
    assert args['<experimentset_name>'] in EXPERIMENT_SETS, '<experimentset_name> must be one of %s' % str(EXPERIMENT_SETS.keys())
    c = get_config()
    experiment_set = EXPERIMENT_SETS[args['<experimentset_name>']](feature_name=feature_name)

    print "Computing foreground group sums using %d cores..." % c.num_cores
    pool = Pool(c.num_cores, init_worker)
    fg_groups = experiment_set.list_foreground_groups()
    cache = {}
    try:
        for group_name, sum_vector in progress.bar(pool.imap_unordered(ComputeForegroundGroupSumCallable(experiment_set), fg_groups), label="Progress ", expected_size=len(fg_groups)):
            cache[group_name] = sum_vector
    except KeyboardInterrupt:
        print "Terminating pool.."
        pool.terminate()
        pool.join()

    print "Computing background sums..."
    bg_groups = experiment_set.list_background_groups()
    for g in bg_groups:
        sum_vector = experiment_set.compute_background_group_sum(g, cache)
        cache[g] = sum_vector

    print "Saving sums to ZODB..."
    zodb_root = open_zodb(read_only=False)
    if getattr(zodb_root, 'group_sums', None) is None:
        zodb_root.group_sums = BTrees.OOBTree.OOBTree()
        transaction.commit()
    if feature_name not in zodb_root.group_sums:
        zodb_root.group_sums[feature_name] = BTrees.OOBTree.OOBTree()
        transaction.commit()
    for k, v in cache.iteritems():
        zodb_root.group_sums[feature_name][k] = v
    transaction.commit()


    print "Creating output db tables..."
    create_db(c.resultsdb_url)
    session_out = open_db(c.resultsdb_url)

    print "Computing overrepresentation using %d cores..." % c.num_cores
    exps = experiment_set.list_experiments()
    cls = experiment_set.result_table_class()
    try:
        for fg, bg, results in progress.bar(pool.imap_unordered(ComputeOverrepresentedWordsCallable(experiment_set), exps), label="Progress ", expected_size=len(exps)):
            for w, odds, pval in results:
                row = cls(foreground_group_name=fg, background_group_name=bg, word=w, odds=odds, pval=pval)
                session_out.add(row)
    except KeyboardInterrupt:
        print "Terminating pool.."
        pool.terminate()
        pool.join()

    print "Committing..."
    session_out.commit()
    print "Done"
Example #9
    def process_media(self):
        puts(colored.yellow("processing media: %s" % (','.join(self.media))))
        for i in progress.bar(self.media):
            result = self.add_location(i)
            self.report.append("%s" % (result,))
        for i in progress.bar(self.download_locations):
            result = self.add_location(i)
            self.report.append("%s" % (result,))
Example #10
    def update(self):
        """
        - Download new package list
        - Update locally loaded packages
        """

        dp_bootstrap = DjangoPackagesBootstrap(proxy=self.proxy)
        session = Session()

        puts_header("Updating categories...")
        categories = dp_bootstrap.grid_list()
        puts("Category list downloaded 1/1 OK")
        for category in progress.bar(categories):
            filtered_args = [(key,val) for key,val in category.items() if key in CATEGORY_FIELDS]
            category_model = session.query(Category).filter(Category.slug==category['slug']).first()
            if category_model:
                for key, val in filtered_args:
                    setattr(category_model, key, val)
            else:
                category_model = Category(**dict(filtered_args))
                session.add(category_model)
        session.commit()
        print "Categories updated"

        puts()

        puts_header("Updating packages...")
        packages = dp_bootstrap.app_list()
        puts("Package list downloaded 1/1 OK")
        for package in progress.bar(packages):
            filtered_args = [(key,val) for key,val in package.items() if key in PACKAGE_FIELDS]
            package_model = session.query(Package).filter(Package.slug==package['slug']).first()
            #print "PACKAGE_MODEL", package_model
            if package_model:
                for key, val in filtered_args:
                    setattr(package_model, key, val)
            else:
                package_model = Package(**dict(filtered_args))
                session.add(package_model)

            package_model.set_package_name()

            package_model.categories = []
            if package['grids']:
                if "/api/v1/grid/this-site/" in package['grids']:
                    package['grids'].remove("/api/v1/grid/this-site/")
                categories = session.query(Category).filter(Category.resource_uri.in_(package['grids']))
                for category in categories:
                    package_model.categories.append(category)

        session.commit()
        print "Packages updated"

        self._check_installed_packages()
Example #11
def oort(oortpath, jnius):
    GZIPOutputStream = jnius.autoclass("java.util.zip.GZIPOutputStream")
    ObjectOutputStream = jnius.autoclass("java.io.ObjectOutputStream")
    File = jnius.autoclass("java.io.File")
    FileOutputStream = jnius.autoclass("java.io.FileOutputStream")

    from pylire.process.bitsampling import BITS
    from pylire.process.bitsampling import NUM_BITS, NUM_DIMENSIONS, NUM_FUNCTION_BUNDLES

    puts(colored.cyan("Writing HDF5 data to ObjectOutputStream..."))

    if isfile(oortpath):
        unlink(oortpath)

    oortcloud = ObjectOutputStream(GZIPOutputStream(FileOutputStream(File(oortpath))))

    oortcloud.writeInt(NUM_BITS)
    oortcloud.writeInt(NUM_DIMENSIONS)
    oortcloud.writeInt(NUM_FUNCTION_BUNDLES)

    with indent(3, quote="+ "):
        puts(colored.red("(int) NUM_BITS: %d" % NUM_BITS))
        puts(colored.red("(int) NUM_DIMENSIONS: %d" % NUM_DIMENSIONS))
        puts(colored.red("(int) NUM_FUNCTION_BUNDLES: %d" % NUM_FUNCTION_BUNDLES))

        for floatval in progress.bar(BITS.flatten(), label=colored.red(" (float) BITS")):
            oortcloud.writeFloat(floatval)

    oortcloud.flush()
    oortcloud.close()
Example #12
        def _build_request():
            body.seek(0, os.SEEK_SET)
            if verbose:
                try:
                    chunk_size = 1048576
                    expected_size = size / chunk_size + 1
                    chunks = chunk_generator(body, chunk_size)
                    progress_generator = progress.bar(
                        chunks,
                        expected_size=expected_size,
                        label=' uploading {f}: '.format(f=key))
                    data = IterableToFileAdapter(progress_generator, size)
                except Exception:
                    print(' uploading {f}'.format(f=key))
                    data = body
            else:
                data = body

            request = S3Request(method='PUT',
                                url=url,
                                headers=headers,
                                data=data,
                                metadata=metadata,
                                access_key=access_key,
                                secret_key=secret_key,
                                queue_derive=queue_derive)
            return request
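
The chunk_generator and IterableToFileAdapter helpers come from the surrounding project and are not shown here. A minimal sketch of the generator half, assuming body is a binary file-like object, could be:

def chunk_generator(fileobj, chunk_size):
    # yield successive reads until EOF (hypothetical helper)
    while True:
        chunk = fileobj.read(chunk_size)
        if not chunk:
            break
        yield chunk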
Example #13
    def download(self):
        """
        Download the ZIP file in pieces.
        """
        if self.verbosity:
            if self.resume_download:
                self.header("Resuming download of ZIP file")
            else:
                self.header("Downloading ZIP file")

        expected_size = self.current_release_size

        # Prep
        headers = dict()
        if os.path.exists(self.zip_path):
            if self.resume_download:
                headers['Range'] = 'bytes=%d-' % self.local_file_size
                expected_size = expected_size - self.local_file_size
            else:
                os.remove(self.zip_path)

        # Stream the download
        chunk_size = 1024
        req = requests.get(self.url, stream=True, headers=headers)
        n_iters = float(expected_size) / chunk_size + 1
        with open(self.zip_path, 'ab') as fp:
            for chunk in progress.bar(req.iter_content(chunk_size=chunk_size),
                                      expected_size=n_iters):
                fp.write(chunk)
                fp.flush()
Example #14
    def build(self, matrix, skim_depth=10):

        """
        Build graph, with PageRanks on nodes.

        :param matrix: A term matrix.
        :param skim_depth: The number of sibling edges.
        """

        # Register nodes and edges.
        for anchor in progress.bar(matrix.terms):

            n1 = matrix.text.unstem(anchor)

            # Heaviest pair scores:
            pairs = matrix.anchored_pairs(anchor).items()
            for term, weight in list(pairs)[:skim_depth]:

                n2 = matrix.text.unstem(term)
                self.graph.add_edge(n1, n2, weight=weight)

        # Compute PageRanks.
        ranks = nx.pagerank(self.graph)
        first = max(ranks.values())

        # Convert to 0->1 ratios.
        ranks = {k: v/first for k, v in ranks.items()}

        # Annotate the nodes.
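        # note: (graph, name, values) is the networkx < 2.0 argument order;
        # networkx 2.x expects set_node_attributes(graph, values, name)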
        nx.set_node_attributes(self.graph, 'pagerank', ranks)
Example #15
    def __download(self, q, url):

        request = requests.session()
        ID = self.__getTrakeId(url)
        fullurl = "http://api.soundcloud.com/i1/tracks/{0}/streams?client_id=b45b1aa10f1ac2941910a7f0d10f8e28&app_version=8bae64e".format(
            ID)
        response = request.get(fullurl).text
        j = json.loads(response)
        link = j["http_mp3_128_url"]
        if link is not None:
            url = link
        else:
            raise Exception("Failed to get download link")

        request = requests.session()

        response = request.get(url, stream=True)

        a, c = self.__getTrackInfo(ID)

        filename = c + ".mp3"

        with open(filename, 'wb') as fd:

            total_length = int(response.headers.get('content-length'))  # taken from http://stackoverflow.com/a/20943461
            for chunk in progress.bar(response.iter_content(chunk_size=1024), expected_size=(total_length / 1024)):
                if chunk:
                    fd.write(chunk)
                    fd.flush()
        filename = [filename, a, c]

        self.__addtags(filename)

        q.task_done()
Example #16
def check_indigo_correct(size=1000):

    from chembl_business_model.models import CompoundStructures
    from clint.textui import progress
    import tempfile

    f = tempfile.NamedTemporaryFile(delete=False)
    print "saving to file %s" % f.name
    errorCount = 0
    structures = CompoundStructures.objects.all()
    count = structures.count()
    pk = CompoundStructures._meta.pk.name

    for i in progress.bar(range(0, count, size), label="Indigo check "):
        if i < 0:
            chunk = CompoundStructures.objects.order_by(pk)[:size]
        else:
            last_pk = CompoundStructures.objects.order_by(pk).only(pk).values_list(pk)[i][0]
            chunk = CompoundStructures.objects.order_by(pk).filter(pk__gt=last_pk)[:size]

        for structure in chunk:
            try:
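                # indigoObj: an Indigo() instance created at module level (not shown in this snippet)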
                indigoObj.loadMolecule(str(structure.molfile))
            except Exception as e:
                f.write('%s\t%s\n' % (structure.pk, str(e)))
                errorCount += 1
    f.close()
    print "%s errors saved to %s" % (str(errorCount), f.name)
Example #17
def download_files(value):
    """Download WSPRNet Archive Files

    Actions Performed:
        1. Gets archive name from WSPRNet, then downloads the file
        2. Updates the database via the update_stats() function"""
    os.chdir(SRC_PATH)
    print("")
    r = requests.get(BASE_URL + value, stream=True)
    with open(value, 'wb') as f:
        total_length = int(r.headers.get('content-length'))
        for sect in progress.bar(r.iter_content(chunk_size=1024),
                                 label=("* " + value + " "), expected_size=(total_length / 1024) + 1):
            if sect:
                f.write(sect)
                f.flush()

    # now that the file has been downloaded to SRC_PATH, update the status table
    extract_file(value)
    utime = time.strftime("%Y-%b-%d", time.gmtime())
    lines = records      # records and csv_cols: module globals, presumably set by extract_file()
    columns = csv_cols

    update_stats(value, utime, columns, lines)
    clean_csv_path()
Example #18
def get_file_with_progress(url, file, do_tar=True, path="."):
    r = requests.get(url, stream=True)
    with open(file, "wb") as f:
        print("Downloading %s" % file)
        total_length = int(r.headers.get('content-length'))
        for chunk in progress.bar(r.iter_content(chunk_size=1024),
                                  expected_size=(total_length/1024) + 1):
            if chunk:
                f.write(chunk)
                f.flush()
        r.close()
    if do_tar:
        try:
            tar = tarfile.open(file, "r:*")
            if tarfile.is_tarfile(file):
                for item in tar:
                    print("Extracting %s" % item.name)
                    tar.extract(item, path)
            tar.close()
        except tarfile.ReadError:
            with zipfile.ZipFile(file, "r") as zip_file:
                for item in zip_file.namelist():
                    print("Extracting %s" % item)
                    zip_file.extract(item, path)
Example #19
def update_participants_list(site, wikitext):
    """
    Given a Pywikibot Site and some wikitext containing a participants list,
    create an "Inactive participants" section and move users there as needed.
    """

    # Obtain the region of wikitext that contains usernames
    first_line = next(x for x in wikitext.splitlines() if line_to_username(x))
    last_line = next(x for x in reversed(wikitext.splitlines()) if line_to_username(x))
    old_participants_list = wikitext[wikitext.find(first_line):wikitext.find(last_line) + len(last_line)]

    # If the page doesn't use a numbered or bulleted list, we really shouldn't parse it
    if not LIST_ITEM.match(first_line):
        print("Error! I can't recognize that list format.")
        return wikitext

    activity_lists = {True: [], False: []}
    for username, line in progress.bar(wikitext_to_usernames(wikitext)):
        activity_lists[is_user_active(site, username)].append(line)
    active_users = "\n".join(activity_lists[True])
    inactive_users = "\n".join(activity_lists[False])

    new_participants_list = TEMPLATE.format(unicode(active_users),
                                            unicode(inactive_users))
    return wikitext.replace(old_participants_list, new_participants_list)
Example #20
    def download(self, url, path):
        """
        Download a remote file
        :param url: Remote URL
        :param path: Destination directory
        :return: Destination full file path

        .. todo:: Fix OSX bug where we have to replace https by http
        """
        if __platform__ == 'darwin':
            url = url.replace('https', 'http')

        path = self.__control_path(path)

        req = self.req.get(url, stream=True, timeout=30)

        if req.status_code != 200:
            raise Exception('Can\'t get file')

        with open(path, 'wb') as dest_file:
            total_length = int(req.headers.get('content-length'))
            if total_length > 0:
                try:
                    for chunk in progress.bar(req.iter_content(chunk_size=1024), expected_size=(total_length / 1024) + 1,
                                              label='Downloading: '):
                        if chunk:
                            dest_file.write(chunk)
                            dest_file.flush()
                    return path
                except KeyboardInterrupt:
                    # Removing file to prevent broken files on user computer
                    os.remove(path)
                    raise Exception('Download interrupted by user')
            else:
                raise Exception('Empty file size')
Example #21
    def es_stream_docs(cls):

        """
        Stream Elasticsearch docs.

        Yields:
            dict: The next document.
        """

        for t in progress.bar(cls.rank_texts()):

            text = t['text']

            yield dict(

                _id         = text.id,
                corpus      = text.corpus,
                identifier  = text.identifier,
                url         = text.url,

                authors     = text.pretty('authors'),
                title       = text.pretty('title'),
                publisher   = text.pretty('publisher'),
                date        = text.pretty('date'),
                journal     = text.pretty('journal_title'),

                count       = text.count,
                rank        = t['rank'],
                score       = t['score'],

            )
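
A generator like this is typically fed to the bulk helper of the official Elasticsearch client. A hypothetical call (the client and the model class Text, plus the index name, are placeholders):

from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk

es = Elasticsearch()
bulk(es, Text.es_stream_docs(), index='texts')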
Example #22
def background_basic(data):
    """
    Simply scans for single events, does not look forwards or backwards in time.
    """
    single_counts = [[] for i in range(data.ncol)]

    blocksize = 1000
    start = 0
    for a in progress.bar(range(data.nrows / blocksize)):
        try:
            x = data[start:(start + blocksize)]
        except Exception:
            break
        if len(x) == 0:
            break

        # iterate over each row; if no other channel fired, append the single value
        for row in x:
            idx = row.nonzero()[0]
            if len(idx) == 1:
                single_counts[idx[0]].append(float(row[idx[0]]))

        start += blocksize

    return single_counts
Example #23
def read_project_file(fin):
    from clint.textui import progress
    print "Reading projects"

    total = len(fin.readlines())
    fin.seek(0)

    headers = None
    projects = []
    entity_mapping = {}
    entity_codes = {}
    csv_reader = _unicode_csv_reader(fin, delimiter=',', quotechar='"')
    for cells in progress.bar(csv_reader, expected_size=total):
        if not headers:
            headers = _build_headers(cells)
            continue
        project = _build_project(cells, headers)
        _convert_to_right_type(project)
        _map_entities(project, entity_codes, entity_mapping)
        projects.append(project)

    print "Read {0} projects".format(len(projects))
    return projects, entity_mapping
Example #24
def download_file(url, out_path, resume=False, verbosity=1,
                  output_stream=sys.stdout, expected_size=None,
                  chunk_size=1024):

    if verbosity and output_stream:
        output_stream("Downloading ZIP file")

    if expected_size is None:
        resp = requests.head(url)
        expected_size = int(resp.headers.get('content-length', 0))

    # Prep
    headers = dict()
    if os.path.exists(out_path):
        if resume:
            cur_sz = os.path.getsize(out_path)
            headers['Range'] = 'bytes=%d-' % cur_sz
            expected_size = expected_size - cur_sz
        else:
            os.remove(out_path)

    # Stream the download
    req = requests.get(url, stream=True, headers=headers)
    n_iters = float(expected_size) / chunk_size + 1
    with open(out_path, 'ab') as fp:
        for chunk in progress.bar(req.iter_content(chunk_size=chunk_size),
                                  expected_size=n_iters):
            fp.write(chunk)
            fp.flush()
Example #25
def rejar(jarpth, fresh_content_map={}, compression=zipfile.ZIP_DEFLATED):
    if not isfile(jarpth):
        raise IOError("No jar: %s" % jarpth)

    puts(colored.cyan("Re-jarring '%s' with %d possible replacements:" % (basename(jarpth), len(fresh_content_map))))

    with indent(3, quote=" *"):
        for fresh_key in fresh_content_map.keys():
            puts(colored.cyan(fresh_key))

    print()

    oldjar = zipfile.ZipFile(jarpth, mode="r")
    newjar = zipfile.ZipFile(tempfile.mktemp(suffix=".zip"), mode="w", compression=compression)

    for item in progress.bar(oldjar.infolist(), label=colored.cyan("Re-jar: %s" % basename(jarpth))):
        replace = basename(item.filename) in fresh_content_map
        content = fresh_content_map[basename(item.filename)] if replace else oldjar.read(item.filename)
        if replace:
            puts(colored.yellow("Replaced %s" % item.filename))
        newjar.writestr(item, content)

    print()
    oldjarpth = oldjar.filename
    newjarpth = newjar.filename
    oldjar.testzip()
    oldjar.close()
    shutil.move(oldjar.filename, oldjar.filename + ".backup")

    newjar.testzip()
    newjar.close()
    _copy(newjarpth, oldjarpth)

    puts(colored.yellow("Finished restructuring jar: %s" % oldjarpth))
    print()
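
A hypothetical call, replacing a single entry inside a jar (path and bytes are placeholders):

rejar('/path/to/app.jar', {'MANIFEST.MF': b'Manifest-Version: 1.0\n'})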
Example #26
    def handle(self, *args, **options):
        self.set_config(*args, **options)
        self.header("Sampling %i rows from %s source files" % (
            self.sample_rows,
            len(self.tsv_list),
        ))

        # Make sure sample dir exists and is empty
        if not os.path.exists(self.test_data_dir):
            os.makedirs(self.test_data_dir)
        if os.path.exists(self.sample_dir):
            shutil.rmtree(self.sample_dir)
        os.makedirs(self.sample_dir)

        # Loop through all the files in the source directory
        for name in progress.bar(self.tsv_list):

            # Find the input
            file = os.path.join(self.tsv_dir, name)
            out_file = os.path.join(self.sample_dir, name)

            if self.verbosity > 2:
                self.log(" Sampling %s" % file)

            # Open the file
            fi = FileInput(file, True)

            # Generate our sample
            sample = two_pass_sample(fi, sample_size=self.sample_rows)

            # Open our output file
            out = open(out_file, 'wb')

            # Write it out
            for line in chain(fi.header, sample):
                out.write(line)
Example #27
def processManifest(args):
    manifestPath = os.path.join(args.baseDir, "sprites.mf")
    if not os.path.exists(manifestPath):
        raise Usage("Manifest not found at %s." % (red(manifestPath, bold=True)), (manifestPath,))
    lineCount = len(open(manifestPath).readlines())

    manifest = csv.DictReader(open(manifestPath), skipinitialspace=True)
    manifest.fieldnames = ["filename", "spritesheet"]
    spritesheets = {}

    for line in progress.bar(manifest, label="Reading Manifest: ", expected_size=lineCount):
        sheet = line["spritesheet"]
        image = line["filename"]
        imagePath = os.path.join(args.baseDir, image)
        if not os.path.exists(imagePath):
            raise Usage(
                "Image not found at %s from %s, %s."
                % (
                    red(imagePath, bold=True),
                    blue(manifestPath, bold=True),
                    blue("line " + str(manifest.line_num), bold=True),
                ),
                (imagePath, manifestPath, manifest.line_num),
            )
        spritesheets.setdefault(sheet, Spritesheet(sheet)).addImage(image)
    return spritesheets.values()
Example #28
    def handle(self, *args, **options):
        self.dc_geo_maps_file = os.path.join(settings.ROOT_DIR.root, 'dataviz/static/data.zip')
        self.dataviz_static_folder = os.path.join(settings.ROOT_DIR.root, 'dataviz/static/')
        if not os.path.isfile(self.dc_geo_maps_file):
            self.stdout.write(self.style.ERROR('Warning, this might take a while depending on your internet connection'))
            response = requests.get('https://s3.amazonaws.com/dctraffic/data.zip', stream=True)

            if not response.ok:
                # Something went wrong
                self.stdout.write(self.style.ERROR('ERROR'))

            else:
                self.stdout.write(self.style.SUCCESS('Downloading %s' % self.dc_geo_maps_file))
                with open(self.dc_geo_maps_file, 'wb') as handle:
                    total_length = int(response.headers.get('content-length')) / 1024 + 1
                    # Override the expected_size, for iterables that don't support len()
                    for block in progress.bar(response.iter_content(1024), expected_size=total_length):
                        handle.write(block)
                        handle.flush()

                self.stdout.write(self.style.SUCCESS('Finished Download %s' % self.dc_geo_maps_file))
        if os.path.isfile(self.dc_geo_maps_file):
            zfile = zipfile.ZipFile(self.dc_geo_maps_file)
            zfile.extractall(self.dataviz_static_folder)
            self.stdout.write(self.style.SUCCESS('Unzipped all files in %s' % self.dc_geo_maps_file))
Example #29
        def _build_request():
            body.seek(0, os.SEEK_SET)
            if verbose:
                try:
                    # hack to raise exception so we get some output for
                    # empty files.
                    if size == 0:
                        raise Exception

                    chunk_size = 1048576
                    expected_size = size / chunk_size + 1
                    chunks = chunk_generator(body, chunk_size)
                    progress_generator = progress.bar(
                        chunks,
                        expected_size=expected_size,
                        label=' uploading {f}: '.format(f=key))
                    data = IterableToFileAdapter(progress_generator, size)
                except Exception:
                    print(' uploading {f}'.format(f=key))
                    data = body
            else:
                data = body

            headers.update(self.session.headers)
            request = S3Request(method='PUT',
                                url=url,
                                headers=headers,
                                data=data,
                                metadata=metadata,
                                access_key=access_key,
                                secret_key=secret_key,
                                queue_derive=queue_derive)
            return request
Example #30
def geturl(url, dst):
    if os.path.isfile(dst):
        halt_exec = prompt.options("A file already exists with that name. Continue?", options=[
            {'selector': 'Y', 'prompt': 'yes', 'return': False},
            {'selector': 'N', 'prompt': 'no', 'return': True}
        ])
        if halt_exec:
            raise SystemExit('Move the file and try again')
    try:
        req = requests.get(url, stream=True)
        req.raise_for_status()
    except requests.exceptions.HTTPError:
        raise SystemExit(
            "There was an error retrieving the data. Check your internet connection and try again."
        )
    with open(dst, "wb") as f:
        total_length = req.headers.get('content-length')

        if total_length is None: # no content length header
            f.write(req.content)
        else:
            total_length = int(total_length)
            for chunk in progress.bar(req.iter_content(chunk_size=1024), expected_size=(total_length/1024) + 1):
                if chunk:
                    f.write(chunk)
                    f.flush()
Example #31
def progress_bar(enumerable, logger, **kwargs):
    """
    Show the progress bar in the terminal, if the logging level matches and we are interactive.

    :param enumerable: The iterator of which we indicate the progress.
    :param logger: The bound logging.Logger.
    :param kwargs: Keyword arguments to pass to clint.textui.progress.bar.
    :return: The wrapped iterator.
    """
    if (not logger.isEnabledFor(logging.INFO)
            or sys.stdin.closed or not sys.stdin.isatty()):
        return enumerable
    return progress.bar(enumerable, **kwargs)
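
A hypothetical use; when the process is non-interactive or INFO is disabled, the plain iterator comes back and the loop runs silently:

import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger('demo')
for item in progress_bar(range(500), log, expected_size=500):
    pass  # do the per-item work here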
Example #32
def plot_many_burndown(args, target, header, parts):
    if not args.output:
        print("Warning: output not set, showing %d plots." % len(parts))
    itercnt = progress.bar(parts, expected_size=len(parts)) \
        if progress is not None else parts
    stdout = io.StringIO()
    for name, matrix in itercnt:
        backup = sys.stdout
        sys.stdout = stdout
        plot_burndown(args, target,
                      *load_burndown(header, name, matrix, args.resample))
        sys.stdout = backup
    sys.stdout.write(stdout.getvalue())
Example #33
def download_zip(url, download_path=None):
    if download_path is None:
        download_path = tempfile.NamedTemporaryFile(suffix='.zip').name
    r = requests.get(url, stream=True)
    print('Downloading url --> %s\nto --> %s' % (url, download_path))
    with open(download_path, 'wb') as f:
        total_length = int(r.headers.get('content-length'))
        for chunk in progress.bar(r.iter_content(chunk_size=1024),
                                  expected_size=(total_length/1024) + 1):
            if chunk:
                f.write(chunk)
                f.flush()
    return download_path
Example #34
    def handle(self, *args, **options):
        with transaction.atomic():
            for label in progress.bar(ShapeSubstanceLabel.objects.all()):
                new_hit_settings = label.mturk_assignment.hit.hit_type.experiment.new_hit_settings
                shape = label.shape
                substance = label.substance
                if (shape.substances.filter(substance=substance).count() >=
                        new_hit_settings.min_assignment_consensus):
                    shape.substance = substance
                    shape.update_entropy(save=False)
                    shape.save()

        with transaction.atomic():
            for label in progress.bar(MaterialShapeNameLabel.objects.all()):
                new_hit_settings = label.mturk_assignment.hit.hit_type.experiment.new_hit_settings
                shape = label.shape
                name = label.name
                if (shape.names.filter(name=name).count() >=
                        new_hit_settings.min_assignment_consensus):
                    shape.name = name
                    shape.update_entropy(save=False)
                    shape.save()
Example #35
    def handle(self, *args, **options):

        admin_user = User.objects.get_or_create(
            username='******')[0].get_profile()
        scene_category, _ = PhotoSceneCategory.objects \
                .get_or_create(name='kitchen')

        for id in progress.bar(xrange(1, 37)):
            photo = Photo.objects.get(id=id)
            if not photo.scene_category:
                photo.scene_category = scene_category
            photo.scene_category_correct = True
            photo.save()
Example #36
def get_tape_data():
    if not os.path.isfile(data_path / "training" / "train.json"):
        print("Downloading training set...")
        url = 'https://polybox.ethz.ch/index.php/s/chy7bf5a4xKvuds/download'
        r = requests.get(url, allow_redirects=True, stream=True)
        with open(data_path / "training" / "train.json", 'wb') as f:
            total_length = int(r.headers.get('content-length'))
            for chunk in progress.bar(r.iter_content(chunk_size=1024),
                                      expected_size=(total_length / 1024) + 1):
                if chunk:
                    f.write(chunk)
                    f.flush()
        print("Finished downloading training set.\n")

    if not os.path.isfile(data_path / "validation" / "val.json"):
        print("Downloading validation set...")
        url = 'https://polybox.ethz.ch/index.php/s/4SrHcGRQNX84OF7/download'
        r = requests.get(url, allow_redirects=True, stream=True)
        with open(data_path / "validation" / "val.json", 'wb') as f:
            total_length = int(r.headers.get('content-length'))
            for chunk in progress.bar(r.iter_content(chunk_size=1024),
                                      expected_size=(total_length / 1024) + 1):
                if chunk:
                    f.write(chunk)
                    f.flush()
        print("Finished downloading validation set.\n")

    if not os.path.isfile(data_path / "test" / "test.json"):
        print("Downloading test set...")
        url = 'https://polybox.ethz.ch/index.php/s/0DtcjzDfcHL0yAP/download'
        r = requests.get(url, allow_redirects=True, stream=True)
        with open(data_path / "test" / "test.json", 'wb') as f:
            total_length = int(r.headers.get('content-length'))
            for chunk in progress.bar(r.iter_content(chunk_size=1024),
                                      expected_size=(total_length / 1024) + 1):
                if chunk:
                    f.write(chunk)
                    f.flush()
        print("Finished downloading test set.\n")
Example #37
def download(url, filepath):
    """ Download data from a URL with a handy progress bar """

    print("Downloading: {}".format(url))
    r = requests.get(url, stream=True)

    with open(filepath, 'wb') as f:
        content_length = int(r.headers.get('content-length'))
        for chunk in progress.bar(r.iter_content(chunk_size=1024),
                                  expected_size=(content_length / 1024) + 1):
            if chunk:
                f.write(chunk)
                f.flush()
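
A hypothetical call (URL and destination path are placeholders):

download('https://example.com/archive.zip', '/tmp/archive.zip')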
Example #38
def download_file(url):
    local_filename = url.split('/')[-1]
    with requests.Session() as session:
        session.auth = (unm, pwd)
        r1 = session.request('get', url)
        r = session.get(r1.url, auth=(unm, pwd), stream=True)
        if r.ok:
            with open(local_filename, 'wb') as f:
                total_length = int(r.headers.get('content-length'))
                for chunk in progress.bar(r.iter_content(chunk_size=8192), expected_size=(total_length/8192) + 1): 
                    if chunk:
                        f.write(chunk)
                        f.flush()
Example #39
def download(path, url, name, filetype):
    filepath = '%s/%s.%s' % (path, name, filetype)
    if os.path.exists(filepath):
        return

    response = requests.get(url, headers=headers, stream=True)
    with open(filepath, "wb") as fp:
        total_length = int(response.headers.get('content-length'))
        for ch in progress.bar(response.iter_content(chunk_size=2391975),
                               expected_size=(total_length / 2391975) + 1):
            if ch:
                fp.write(ch)
    print('download success :: %s' % filepath)
Example #40
def list_category(category_name):
    'Gets a list of album covers that intersect with the specified category'
    album_covers_cat = pywikibot.Category(wiki, title="Category:Album covers")
    category_name = (category_name if category_name.startswith("Category:")
                     else "Category:" + category_name)
    other_cat = pywikibot.Category(wiki, title=category_name)

    # Get a list of album covers
    album_covers = []
    num_album_covers = album_covers_cat.categoryinfo["files"]
    for album_cover in progress.bar(album_covers_cat.articles(),
                                    label="Getting titles ",
                                    expected_size=num_album_covers):
        album_covers.append(album_cover.title(withNamespace=False))
    album_covers = key_on_first_letter(album_covers)

    # Get a list of other images
    num_other = other_cat.categoryinfo["files"]
    other_list = []
    for other in progress.bar(other_cat.articles(),
                              label="Getting other titles ",
                              expected_size=num_other):
        other_list.append(other.title(withNamespace=False))
    other_dict = key_on_first_letter(other_list)

    # Get that intersection
    result_list = {}
    for letter in progress.bar(album_covers,
                               label="Intersecting "):
        if letter not in other_dict:
            continue

        for album_cover in album_covers[letter]:
            if album_cover in other_dict[letter]:
                result_list[letter] = result_list.get(letter, []) +\
                                      [album_cover]
                other_dict[letter].remove(album_cover)

    return result_list
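
The key_on_first_letter helper is not shown; a minimal sketch, assuming it simply buckets titles by their first character so the intersection only compares titles that could match, would be:

def key_on_first_letter(titles):
    keyed = {}
    for title in titles:
        keyed.setdefault(title[:1], []).append(title)
    return keyed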
Example #41
def save_file(url):
    local_filename = url.split('/')[-1]
    remove_file_if_exists(local_filename)
    print('* Getting file: ' + local_filename)
    response = fetch(url)
    total_length = int(response.headers.get('content-length'))
    with open(local_filename, 'wb') as f:
        for chunk in progress.bar(response.iter_content(chunk_size=1024),
                                  expected_size=(total_length / 1024) + 1):
            if chunk:
                f.write(chunk)
                f.flush()
    return local_filename
Example #42
    def update(self):
        self.speak("After downloading file please replace this file with the downloaded one")
        url = '# url after uploading file'
        r = requests.get(url, stream=True)

        with open("Voice.py", "wb") as Pypdf:

            total_length = int(r.headers.get('content-length'))

            for ch in progress.bar(r.iter_content(chunk_size=2391975),
                                   expected_size=(total_length / 2391975) + 1):
                if ch:
                    Pypdf.write(ch)
Example #43
def download_file(url, dst):
    local_filename = url.split('/')[-1]
    with requests.get(url, stream=True) as r:
        r.raise_for_status()
        file_path = dst + "/" + local_filename
        with open(file_path, 'wb') as f:
            print("downloading file: " + file_path)
            total_length = int(r.headers.get('content-length'))
            for chunk in progress.bar(r.iter_content(chunk_size=1024), expected_size=(total_length/1024) + 1):
                if chunk:
                    f.write(chunk)
                    f.flush()
    return local_filename
Example #44
def get_df_trie(tries):
    """
    Return a trie where the prefix's value is its document frequency among the tries.
    """
    df_trie = pygtrie.StringTrie(separator=" ")
    for _, trie in progress.bar(tries, expected_size=len(tries)):
        for prefix in trie.keys():
            if prefix not in df_trie:
                df_trie[prefix] = 0
            df_trie[prefix] += 1
    for prefix in df_trie.keys():
        df_trie[prefix] = (df_trie[prefix] - 1) / len(tries)
    return df_trie
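
A toy usage, assuming space-separated prefixes and (name, trie) pairs:

import pygtrie

t1 = pygtrie.StringTrie(separator=' ')
t1['new york'] = 1
t2 = pygtrie.StringTrie(separator=' ')
t2['new york'] = 1
t2['los angeles'] = 1
df = get_df_trie([('doc1', t1), ('doc2', t2)])
# df['new york'] -> 0.5 and df['los angeles'] -> 0.0 under the (count - 1) / len(tries) scaling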
Example #45
def decrypt_progress(inf, outf, key, length):
    cipher = AES.new(key, AES.MODE_ECB)
    assert length % 16 == 0
    chunks = length // 4096 + 1
    for i in progress.bar(range(chunks)):
        block = inf.read(4096)
        if not block:
            break
        decblock = cipher.decrypt(block)
        if i == chunks - 1:
            outf.write(unpad(decblock))
        else:
            outf.write(decblock)
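
The unpad helper is not defined in the snippet; if the final block carries PKCS#7-style padding (an assumption), a minimal version would be:

def unpad(data):
    # PKCS#7: the last byte encodes how many padding bytes to strip (assumption)
    return data[:-ord(data[-1:])]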
Example #46
def prepare_tiles(size, in_directory, out_directory):
    tiles = {}
    for f in progress.bar(os.listdir(in_directory)):
        try:
            image = Image.open(os.path.join(in_directory, f))
            image = ImageOps.fit(image, (size, size))
            image.save(os.path.join(out_directory, f), "JPEG")

            tiles[os.path.join(out_directory, f)] = image_average_color(image)
        except IOError:
            print "File '%s' not an image or corrupted, skipping. " % f

    return tiles
Example #47
def download_zip(url, path):
    r = requests.get(url, stream=True)
    temp = tempfile.NamedTemporaryFile(suffix='.zip')
    print('Downloading url --> %s\nto --> %s' % (url, temp.name))
    with open(temp.name, 'wb') as f:
        total_length = int(r.headers.get('content-length'))
        for chunk in progress.bar(r.iter_content(chunk_size=1024), expected_size=(total_length/1024) + 1):
            if chunk:
                f.write(chunk)
                f.flush()
    z = zipfile.ZipFile(temp)
    print("extracting to --> %s" % path)
    z.extractall(path=path)
Example #48
def fetchfile(url, filename, filesize):
    asset_file_data = get(url, stream=True)
    with open(filename, "wb") as out_file:
        print(filename)
        total_length = int(filesize)
        for chunk in bar(
                asset_file_data.iter_content(chunk_size=4096),
                expected_size=(total_length / 4096) + 1,
        ):
            if chunk:
                out_file.write(chunk)
                out_file.flush()
Example #49
def setup_url(url, name):
    r = requests.get(url, stream=True)
    filename = os.path.basename(url)
    path = os.path.join(HOME, 'tmp', filename)
    length = int(r.headers['content-length'])
    with tempfile.TemporaryFile() as f:
        for chunk in progress.bar(r.iter_content(chunk_size=1024),
                                  label=filename,
                                  expected_size=(length / 1024) + 1):
            if chunk:
                f.write(chunk)
                f.flush()
        f.flush()
        f.seek(0)
        tar = tarfile.open(fileobj=f)
        files = tar.getnames()
        vmx = None
        for i in files:
            if i.endswith('vmx'):
                vmx = i
            if i.startswith('/') or i.startswith('..'):
                puts(
                    colored.red(
                        "This box is comprised of filenames starting with '/' or '..'"
                    ))
                puts(colored.red("Exiting for the safety of your files"))
                exit()
        if vmx:
            puts(colored.green("Extracting..."))
            if not name:
                folder, dot, ext = vmx.rpartition('.')
                path = os.path.join(HOME, folder)
                os.mkdir(os.path.join(HOME, folder), 0o755)
            else:
                path = os.path.join(HOME, name)
                os.mkdir(os.path.join(HOME, name), 0o755)

            config = {
                'vmx':
                os.path.join(path, vmx),
                'url':
                url,
                'user':
                prompt.query("What username would you like to save?",
                             default='mech')
            }
            tar.extractall(path)
            save_mechfile(config, path)
            save_mechfile(config, '.')
            return os.path.join(path, vmx)
    return os.path.abspath(path)
Example #50
def download_wallpaper(start_page, max_pages):
    driver = login()

    try:
        while start_page <= max_pages:
            driver.get(url + str(start_page))
            time.sleep(1)
            print(f'Loading page: {start_page}/{max_pages}')
            start_page += 1
            # Find image thumbnail element
            elements = driver.find_elements_by_class_name('item')
            print(f'Number of images: {len(elements)}')

            # Extract image page links from thumbnail element
            image_links = [
                element.find_element_by_tag_name('a').get_attribute('href')
                for element in elements
            ]
            # print(image_links)

            for image in image_links:
                driver.get(image)
                # Extract the download link of image
                download = driver.find_element_by_xpath('/html/body/section/div[2]/div[1]/div/div/div[1]/div[2]/a')\
                    .get_attribute('href')

                # Extract image name from the links "https://get.wallhere.com/photo/anime-1581485.png"
                filename = download.split('/')[-1]
                file_path = os.path.join(image_dir, filename)

                if not is_file_exists(filename):
                    print(f'Downloading image: {filename} ...')
                    r = requests.get(download, stream=True)
                    # Save image
                    with open(file_path, 'wb') as f:
                        # Extract size of the file
                        total_length = int(r.headers.get('content-length'))
                        # Shows progress bar while downloading the image
                        for chunk in progress.bar(
                                r.iter_content(chunk_size=1024),
                                expected_size=(total_length / 1024) + 1):
                            if chunk:
                                f.write(chunk)
                                f.flush()
                    print(f'Download complete: {filename}')
                else:
                    print('Already exists!')

    finally:
        # Close the webdriver
        driver.quit()
Example #51
    def get_download_app(self, name):

        url = config.app['search']['url']
        downPath = ""

        payload = {'query': name, 'limit': 1}

        response = requests.get(url, params=payload)

        for x in response.json()['datalist']['list']:

            print "\n"
            print "------------------- [ APP ] -------------------"
            print "| ID: " + str(x['id']) + "     |"
            print "| Name: " + str(x['name']) + "     |"
            print "| Size: " + str(x['size']) + "     |"
            print "-----------------------------------------------"
            print "Package: " + str(x['package'])
            print "-> Version Name: " + str(x['file']['vername'])
            print "-> Version Code: " + str(x['file']['vercode'])
            print "-> MD5: " + str(x['file']['md5sum'])
            print "-> Path: " + str(x['file']['path']) + "\n"

            dowSize = str(x['size'])
            downPath = str(x['file']['path'])

        choice = raw_input("[ Download y/n ] ")

        if choice == "y":
            # filename
            local_filename = name + ".apk"

            if os.path.exists(local_filename):
                print "File Exists\n"
                return False
            else:

                r = requests.get(downPath, stream=True)
                with open(local_filename, 'wb') as f:
                    total_length = int(r.headers.get('content-length'))
                    for chunk in progress.bar(
                            r.iter_content(chunk_size=1024),
                            expected_size=(total_length / 1024) + 1):
                        if chunk:
                            f.write(chunk)
                            f.flush()

            print "\nSuccess!"

        else:
            print "Cancelled\n"
Example #52
def groups_flood_bot():
    """bot for groups flood"""
    init()
    choice = int(input('1 - Search groups by category, '
                       '2 - List of groups from a file: '))
    if choice == 1:
        category = input('Enter a category: ')
        find_group = vk_api.groups.search(q=category, type='group', count=1000)
        print(colored('Found {} groups'.format(find_group[0]), 'green'))

        message_for_flood = input('Enter a message: ')
        set_time = int(input('Set the delay in seconds [15-20 recommended]: '))
        for group_id in find_group[1:]:  # start flooding
            try:
                print('Posting to: ID ', group_id['gid'])
                vk_api.wall.post(owner_id=-group_id['gid'],
                                 message=message_for_flood)
                for _ in progress.bar(range(set_time)):
                    sleep(1)
                print(colored('Message delivered.', 'green'))
            except Exception:
                print(colored('Message not delivered.', 'red'))
                sleep(0.3)
    elif choice == 2:
        path = input('File: ')
        delay = int(input('Set the delay in seconds [15-20 recommended]: '))
        message = input('Enter a message: ')
        with open(path, 'r') as f:
            for i in cycle(f):
                try:
                    print('Posting to: ID {}'.format(i))
                    vk_api.wall.post(owner_id=-int(i), message=message)
                    for _ in progress.bar(range(delay)):
                        sleep(1)
                    print(colored('Message delivered', 'green'))
                except Exception:
                    print(colored('Message not delivered', 'red'))
                    sleep(1)
Example #53
0
def dwnfile(url, name):
    rsp = requests.get(url, stream=True)
    length = rsp.headers.get('content-length')
    if length is None:
        # Size unknown: stream without a progress bar
        with open(name, 'wb') as f:
            for chunk in rsp.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
    else:
        length = int(length)
        with open(name, 'wb') as f:
            for chunk in progress.bar(rsp.iter_content(chunk_size=1024),
                                      expected_size=(length // 1024) + 1):
                if chunk:
                    f.write(chunk)
Example #54
0
    def handle(self, *args, **options):
        terms = Term.objects.all()
        print "Updating terms: "
        for term in progress.bar(terms, every=max(1, len(terms) / 100)):
            code = self.TA_MAPPING.get(term.code[:3],
                                       self.TA_MAPPING.get(term.code[:5], ""))
            if code != '':
                term.system = code
                term.save()
        topography_images = Image.objects.filter(
            category__name_cs="15 Topografie")
        topography_terms = Term.objects.filter(id__in=Path.objects.filter(
            image__in=topography_images).select_related('term').values_list(
                'term', flat=True).distinct())
        print "Updating topography terms: "
        for term in progress.bar(topography_terms,
                                 every=max(1,
                                           len(topography_terms) / 100)):
            if (term is not None and term.slug != 'too-small'
                    and term.slug != 'no-practice'
                    and '15' not in term.system):
                term.system += '15'
                term.save()
Example #55
0
def downloaders(package, selected_proxy):
    url = package["url"]
    filename = package["title"]

    print("Downloading file named {} by proxy {}...".format(
        package, selected_proxy))
    r = requests.get(url, stream=True, proxies=selected_proxy)
    with open("data/" + filename + ".mp3", 'wb') as f:
        total_length = int(r.headers.get('content-length'))
        for chunk in progress.bar(r.iter_content(chunk_size=1024),
                                  expected_size=(total_length / 1024) + 1):
            if chunk:
                f.write(chunk)
            f.flush()
Example #56
0
    def download(self):
        url = self.get_url()
        r = self.rs.get(url, stream=True)
        chunk_size = 1024**2  # 1 MB
        print('HTTP status code: {}'.format(r.status_code))
        with open(self.filename, 'wb') as f:
            total_length = int(r.headers.get('content-length'))
            for chunk in progress.bar(
                    r.iter_content(chunk_size=chunk_size),
                    expected_size=(total_length // chunk_size) + 1):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    f.flush()
        print('Finished downloading {}'.format(self.filename))
Example #57
0
def download(making_links):
    for link in making_links:
        file_name = link.split('/')[-1]
        print("Downloading : " + file_name)
        r = requests.get(link, stream=True)
        with open(file_name, 'wb') as f:
            total_length = int(r.headers.get('content-length'))
            for chunk in progress.bar(r.iter_content(chunk_size=1024),
                                      expected_size=(total_length // 1024) + 1):
                if chunk:
                    f.write(chunk)
                    f.flush()
        print(file_name + " Downloaded ;)")
    print("All videos Downloaded")
Example #58
0
    def download(self,
                 packageName,
                 versionCode,
                 offerType=1,
                 progress_bar=False):
        """Download an app and return its raw data (APK file).

        packageName is the app unique ID (usually starting with 'com.').

        versionCode can be grabbed by using the details() method on the given
        app."""
        path = "purchase"
        data = "ot=%d&doc=%s&vc=%d" % (offerType, packageName, versionCode)
        message = self.executeRequestApi2(path, data)

        url = message.payload.buyResponse.purchaseStatusResponse.appDeliveryData.downloadUrl
        cookie = message.payload.buyResponse.purchaseStatusResponse.appDeliveryData.downloadAuthCookie[
            0]

        cookies = {
            str(cookie.name):
            str(cookie.value)  # python-requests #459 fixes this
        }

        headers = {
            "User-Agent":
            "AndroidDownloadManager/4.4.3 (Linux; U; Android 4.4.3; Nexus S Build/JRO03E)",
            "Accept-Encoding": "",
        }

        if not progress_bar:
            response = requests.get(url,
                                    headers=headers,
                                    cookies=cookies,
                                    verify=ssl_verify)
            return response.content
        # progress_bar requested: stream the download and accumulate the bytes
        from clint.textui import progress
        response_content = bytes()  # bytes() == str() under Python 2
        response = requests.get(url,
                                headers=headers,
                                cookies=cookies,
                                verify=ssl_verify,
                                stream=True)
        total_length = int(response.headers.get('content-length'))
        for chunk in progress.bar(response.iter_content(chunk_size=1024),
                                  expected_size=(total_length / 1024) + 1):
            if chunk:
                response_content += chunk
        return response_content
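A hedged usage sketch of this method; the `api` client name, the example package name, and the attribute path used to read the version code are assumptions based on the docstring above, not taken from the original:

# Assumed usage: api is an authenticated instance of the class that
# exposes details() and download(); the package name is illustrative.
details = api.details('com.example.app')
version_code = details.docV2.details.appDetails.versionCode  # hypothetical path
apk_data = api.download('com.example.app', version_code, progress_bar=True)
with open('com.example.app.apk', 'wb') as f:
    f.write(apk_data)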
Example #59
0
    def clean(self):
        """
        Clean up the raw data files from the state so they are
        ready to be loaded into the database.
        """
        if self.verbosity:
            self.header("Cleaning data files")

        # Loop through all the files in the source directory
        tsv_list = os.listdir(self.tsv_dir)
        for name in progress.bar(tsv_list):
            call_command("cleancalaccessrawfile",
                         name,
                         verbosity=self.verbosity)
Example #60
0
    def load(self):
        """
        Load the cleaned-up CSV files into the database.
        """
        if self.verbosity:
            self.header("Loading data files")

        model_list = get_model_list()
        for model in progress.bar(model_list):
            call_command(
                "loadcalaccessrawfile",
                model.__name__,
                verbosity=self.verbosity,
            )
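These two helpers are clearly meant to run back to back; a minimal sketch of a handle() that wires them together, assuming the command sets up self.verbosity and self.header() elsewhere:

    def handle(self, *args, **options):
        # Assumed wiring: clean the raw TSV files, then load the results
        self.verbosity = int(options.get('verbosity', 1))
        self.clean()
        self.load()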