Example #1
def draw(zDataCSVName):
    z_data = pd.read_csv(os.path.join(StartingDataDirectory, zDataCSVName))
    print(z_data)
    data = [
        go.Surface(
            z=z_data.to_numpy(),
            contours=go.surface.Contours(
                z=go.surface.contours.Z(
                    show=True,
                    usecolormap=True,
                    highlightcolor="#42f462",
                    project=dict(z=True)
                )
            )
        )
    ]

    layout = go.Layout(
        title='Testing Plotly',
        autosize=False,
        width=500,
        height=500,
        margin={
            'l':60,
            'r':50,
            'b':65,
            't':90
        }
    )

    fig = go.Figure(data=data, layout=layout)
    spinner = Spinner('Drawing ')
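    # The spinner only brackets the blocking iplot call below; it is never advanced with next().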
    plotly.offline.iplot(fig, filename='elevations-3d-surface')
    spinner.finish()
Example #2
def main(input_path, output_filename):
    """
    Google Vison API to CSV
  """

    # collect images from path
    images = []
    spinner = Spinner('collect images from %s  ' % input_path)
    for current_file in os.listdir(input_path):
        if current_file.endswith('.jpg'):
            images.append(current_file)
        spinner.next()
    spinner.finish()

    # analyze images with Google Vision API
    responses = []
    bar = Bar('analyze images', max=len(images))
    for current_img in images:
        request = generate_request(os.path.join(input_path, current_img))
        response = call_api(request)
        responses.append({'file': current_img, 'response': response})
        bar.next()
    bar.finish()
    #print(json.dumps(responses, indent=4))

    create_csv(responses, output_filename)
Example #3
    def start_broadcast(self, broadcast_id):
        data = json.dumps({
            '_uuid': self.uuid,
            '_uid': self.username_id,
            'should_send_notifications': int(self.sendNotification),
            '_csrftoken': self.token
        })

        if self.send_request(endpoint='live/' + str(broadcast_id) + '/start/',
                             post=self.generate_signature(data)):

            print('CTRL+C to quit.')
            spinner = Spinner(" - ")
            try:
                while True:
                    spinner.next()
            except KeyboardInterrupt:
                spinner.finish()
            except Exception as error:
                print(error)
                self.end_broadcast(broadcast_id)
Example #4
def _write_shard(path, source, shard_size, shard_index=None, verbose=True, bar=_DefaultBar):
    if shard_index is not None:
        path, ext = os.path.splitext(path)
        path = path + f"_{shard_index}" + ext

    if verbose:
        if hasattr(source, "__len__"):
            bar = bar(f"Writing to {path}", max=len(source))
        else:
            bar = Spinner(f"Writing to {path} ")
    else:
        bar = None

    with h5py.File(path, "w") as f:
        for example_index, example in enumerate(source):
            example = (example,) if isinstance(example, Tensor) else example
            for i, tensor in enumerate(example):
                key = f"data_{i}"
                if key not in f.keys():
                    f.create_dataset(key, (shard_size, *tensor.shape))
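                    # the dataset is allocated with shard_size rows, so the source must not yield more than shard_size examples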
                f[key][example_index, ...] = tensor
                if bar is not None:
                    bar.next()

        if bar is not None:
            bar.finish()

        if shard_index is not None:
            f.attrs["shard_index"] = int(shard_index)

    return path
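A minimal driver for _write_shard, assuming PyTorch tensors and that Tensor refers to torch.Tensor in the source module; the file name and shapes here are illustrative:

import torch

source = [torch.randn(3, 32, 32) for _ in range(8)]  # toy in-memory dataset
# verbose=False skips the progress bar; shard_index=0 yields "shard_0.h5"
out = _write_shard("shard.h5", source, shard_size=len(source),
                   shard_index=0, verbose=False)
print(out)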
Example #5
def get_template_counts(model_id):
    import tensorflow as tf
    import numpy as np
    print('Getting template counts for %s' % model_id)
    graph = tf.Graph()
    with graph.as_default():
        builder = get_builder(model_id)
        features, labels = builder.get_inputs(mode='train', repeat=False)
        spec = builder.get_estimator_spec(features, labels, mode='eval')
        predictions = spec.predictions
        probs = predictions['probs']
        counts = tf.argmax(probs, axis=-1)
        totals = np.zeros((builder.n_templates, ), dtype=np.int32)
        saver = tf.train.Saver()

        with tf.train.MonitoredSession() as sess:
            saver.restore(sess, tf.train.latest_checkpoint(builder.model_dir))
            spinner = Spinner()
            while not sess.should_stop():
                c = sess.run(counts)
                for ci in c:
                    totals[ci] += 1
                spinner.next()
                # break
            spinner.finish()
    return totals
Example #6
def get_data(query, variables):
    has_next = True
    cursor = None
    entities = []

    spinner = Spinner('Fetching Github Data')
    while has_next:
        spinner.next()
        variables['cursor'] = cursor

        rate_limit = get_rate_limit(client)
        handle_rate_limit(rate_limit)
        results = json.loads(client.execute(query, variables))

        if results['data'] and results['data']['search']['edges']:
            nodes = [edge['node'] for edge in results['data']['search']['edges']]
            for node in nodes:
                entities.append(parse_data(node))
            has_next = results['data']['search']['pageInfo']['hasNextPage']
            cursor = results['data']['search']['pageInfo']['endCursor']
        else:
            logger.warning(f'No data found: {results}')
            has_next = False

    spinner.finish()
    print('\n')
    return entities
Example #7
def trainTimeout(dataset):
    spinner = Spinner("Training 'no' GAN...")
    while True:
        #start = time.time()
        for vector_batch in dataset:
            train_step(vector_batch)
        spinner.next()
    spinner.finish()  # unreachable: the loop above has no exit condition
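The name suggests a timeout, but the loop never terminates. A sketch of a deadline-based variant; the function name and seconds parameter are illustrative, not from the source:

import time

def train_with_timeout(dataset, seconds=60):
    spinner = Spinner("Training 'no' GAN...")
    deadline = time.monotonic() + seconds
    while time.monotonic() < deadline:  # stop once the time budget is spent
        for vector_batch in dataset:
            train_step(vector_batch)
        spinner.next()
    spinner.finish()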
Example #8
def crawl(baseUrl, firstUrl, numListingsRequested):
    spinner = Spinner('Fetching listing URLs ')

    # crawl all urls, starting with the first search result page
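    # note: 'next' here is the project's own crawl helper (it takes four arguments), not the builtin next()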
    listingUrls = next(baseUrl, firstUrl, numListingsRequested, spinner)

    spinner.finish()
    return listingUrls
Example #9
def start_spin(msg: str):
    global spin
    spinner = Spinner("%s " % msg)
    spin = True
    while spin:
        spinner.next()
        time.sleep(0.1)
    spinner.finish()
    return
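A hypothetical driver for start_spin, assuming it runs in the same module so the global spin flag is shared:

import threading
import time

t = threading.Thread(target=start_spin, args=("Working",))
t.start()
time.sleep(2)   # stand-in for the real work
spin = False    # start_spin polls this flag and exits its loop
t.join()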
Example #10
def wait_and_get_im_sg_fd():
    #info("waiting for device to appear")
    progressBar = Spinner("Waiting for device: ")
    while True:
        sg_fd = get_im_sg_fd()
        if sg_fd:
            progressBar.finish()
            return sg_fd
        time.sleep(0.5)
        progressBar.next()
Example #11
    def do_backup(self, args):
        """ Back the current directory up to a hard drive if one is connected.
            Otherwise do nothing for now but something can surely be arranged
            with `dropbox` as well. Both would be nice.
        """
        IGNORE_PATTERNS = ["*.pyc", "tmp", ".git", "contrib", ".xfer", ".backup",
                           ".archive", ".gitignore"]

        global TOTAL
        TOTAL = size_of_dir(os.curdir, ignore=IGNORE_PATTERNS)
        global done
        done = 0
        global spinner
        spinner = None

        def progress_update(path, names):
            global done, TOTAL
            if self.SETTINGS["verbose"]:
                print("Backing up {}.".format(path))
            for f in names:
                ignore = False
                for s in IGNORE_PATTERNS:
                    if fnmatch.fnmatch(f, s):
                        ignore = True
                if not ignore:
                    path2add = os.path.join(path, f)
                    if not os.path.isdir(path2add):
                        done += os.path.getsize(path2add)
            spinner.next()
            return IGNORE_PATTERNS

        dest = []
        if not os.path.exists(BACKUP_DIR_PATH):
            os.makedirs(BACKUP_DIR_PATH)
        dest.append(BACKUP_DIR_PATH)
        if os.path.exists(DROPBOX):
            dest.append(DROPBOX)
        pprint(dest)
        try:
            for d in dest:
                spinner = Spinner("Backing files up to {} ".format(d))
                if os.path.exists(d):
                    shutil.rmtree(d)
                # `ignore` doubles as a progress hook: copytree calls it
                # for every directory it visits.
                shutil.copytree(os.curdir,
                                d,
                                symlinks=True,
                                ignore=progress_update)
                spinner.finish()
                print(NEWLINE)
        except shutil.Error as e:
            print("Something went wrong!")
            for x in e.args[0]:
                print("Error copying {} to {}: {}".format(*x))
Example #12
def test_bar():
    import time
    from progress.spinner import Spinner
    Spinner.phases = [
        '🕐', '🕑', '🕒', '🕓', '🕔', '🕕', '🕖', '🕗', '🕘', '🕙', '🕚', '🕛'
    ]
    _status_bar = Spinner("Downloading.. ", end="www")
    for i in range(100):
        time.sleep(0.02)
        _status_bar.next()
    _status_bar.finish()
Example #13
def spin_that(q):
    import queue
    import sys
    from progress.spinner import Spinner
    b = Spinner('Decrypting data with the device ', file=sys.stderr)
    for i in range(40):
        b.next()
        try:
            if q.get(block=True, timeout=0.5):
                break
        except queue.Empty:
            pass
    b.finish()
    sys.stderr.write(' done\n')
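A hypothetical driver for spin_that, assuming q is a standard queue.Queue shared with the worker that signals completion:

import queue
import threading

q = queue.Queue()
t = threading.Thread(target=spin_that, args=(q,))
t.start()
# ... do the actual decryption work here, then stop the spinner:
q.put(True)
t.join()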
Example #14
def unzip_files(path, folders=None, files=None):
    # avoid shared mutable default arguments
    folders = folders or []
    files = files or []
    contents = zipfile.ZipFile(path)

    # Unzip files from jar
    spin = Spinner("Unzipping files...")
    for f in contents.namelist():
        spin.next()
        for folder in folders:
            if f.startswith(folder):
                contents.extract(f)
    for f in files:
        contents.extract(f)
    spin.finish()
Example #15
def get_stars_forks_data(org, repo):
    stars = []
    stars_has_next = True
    forks = []
    forks_has_next = True
    stars_cursor = None
    forks_cursor = None

    spinner = Spinner('Fetching Stars and Forks')
    while stars_has_next or forks_has_next:
        spinner.next()
        variables = {
            "org": org,
            "repoName": repo,
            "size": 100,
            "starsCursor": stars_cursor,
            "forksCursor": forks_cursor,
        }
        query = get_data_query('graphql/stars_forks_data.gql')

        rate_limit = get_rate_limit(client)
        handle_rate_limit(rate_limit)
        results = json.loads(client.execute(query, variables))

        if results['data'] and results['data']['repository']['stargazers']['edges']:
            for edge in results['data']['repository']['stargazers']['edges']:
                stars.append({
                    'owner': org,
                    'repo': repo,
                    'createdAt': edge['starredAt'],
                })

        stars_has_next = results['data']['repository']['stargazers']['pageInfo']['hasNextPage']
        stars_cursor = results['data']['repository']['stargazers']['pageInfo']['endCursor']

        if results['data'] and results['data']['repository']['forks']['edges']:
            nodes = [edge['node'] for edge in results['data']['repository']['forks']['edges']]

            for node in nodes:
                forks.append({
                    'owner': org,
                    'repo': repo,
                    'createdAt': node['createdAt']
                })

        forks_has_next = results['data']['repository']['forks']['pageInfo']['hasNextPage']
        forks_cursor = results['data']['repository']['forks']['pageInfo']['endCursor']

    spinner.finish()
    return stars, forks
Example #16
def process_list(urlist, wanted, output):
    processed = []
    formatted = (out.rtf_format if output == "rtf" else
                 out.json_format if output == "json" else out.std_format)
    spinner = Spinner("Checking URLs ")
    with concurrent.futures.ThreadPoolExecutor() as executor:
        results = [executor.submit(get_status, url) for url in urlist]
        for connection in concurrent.futures.as_completed(results):
            status = connection.result()
            if status["desc"] in wanted:
                processed.append(formatted(status))
            spinner.next()
    spinner.writeln("\033[F")  # move cursor to the beginning of previous line
    spinner.finish()
    return processed
Example #17
def die():
    global alive
    alive = False
    print("\n[+] Cleaning up...")
    time.sleep(1)
    spinner = Spinner("Exiting...")
    for _ in range(10):
        spinner.next()
        time.sleep(0.1)
    spinner.finish()
    clear()
    if linux:
        os.system("reset")
        sys.exit(0)
    print("[+] Exited Successfully")
    os._exit(1)
Example #18
    def gen_samples_with_prob(self, samples):
        self.generator.drawSamples(samples)
        self.generator.toCSV(self.path + 'samples.csv')
        samplesToReturn = []
        # Load the csv and return samples as list
        with open(self.path + 'samples.csv', 'r') as read_obj:
            csv_dict_reader = DictReader(read_obj)
            spinner = Spinner("Loading samples...")
            for row in csv_dict_reader:
                asdict = dict(row)
                world = [int(value) for value in list(asdict.values())]
                prob = self.get_sampling_prob(asdict)
                samplesToReturn.append([world, asdict, prob])
                spinner.next()
            spinner.finish()
        return samplesToReturn
Example #19
    def program(self):
        if markov.info_state == "rational":
            conversation = " He must be really well-informed :-o"
        else:
            conversation = " I hope he makes a good decision..."

        for i in range(0, self.maximum):
            time.sleep(5)
            print("The starting state is " + markov.info_state + ", Master!" + conversation)
            program_spinner = Spinner("Simulating, Master!")
            while True:
                switching()
                program_spinner.next()
                if choice.current == build_up.destination:
                    break
            program_spinner.finish()
Example #20
    def analyse(self):
        """
        read from the messages queue, and generate:
        1. Counter for From field
        2. Counter for Time field (by hour)
        """

        # {'id': '16f39fe119ee8427', 'labels': ['UNREAD', 'CATEGORY_UPDATES', 'INBOX'], 'fields': {'from': 'Coursera <*****@*****.**>', 'date': 'Tue, 24 Dec 2019 22:13:09 +0000'}}

        with concurrent.futures.ThreadPoolExecutor() as executor:
            # run each stage in the executor while the main thread animates the spinner
            stages = [
                ('Loading messages', self._load_table),
                ('Analysing count', self._analyze_count),
                ('Analysing senders', self._analyze_senders),
                ('Analysing dates', self._analyze_date),
            ]
            for label, task in stages:
                progress = Spinner(f"{helpers.loader_icn} {label} ")
                event = Event()
                future = executor.submit(task, event)

                while not event.is_set() and future.running():
                    progress.next()
                    time.sleep(0.1)

                progress.finish()
Example #21
def banner():
    init()
    load = Spinner()
    for _ in range(5):
        time.sleep(1)
        load.next()
    load.finish()
    os.system("clear")

    print(
        colored(
            """
----------------------------------------------------------

████████╗░█████╗░██████╗░░█████╗░███╗░░██╗████████╗██╗░░░██╗██╗░░░░░░█████╗░  
╚══██╔══╝██╔══██╗██╔══██╗██╔══██╗████╗░██║╚══██╔══╝██║░░░██║██║░░░░░██╔══██╗  
░░░██║░░░███████║██████╔╝███████║██╔██╗██║░░░██║░░░██║░░░██║██║░░░░░███████║  
░░░██║░░░██╔══██║██╔══██╗██╔══██║██║╚████║░░░██║░░░██║░░░██║██║░░░░░██╔══██║  
░░░██║░░░██║░░██║██║░░██║██║░░██║██║░╚███║░░░██║░░░╚██████╔╝███████╗██║░░██║  
░░░╚═╝░░░╚═╝░░╚═╝╚═╝░░╚═╝╚═╝░░╚═╝╚═╝░░╚══╝░░░╚═╝░░░░╚═════╝░╚══════╝╚═╝░░╚═╝  

██╗░░░██╗  ██████╗░░░░░░███╗░░
██║░░░██║  ╚════██╗░░░░████║░░
╚██╗░██╔╝  ░░███╔═╝░░░██╔██║░░
░╚████╔╝░  ██╔══╝░░░░░╚═╝██║░░
░░╚██╔╝░░  ███████╗██╗███████╗
░░░╚═╝░░░  ╚══════╝╚═╝╚══════╝
----------------------------------------------------------
Version: 2.1
Note: Written for testing purposes; all responsibility lies with the user.
----------------------------------------------------------   """, "cyan"))
    print(
        colored(
            """
#########################
#  Created by Rei-ken   #
#########################""", "blue"))
    print(
        colored(
            """       
Github : https://github.com/Rei-ken/
Youtube: https://youtube.com/channel/UC0huPBEXz8EW8SekJWVE_JQ
Discord: https://discord.gg/fMWyY5b 
Web    : https://reiken.online/
""", "magenta"))
Example #22
    def getEntropy(self):
        cNodes = len(self.structure[0])
        samples = list(itertools.product([1, 0], repeat=cNodes))
        total = 0.00  # renamed from `sum`, which shadowed the builtin
        print(len(samples))
        spinner = Spinner("Calculating entropy...")
        for sample in samples:
            evidence = {i: sample[i] for i in range(0, len(sample))}
            prSample = self.get_sampling_prob(evidence)
            if prSample != 0:
                term = prSample * math.log2(prSample)
            else:
                term = 0
            total += term
            spinner.next()
        spinner.finish()
        return -total
Example #23
    def fetch_articles(self, output_progress=False):
        spinner = None
        if output_progress:
            spinner = Spinner('Loading articles ')

        articles_index = []

        last_fetch = self._configs.get('last_fetch')

        offset = 0
        count = 20
        while True:
            try:
                articles = self._pocket.retrieve(
                    state='unread',
                    count=count,
                    offset=offset,
                    since=last_fetch
                )
            except PocketException as e:
                if spinner:
                    spinner.finish()
                raise_from(self._check_exception(e), e)

            if not articles['list']:
                break

            articles_index.extend(self._get_articles_index(articles))

            offset += count
            if spinner:
                spinner.next()

        if spinner:
            spinner.finish()

        sort_field = self._configs.get('sort_field')
        if not sort_field:
            sort_field = 'reading_time'

        articles_index = sorted(articles_index,
                                key=itemgetter(sort_field))
        self._storage.write(articles_index)

        self._configs.set('last_fetch', self._get_timestamp(datetime.now()))
        self._configs.write()
Example #24
def get_expression_data(df, GTEX_DB, GTEX_COLUMNS):
    """
    This function get median gene expression values for 53 tissues from gtex db
    """
    new_df = pd.DataFrame().assign(**GTEX_COLUMNS)
    spinner = Spinner('Adding features ')
    for gene_name in df.gene_symbol:
        try:
            spinner.next()
            values = GTEX_DB.query(f'Description == "{gene_name}"').iloc[:, 2:]
            if len(values) != 1:
                values = pd.Series(GTEX_COLUMNS)
            new_df = new_df.append(values, ignore_index=True)
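            # DataFrame.append is deprecated in newer pandas; pd.concat is the modern replacement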
        except Exception:
            continue
    spinner.finish()
    print(new_df)
    return new_df
Example #25
def spinner():
    # Just a little bit of flash. Has no true functionality, apart from
    # possibly some psychological lift.
    print()

    spinner = Spinner("Fetching tasks ")
    for i in range(16):
        spinner.next()
        sleep(0.1)
    spinner.finish()

    spinner = Spinner("\rPicking a random task ")
    for i in range(16):
        spinner.next()
        sleep(0.1)
    spinner.finish()

    print "Here you go!"
Example #27
def Pb10():
    from progress.spinner import Spinner
    #from progress.spinner import MoonSpinner
    #from progress.spinner import PieSpinner
    #from progress.spinner import PixelSpinner
    #from progress.spinner import LineSpinner

    import time

    bar = Spinner('Progress bar 10', max=100)  # the max value of 100 is adjustable
    #bar = MoonSpinner('Progress bar 10', max=100)
    #bar = PieSpinner('Progress bar 10', max=100)
    #bar = PixelSpinner('Progress bar 10', max=100)
    #bar = LineSpinner('Progress bar 10', max=100)

    for i in range(100):  # adjust the iteration count to match
        bar.next()
        time.sleep(0.1)  # delay, adjustable; 0.1 to 1 second works best

    bar.finish()
Example #28
    def get_aws_images(self, os_version):
        """Get the images available"""
        ec2 = boto3.client('ec2', self._region)
        name_pattern = '*' + os_version + '*'  # renamed from `os`, which shadowed the os module
        filters = [{'Name': 'name', 'Values': [name_pattern]}]
        image_info = {}
        spinner = Spinner(OKGREEN + "Getting images from AWS" + ' ' + ENDC,
                          end=" ")
        aws_images = ec2.describe_images(Filters=filters)
        for image in aws_images['Images']:
            image_info[image['ImageId']] = {}
            image_info[image['ImageId']]['name'] = image['Name']
            image_info[image['ImageId']]['date'] = image['CreationDate']
        spinner.next()
        # Sort images by date
        sorted_images = dict(
            OrderedDict(sorted(image_info.items(),
                               key=lambda t: t[1]['date'])))
        spinner.finish()
        return sorted_images
Example #29
def create_csv(result, csv_filename):
    spinner = Spinner('create CSV file to %s  ' % csv_filename)
    with open(csv_filename, 'w') as f:
        writer = csv.writer(f, delimiter='\t')

        head = []
        head.append('FILE')
        head.append('DOMINANT_COLOR_RGB')
        for x in range(MAX_RESULTS):
            head.append('LABEL_' + str(x))
        writer.writerow(head)

        for response in result:
            #print(response)
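            # colors[1] takes the second-ranked dominant color from the Vision API response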
            row = [
                response['file'],
                "(%s, %s, %s)" % (
                    response['response']['responses'][0]
                    ['imagePropertiesAnnotation']['dominantColors']['colors']
                    [1]['color']['red'],
                    response['response']['responses'][0]
                    ['imagePropertiesAnnotation']['dominantColors']['colors']
                    [1]['color']['green'],
                    response['response']['responses'][0]
                    ['imagePropertiesAnnotation']['dominantColors']['colors']
                    [1]['color']['blue'],
                ),
            ]

            for x in range(MAX_RESULTS):
                try:
                    label = response['response']['responses'][0][
                        'labelAnnotations'][x]['description']
                except IndexError:
                    label = 'NULL'
                row.append(label)
            #print(row)

            writer.writerow(row)
            spinner.next()
        spinner.finish()
Example #30
    def program(self):  # main program operation
        if markov.info_state == "rational":
            conversation = " He must be really well-informed :-o"
        else:
            conversation = " I hope he makes a good decision..."
        for i in range(0, self.maximum):
            time.sleep(3)
            print("The starting state is %s, Master! %s" %
                  (markov.info_state, conversation))
            #print("Going from %s to %s!" % (build_up.source, build_up.destination))
            print("Going from %s to %s!" %
                  (choice.current, choice.destination))
            time.sleep(3)
            program_spinner = Spinner("Simulating, Master!")
            while True:
                switching()
                print("Running!")
                program_spinner.next()
                print(choice.choice_list)
                if choice.current == choice.destination:
                    break
            program_spinner.finish()
            build_up.set_points()
Example #31
def entry(wikisite, wikipath, user, password, dry, verbose, logfile, protocol,
          sleep, start):
    global isSilent, logfileName

    startMatched = False
    processed = 0
    updated = 0

    if verbose:
        isSilent = False
    else:
        if logfile:
            logfileName = logfile

    site = mwclient.Site(wikisite, wikipath, scheme=protocol)
    site.requests['timeout'] = 300
    site.login(user, password)

    if dry:
        echo("Doing a dry-run! No writes will be performed")

    if isSilent:
        spinner = Spinner('Loading ')

    for page in site.Categories['Glossary']:

        if start and not startMatched:
            if page.page_title != start:
                continue
            else:
                startMatched = True

        processed = processed + 1

        if page.namespace != 0:
            echo("\tNot a regular page!")
            continue

        echo(page.page_title)
        text = page.text()

        if '|Link=' not in text:
            echo("\tLink param not found!")
            continue

        wikicode = mwparserfromhell.parse(text)
        templates = wikicode.filter_templates(matches='Glossary')
        if not len(templates):
            echo("\tTemplate not found!")
            continue

        template = templates[0]
        if not template:
            echo("\tTemplate not found!")
            continue

        if template.has_param('Link'):
            link = template.get('Link')
            linkValue = link.value.rstrip()
            echo("\tLink: %s" % linkValue)

            if not validators.url(linkValue):
                echo("\t! The param value (%s) is not a URL!" % linkValue)
                continue

            r = requests.get('https://www.ebi.ac.uk/ols/api/terms?iri=%s' %
                             linkValue.replace('https://', 'http://'),
                             timeout=300)
            json = r.json()

            if '_embedded' not in json:
                echo("\t! The API response does not contain a _embedded list:")
                echo("\t\t%s" % json)
                continue

            if 'terms' not in json['_embedded']:
                echo("\t! The API response does not contain a terms list:")
                echo("\t\t%s" % json)
                continue

            terms = json['_embedded']['terms']
            if not len(terms):
                echo('\tTerms not found!')
                continue
            for term in terms:
                label = term['label']
                echo("\t\t%s" % label)
                is_obsolete = term['is_obsolete']
                replaced = term['term_replaced_by']

                if replaced and not validators.url(replaced):
                    echo(
                        "\t! The replacement is not a URL but a term ID: %s" %
                        replaced)
                    r2 = requests.get(
                        'https://www.ebi.ac.uk/ols/api/terms?id=%s' % replaced,
                        timeout=300)
                    json2 = r2.json()

                    if not json2:
                        echo("Unable to fetch URI for term replacement %s" %
                             replaced)
                        continue
                    if '_embedded' not in json2:
                        echo("Unable to fetch _embedded for term replacement %s"
                             % replaced)
                        continue
                    if 'terms' not in json2['_embedded']:
                        echo("Unable to fetch terms for term replacement %s" %
                             replaced)
                        continue
                    if not len(json2['_embedded']['terms']):
                        echo("Unable to fetch terms for term replacement %s" %
                             replaced)
                        continue

                    v = json2['_embedded']['terms'][0]['iri']
                    if not v:
                        echo("Unable to fetch URI for term replacement %s" %
                             replaced)
                        continue
                    replaced = v

                if replaced:
                    echo("\tTerm '%s' (Link: %s) is replaced by: %s" %
                         (label, linkValue, replaced))
                    link.value = '%s\n' % replaced
                    if not dry:
                        page.edit(text=str(wikicode),
                                  summary='Updating EFO links: %s -> %s' %
                                  (linkValue, replaced))
                        updated = updated + 1
                    echo("----------------")
                    echo(str(wikicode))
                    echo("----------------")
                    # all is done for the term, go next
                    continue

        else:
            echo("\t! Link param not found")
            continue

        if isSilent:
            spinner.next()
        if sleep:
            time.sleep(sleep)

    if isSilent:
        spinner.finish()

    echo("Done!")
    echo("\nProcessed %s pages, Updated %s pages" % (processed, updated))
Example #32
            print("The following error message was received from SLACK API: " + str(response['error']))
            continue

        spinner = Spinner('Looking for files...\n')

        if fileCount > 0:
            print("Parsing " + str(fileCount) + " files...\n", end='', file=sys.stdout)
            spinner.next()
        else:
            print("No files older than 10 days found.", file=sys.stdout)

        spinning = True
        while spinning:

            if len(response["files"]) == 0:
                spinner.finish()
                spinning = False
                break
            elif whileCount >= fileCount and whileCount > 1:
                spinner.finish()
                spinning = False
                sys.stdout.flush()
                sys.stdout.write("We couldn't delete some files posted by other users on private conversations.")
                break
            else:
                iteratorCounter = 0
                spinner.next()
                for f in response["files"]:
                    iteratorCounter += 1

                    #get user info for this file
Example #33
    def update(self, file_name=None, file_path=None):
        # If no file name or file path is set, use the command line arguments.
        if file_name is None and file_path is None:
            file_name = sys.argv[2]
            file_path = sys.argv[3]

        # Get Drive service.
        driveConnect = drive_api.get_service()

        # Check if a remote file with the given name exists. If one does not, print an error message and return.
        if not drive_api.file_with_name_exists(driveConnect, file_name):
            print('Remote file with name ' + file_name + ' does not exist.')
            return

        # Get directory ID.
        dirId = drive_api.get_file_id_from_name(driveConnect, file_name)

        # Get a list of the fragments that currently make up the file. If this is a new upload, it should come back empty.
        orig_fragments = drive_api.get_files_list_from_folder(
            driveConnect, dirId)

        # Determine if upload is taking place from an HTTP or HTTPS URL.
        urlUpload = False
        if file_path[0:4].lower() == 'http':
            urlUpload = True
            urlUploadHandle = requests.get(file_path,
                                           stream=True,
                                           allow_redirects=True)

        fileSize = -1  # If file is being uploaded from web server and size cannot be retrieved this will stay at -1.
        if urlUpload:
            try:
                fileSize = int(urlUploadHandle.headers.get('content-length'))
            except TypeError:
                pass
            if fileSize == -1:
                # If fileSize is set to -1, set totalFrags to "an unknown number of"
                totalFrags = 'an unknown number of'
        else:
            fileSize = os.stat(file_path).st_size

        if fileSize != -1:
            totalFrags = math.ceil(fileSize / 10223999)
        print('Upload started. Upload will be composed of ' + str(totalFrags) +
              ' fragments.\n')

        # Set chunk size for reading files to 9.750365257263184MB (10223999 bytes)
        readChunkSizes = 10223999

        # Doc number
        docNum = 1

        # Used to keep track of the numbers for fragments that have failed uploads.
        failedFragmentsSet = set()

        # Progress bar
        if fileSize == -1:
            # The file size is unknown
            upBar = Spinner('Uploading... ')
        else:
            # The file size is known
            upBar = ShadyBar('Uploading...',
                             max=max(math.ceil(fileSize / 10223999),
                                     len(orig_fragments)))

        if urlUpload:
            # If the upload is taking place from a URL...
            # Iterate through remote file until no more data is read.
            for fileBytes in urlUploadHandle.iter_content(
                    chunk_size=readChunkSizes):
                # Advance progress bar
                upBar.next()

                if docNum <= len(orig_fragments):
                    # A remote fragment is present, so update it.
                    upload_handler.handle_update_fragment(
                        drive_api, orig_fragments[docNum - 1], fileBytes,
                        driveConnect, docNum, self.debug_log)
                else:
                    # Process the fragment and upload it to Google Drive.
                    upload_handler.handle_upload_fragment(
                        drive_api, fileBytes, driveConnect, dirId, docNum,
                        failedFragmentsSet, self.debug_log)

                # Increment docNum for next Word document.
                docNum = docNum + 1

                # Run garbage collection. Hopefully, this will prevent process terminations by the operating system on memory-limited devices such as the Raspberry Pi.
                gc.collect()
        else:
            # If the upload is taking place from a file path...
            # Get file byte size
            fileSize = os.path.getsize(file_path)

            # Iterate through file in chunks.
            infile = open(str(file_path), 'rb')

            # Read an initial chunk from the file.
            fileBytes = infile.read(readChunkSizes)

            # Keep looping until no more data is read.
            while fileBytes:
                # Advance progress bar
                upBar.next()

                if docNum <= len(orig_fragments):
                    # A remote fragment is present, so update it.
                    upload_handler.handle_update_fragment(
                        drive_api, orig_fragments[docNum - 1], fileBytes,
                        driveConnect, docNum, self.debug_log)
                else:
                    # Process the fragment and upload it to Google Drive.
                    upload_handler.handle_upload_fragment(
                        drive_api, fileBytes, driveConnect, dirId, docNum,
                        failedFragmentsSet, self.debug_log)

                # Increment docNum for next Word document and read next chunk of data.
                docNum = docNum + 1
                fileBytes = infile.read(readChunkSizes)

                # Run garbage collection. Hopefully, this will prevent process terminations by the operating system on memory-limited devices such as the Raspberry Pi.
                gc.collect()

            infile.close()

        # If an update took place and the new file had fewer fragments than the previous file, delete any leftover fragments from the previous upload.
        docNum = docNum - 1
        while docNum < len(orig_fragments):
            upBar.next()
            drive_api.delete_file_by_id(drive_api.get_service(),
                                        orig_fragments[docNum]['id'])
            docNum = docNum + 1

        # Process fragment upload failures
        upload_handler.process_failed_fragments(drive_api, failedFragmentsSet,
                                                dirId, self.debug_log)

        upBar.finish()

        # If the number of fragments to expect from a file upload is known, verify that the upload is not corrupted.
        if totalFrags != 'an unknown number of':
            print('Verifying upload.')
            foundFrags = len(
                drive_api.get_files_list_from_folder(drive_api.get_service(),
                                                     dirId))
            if totalFrags != foundFrags:
                self.debug_log.write(
                    "----------------------------------------\n")
                self.debug_log.write(
                    "InfiniDrive detected upload corruption.\n")
                self.debug_log.write("Expected Fragments: " + str(totalFrags) +
                                     "\n")
                self.debug_log.write("Actual Fragments  : " + str(foundFrags) +
                                     "\n")
                print(
                    'InfiniDrive has detected that your upload was corrupted. Please report this issue on the InfiniDrive GitHub issue tracker and upload your "log.txt" file.'
                )

        print('Upload complete!')