Example #1
def get_trans_and_categories_from_mint(mint_client, oldest_trans_date):
    # Create a map of Mint category name to category id.
    logger.info('Creating Mint Category Map.')
    start_time = time.time()
    asyncSpin = AsyncProgress(Spinner('Fetching Categories '))
    categories = {cat_dict['name']: cat_id
                  for (cat_id, cat_dict) in mint_client.get_categories().items()}
    asyncSpin.finish()

    today = datetime.datetime.now().date()
    # Double the length of transaction history to aid in
    # personalized category tagging overrides.
    start_date = today - (today - oldest_trans_date) * 2
    start_date_str = start_date.strftime('%m/%d/%y')
    logger.info('Get all Mint transactions since {}.'.format(start_date_str))
    asyncSpin = AsyncProgress(Spinner('Fetching Transactions '))
    transactions = mint_client.get_transactions_json(start_date=start_date_str,
                                                     include_investment=False,
                                                     skip_duplicates=True)
    asyncSpin.finish()

    dur = s_to_time(time.time() - start_time)
    logger.info('Got {} transactions and {} categories from Mint in {}'.format(
        len(transactions), len(categories), dur))

    return transactions, categories
Example #2
    def clean(self, password: str):
        if platform == 'linux':
            try:
                install_progress = Spinner(message='Cleaning Up Packages ')

                for _ in range(1, 75):
                    time.sleep(0.007)
                    install_progress.next()

                proc = Popen('sudo apt-get -y autoremove'.split(),
                             stdin=PIPE,
                             stdout=PIPE,
                             stderr=PIPE)

                proc.communicate(password.encode())

                for _ in range(1, 26):
                    time.sleep(0.007)
                    install_progress.next()

                click.echo('\n')
                click.echo(
                    click.style('🎉 Successfully Cleaned Turbocharge! 🎉',
                                fg='green'))

            except subprocess.CalledProcessError as e:
                click.echo(e.output)
                click.echo('An Error Occurred During Cleanup...', err=True)

        elif platform == 'win32':
            pass  # chocolatey auto removes files

        elif platform == 'darwin':
            try:
                install_progress = Spinner(message='Cleaning Up Packages ')

                for _ in range(1, 75):
                    time.sleep(0.007)
                    install_progress.next()

                proc = Popen('brew cleanup'.split(),
                             stdin=PIPE,
                             stdout=PIPE,
                             stderr=PIPE)

                # proc.communicate(password.encode())

                for _ in range(1, 26):
                    time.sleep(0.007)
                    install_progress.next()

                click.echo('\n')
                click.echo(
                    click.style('🎉 Successfully Cleaned Turbocharge! 🎉',
                                fg='green'))

            except subprocess.CalledProcessError as e:
                click.echo(e.output)
                click.echo('An Error Occurred During Cleanup...', err=True)
Example #3
    def analyse(self):
        """
        read from the messages queue, and generate:
        1. Counter for From field
        2. Counter for Time field (by hour)
        """

        # {'id': '16f39fe119ee8427', 'labels': ['UNREAD', 'CATEGORY_UPDATES', 'INBOX'], 'fields': {'from': 'Coursera <*****@*****.**>', 'date': 'Tue, 24 Dec 2019 22:13:09 +0000'}}

        with concurrent.futures.ThreadPoolExecutor() as executor:
            progress = Spinner(f"{helpers.loader_icn} Loading messages ")

            event = Event()

            future = executor.submit(self._load_table, event)

            while not event.is_set() and future.running():
                progress.next()
                time.sleep(0.1)

            progress.finish()

            progress = Spinner(f"{helpers.loader_icn} Analysing count ")

            event = Event()

            future = executor.submit(self._analyze_count, event)

            while not event.is_set() and future.running():
                progress.next()
                time.sleep(0.1)

            progress.finish()

            progress = Spinner(f"{helpers.loader_icn} Analysing senders ")

            event = Event()

            future = executor.submit(self._analyze_senders, event)

            while not event.is_set() and future.running():
                progress.next()
                time.sleep(0.1)

            progress.finish()

            progress = Spinner(f"{helpers.loader_icn} Analysing dates ")

            event = Event()

            future = executor.submit(self._analyze_date, event)

            while not event.is_set() and future.running():
                progress.next()
                time.sleep(0.1)

            progress.finish()
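
The four stanzas above repeat one pattern: submit the work to the executor, then tick the spinner until the task signals completion. A minimal, self-contained sketch of that pattern (the helper name run_with_spinner and the dummy workload are illustrative, not from the source):

import time
import concurrent.futures
from threading import Event

from progress.spinner import Spinner


def run_with_spinner(message, task):
    # Run task(event) in a worker thread and spin until it signals completion.
    with concurrent.futures.ThreadPoolExecutor() as executor:
        event = Event()
        future = executor.submit(task, event)
        progress = Spinner(message)
        while not event.is_set() and future.running():
            progress.next()
            time.sleep(0.1)
        progress.finish()
        return future.result()


def slow_task(event):
    time.sleep(2)  # stand-in for real work
    event.set()    # tell the spinner loop we are done


run_with_spinner('Working ', slow_task)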
Example #4
    def start_broadcast(self, broadcast_id):
        data = json.dumps({
            '_uuid': self.uuid,
            '_uid': self.username_id,
            'should_send_notifications': int(self.sendNotification),
            '_csrftoken': self.token
        })

        if self.send_request(endpoint='live/' + str(broadcast_id) + '/start/',
                             post=self.generate_signature(data)):

            print('CTRL+C to quit.')
            spinner = Spinner(" - ")
            try:
                while True:
                    spinner.next()
            except KeyboardInterrupt:
                spinner.finish()
            except Exception as error:
                print(error)
                self.end_broadcast(broadcast_id)
Example #5
def project(name):
    dirs = os.listdir('./')
    if name in dirs:
        print(
            'Project with same name already present. Start one with another name.'
        )
        exit()
    files = [
        'trainers', 'models', 'data', 'data_loader', 'preprocessors',
        'results', 'executables'
    ]
    files = ['./' + name] + ['./' + name + '/' + f for f in files]
    spinner = Spinner('Generating a New Project: ')
    for f in files:
        os.makedirs(f, exist_ok=True)
        time.sleep(0.1)
        spinner.next()
        time.sleep(0.1)
        spinner.next()
    print()
    print('The project ' + name + ' has been initialised.')
    with open('.active', 'w') as file:
        file.write(name)

    print('Copy paste the data into the ./' + name + '/data/ directory.')
Example #6
def _write_shard(path, source, shard_size, shard_index=None, verbose=True, bar=_DefaultBar):
    if shard_index is not None:
        path, ext = os.path.splitext(path)
        path = path + f"_{shard_index}" + ext

    if verbose:
        if hasattr(source, "__len__"):
            bar = bar(f"Writing to {path}", max=len(source))
        else:
            bar = Spinner(f"Writing to {path} ")
    else:
        bar = None

    with h5py.File(path, "w") as f:
        for example_index, example in enumerate(source):
            example = (example,) if isinstance(example, Tensor) else example
            for i, tensor in enumerate(example):
                key = f"data_{i}"
                if key not in f.keys():
                    f.create_dataset(key, (shard_size, *tensor.shape))
                f[key][example_index, ...] = tensor
                if bar is not None:
                    bar.next()

        if bar is not None:
            bar.finish()

        if shard_index is not None:
            f.attrs["shard_index"] = int(shard_index)

    return path
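
A quick usage sketch for the sharding helper above. The imports are assumptions, since the excerpt does not show them; it appears to expect PyTorch tensors, h5py, and a progress Bar bound to _DefaultBar:

import os

import h5py
import torch
from torch import Tensor
from progress.bar import Bar as _DefaultBar
from progress.spinner import Spinner

# ... _write_shard as defined above ...

source = [torch.randn(3, 4) for _ in range(8)]
# Writes shards_0.h5 containing one (8, 3, 4) dataset under "data_0".
path = _write_shard('shards.h5', source, shard_size=len(source), shard_index=0)
print(path)  # shards_0.h5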
Example #7
    def __init__(self, origin_url: str, save_file: str):
        """
            This download module receives two parameters: the URL to fetch the
            file from and a name under which to store it. It starts the download
            and shows a progress bar or a spinner, depending on whether it can
            obtain the size of the file.

            Arguments:
                origin_url {str}: URL where the file is located
                save_file {str}: Path where the content of the file will be saved
        """
        request_call = requests.get(origin_url, stream=True)

        file_size = request_call.headers.get("content-length")
        if file_size:
            progress_status = Bar(save_file, max=int(file_size))
        else:
            progress_status = Spinner(save_file)

        with open(save_file, "wb") as file_:
            for chunk in request_call.iter_content(chunk_size=1024 * 50):
                if chunk:  # filter out keep-alive new chunks
                    progress_status.next(len(chunk))
                    file_.write(chunk)

        progress_status.finish()
Example #8
def check_resource():
    state = ""
    spinner = Spinner('Checking for new resource...')

    while state != 'FINISHED':
        # Count the running vaas-deploy-vm processes.
        pcounter = subprocess.Popen(['pgrep', '[v]aas-deploy-vm'],
                                    stdout=subprocess.PIPE,
                                    shell=False)
        nproc = len(pcounter.communicate()[0].split())
        if nproc == 1:

            state = "FINISHED"
            os.popen('setterm -cursor on').read()

        else:
            # Priority-queue handling: list all matching processes and let
            # the oldest one (lowest PID here) proceed first.
            father = subprocess.Popen(['pgrep', '[v]aas-deploy-vm'],
                                      stdout=subprocess.PIPE,
                                      shell=False)
            response = father.communicate()[0]
            plist = [int(pid) for pid in response.split()]
            if plist and min(plist) == os.getpid():
                state = "FINISHED"

            time.sleep(2)
            spinner.next()
Example #9
def get_mint_client(args):
    email = args.mint_email
    password = args.mint_password

    if not email:
        email = input('Mint email: ')

    if not password:
        password = keyring.get_password(KEYRING_SERVICE_NAME, email)

    if not password:
        password = getpass.getpass('Mint password: ')

    if not email or not password:
        logger.error('Missing Mint email or password.')
        exit(1)

    asyncSpin = AsyncProgress(Spinner('Logging into Mint '))

    mint_client = Mint.create(email, password)

    # On success, save off password to keyring.
    keyring.set_password(KEYRING_SERVICE_NAME, email, password)

    asyncSpin.finish()

    return mint_client
Example #10
def FindFiles():
    cnt = 0
    spinner = Spinner('Finding Files ')
    with open("logs/path.txt", "a") as f:
        for root, dirs, files in os.walk("/"):
            # for root, dirs, files in os.walk("/YOUR/TESTING/DIRECTORY"):
            if any(s in root for s in EXCLUDE_DIRECTORY):
                spinner.next()
                continue
            for file in files:
                if file.endswith(EXCLUDE):
                    cnt += 1
                    TARGET = os.path.join(root, file)
                    f.write(TARGET + '\n')
                    spinner.next()
                    print(root)

    print()
    print("Found {} target files".format(cnt))
    print()
Example #11
def main(args):
    print('main')
    if len(args) != 3:
        print('Error: Invalid number of arguments')
        printUsage()
        return
    elif not os.path.isdir(args[1]) or not os.path.isdir(args[2]):
        print('Error: Invalid inputs. Make sure directories exist already.')
        printUsage()
        return
    mars_input_path = os.path.abspath(args[1])
    mars_imu_path = mars_input_path + '/mars_imu.txt'
    mars_image_path = mars_input_path + '/images'
    euroc_output_path = os.path.abspath(args[2]) + '/euroc_format'
    euroc_imu_path = euroc_output_path + '/imu0'
    euroc_cam_path = euroc_output_path + '/cam0'
    if os.path.isdir(euroc_output_path):
        shutil.rmtree(euroc_output_path)
    os.mkdir(euroc_output_path)
    os.mkdir(euroc_imu_path)
    os.mkdir(euroc_cam_path)
    # convert IMU data
    spinner = Spinner()
    imu_conversion_success = convertMARSIMUToEUROC(mars_imu_path, euroc_imu_path, spinner)
    camera_conversion_success = convertMARSImagestoEUROC(mars_image_path, euroc_cam_path, spinner)
    if imu_conversion_success and camera_conversion_success:
        print('Conversion complete. Written to ' + euroc_output_path + '\n')
    else:
        print('Error converting euroc dataset')
Example #12
    def run(self):
        self.running = True
        spinner = Spinner('[' + Fore.GREEN + '*' + St.RESET_ALL + '] ' +
                          '{} '.format(self.message))
        while self.running:
            spinner.next()
            sleep(0.2)
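
For context, run above is presumably the body of a threading.Thread subclass that is stopped by flipping self.running. A minimal self-contained version of that idea (the class name SpinnerThread and the stop method are illustrative):

import threading
from time import sleep

from progress.spinner import Spinner


class SpinnerThread(threading.Thread):
    def __init__(self, message):
        super().__init__(daemon=True)
        self.message = message
        self.running = False

    def run(self):
        self.running = True
        spinner = Spinner('{} '.format(self.message))
        while self.running:
            spinner.next()
            sleep(0.2)

    def stop(self):
        self.running = False  # run() exits on its next loop check


t = SpinnerThread('Working')
t.start()
sleep(2)  # real work would happen here
t.stop()
t.join()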
Example #13
def nmap(host, user_flags):
    flags = ["sudo", "nmap", host]
    if user_flags:
        flags += user_flags
    else:
        flags += ["-Pn", "-sS", "-v"]
    nmap = subprocess.Popen(flags, stdout=subprocess.PIPE, universal_newlines=True)
    
    ports = []
    spinner = Spinner("Nmapping host...")
    while True:
        spinner.next()
        line = nmap.stdout.readline()
        if line == "" and nmap.poll() is not None:
            break
        if line:
            if re.search("/.*open", line):
                ports.append(line.strip())

    # for printing out results
    lengths = [29, len(host) + 6, len(sorted(ports, key=len)[-1]) + 9]
    longest = max(lengths)
    
    # if nikto was also requested and port 80 and/or 443 is open, those ports will be passed to nikto
    open_ports = []
    print("\n\n+{dash}+\n"
          "|Host: {host}{space1}|\n"
          "+{dash}+\n"
          "|PORT{space2}STATE{space3}SERVICE{space4}|\n"
          "+{dash}+".format(host=host,
                            space1=" " * (longest - len(host) - 4),
                            space2=" " * 10,
                            space3=" " * 3,
                            space4=" " * (longest - 27),
                            dash="-" * (longest + 2)))
    for p in ports:
        items = p.split()
        service = " ".join(items[2:])
        print("|{port}{space1}{state}{space2}{service}{space3}|".format(
            port=items[0],
            space1=" " * (14 - len(items[0])),
            state=items[1],
            space2=" " * (8 - len(items[1])),
            service=service,
            space3=" " * (longest - len(service) - 20)))
        open_ports.append(re.compile(r'/.*').sub('', items[0]))
    print("+{dash}+\n".format(dash="-"*(longest+2)))
        
    return open_ports
Example #14
    def crawl(self, pages, depth=1):
        spinner = Spinner('Searching ')
        for i in range(depth):
            newpages = []
            for page in pages:
                try:
                    headers = {'User-Agent': 'Mozilla/5.0'}
                    req = Request(page, None, headers)
                    c = urlopen(req)
                except URLError as e:
                    print(e)
                    print("Could not open %s" % page)
                    continue
                soup = BeautifulSoup(c.read(), "html.parser")

                links = soup('a')
                for link in links:
                    if 'href' in dict(link.attrs):
                        url = urljoin(page, link['href'])
                        url = url.split('#')[0]  # remove location portion
                        values = self.split_words(link.getText())
                        if len(values) > 3 and self.is_useful(
                                url) and not self.is_indexed(values):
                            self.add_to_index(url, values)
                        # We only parse forum menu pages since they contain thread titles
                        if self.is_menu(url) and url not in pages:
                            newpages.append(url)
                        spinner.next()
            # Update pages to crawl
            pages = newpages
Example #15
def download_model(name, url):
    """
    Downloads a large model file.
    Returns the hash of the newly downloaded model
    and the location of the model in the temp folder.
        :param url: string URL where the model is located
        :param name: string name of the model
    """
    # https://stackoverflow.com/questions/16694907/download-large-file-in-python-with-requests
    local_filename = url.split('/')[-1]

    local_filename = TEMP_LOCATION + local_filename

    full_hash = sha3_256()
    with requests.get(url, stream=True) as r:
        size = r.headers.get('content-length')
        if size:
            p = ShadyBar(local_filename, max=int(size))
        else:
            p = Spinner(local_filename)

        with open(local_filename, 'wb') as f:
            for chunk in r.iter_content(chunk_size=8192):
                if chunk:  # filter out keep-alive new chunks
                    p.next(len(chunk))
                    f.write(chunk)
                    full_hash.update(chunk)
                    # f.flush()

    unique_filename = MODEL_LOCATION + name + ".h5"
    os.rename(local_filename, unique_filename)

    return full_hash.hexdigest(), unique_filename
Example #16
    def start(cls,
              spinner_type=None,
              sleep=0.5,
              running_message='Loading ',
              end_message='Done.'):

        if spinner_type is None:
            spinner_type = 0 if sdconfig.config.get(
                'interface', 'unicode_term') == '0' else 1

        # check
        if not cls.is_message_correct(running_message):
            raise SDException('SDPROGRE-001', 'Incorrect message')
        if not cls.is_message_correct(end_message):
            raise SDException('SDPROGRE-002', 'Incorrect message')

        cls._stop_event.clear()
        cls.end_message = end_message

        if spinner_type == 0:
            spinner = Spinner(running_message)
        elif spinner_type == 1:
            spinner = K2KSpinner(running_message)

        spinner_thread = cls.MySpinnerThread(spinner, sleep, cls._stop_event)
        spinner_thread.daemon = True
        spinner_thread.start()
Example #17
def preprocess(name):
    path_folder = './' + current() + '/preprocessors/'
    files = os.listdir(path_folder)
    if name + '.py' in files:
        ans = input(
            'Preprocessor with same name already defined. Enter "Y" to replace it or anything else to stop this process:  '
        ).lower()
        if ans == 'y' or ans == 'yes':
            pass
        else:
            print('Stopped Execution.')
            exit()
    path = path_folder + name + '.py'
    base = """\
import numpy as np

### Do not duplicate or change the name of the function
### preprocess. Return x and y; make sure that even if y
### is a 1D array, you convert it into a 2D one. Both should
### ideally be numpy arrays.

def preprocess(x, y):
	...
	return x, y
"""
    with open(path, 'w') as file:
        file.write(base)
    spinner = Spinner(
        'Defining a new preprocessing function with the name:  ' + name + ' ')
    for _ in range(10):
        time.sleep(0.1)
        spinner.next()
    os.system('kaa ' + path)
Example #18
    def get_aws_regions_azs(self):
        """Get regions and AZs"""
        ec2 = boto3.client('ec2')
        regions_az = {}
        if not os.path.isfile(REGION_CACHE_FILENAME + self.get_provider()):
            spinner = Spinner(
                '\033[1;32;40m getting regions and azs from AWS ')
            # Retrieves all regions/endpoints that work with EC2
            aws_regions = ec2.describe_regions()
            # Get a list of regions and then instantiate a new ec2 client for each
            # region in order to get list of AZs for the region
            for region in aws_regions['Regions']:
                spinner.next()
                my_region_name = region['RegionName']
                ec2_region = boto3.client('ec2', region_name=my_region_name)
                my_region = [{
                    'Name': 'region-name',
                    'Values': [my_region_name]
                }]
                aws_azs = ec2_region.describe_availability_zones(
                    Filters=my_region)
                for az in aws_azs['AvailabilityZones']:
                    zone = az['ZoneName']
                    regions_az.setdefault(my_region_name, set()).add(zone)
            # cache the results
            cache_write_data(REGION_CACHE_FILENAME + self.get_provider(),
                             regions_az)
        else:
            regions_az = cache_read_data(REGION_CACHE_FILENAME +
                                         self.get_provider())

        return regions_az
Example #19
def model(name):
    path_folder = './' + current() + '/models/'
    files = os.listdir(path_folder)
    if name + '.py' in files:
        ans = input(
            'Model with same name already defined. Enter "Y" to replace it or anything else to stop this process:  '
        ).lower()
        if ans == 'y' or ans == 'yes':
            pass
        else:
            print('Stopped Execution.')
            exit()
    path = path_folder + name + '.py'
    base = """\
import tensorflow as tf

### Do not duplicate or change the name of the function
### build_model. Return the model defined by you.

def build_model():
	model = tf.keras.Sequential([
		tf.keras.Input(shape=(input_shape)),
		...
	])
	model.compile(loss=<loss>, metrics=<metrics>)
	return model
"""
    with open(path, 'w') as file:
        file.write(base)
    spinner = Spinner('Defining a new model with the name:  ' + name + ' ')
    for _ in range(10):
        time.sleep(0.1)
        spinner.next()
    os.system('kaa ' + path)
Example #20
    def start(self, m_filter=None):
        m_filter = dict() if not m_filter else m_filter
        client = MongoClient(self.mongo_host,
                             self.mongo_port,
                             maxPoolSize=self.mongo_max_pool_size)
        db = client[self.mongo_db_name]
        document_name = db[self.mongo_document_name]

        mongo_where = m_filter.get('mongo_condition', {})
        # get all data from mongoDB db
        m_data = document_name.find(mongo_where)

        es = self.es
        if not es:
            es = Elasticsearch(
                ['localhost'],
                use_ssl=False,
            )
        i = 1
        spinner = Spinner('Importing... ')
        for line in m_data:
            docket_content = line
            # remove _id from mongo object
            del docket_content['_id']
            try:
                es.index(index=self.es_index_name,
                         doc_type=self.es_doc_type,
                         id=i,
                         body=docket_content)
            except Exception as error:
                print("Error for document", i, ":", error)
            i += 1
            spinner.next()
        client.close()
        return True
Example #21
def get_mint_client(args):
    email = args.mint_email
    password = args.mint_password

    if not email:
        email = os.getenv('MINT_EMAIL', None)

    if not email:
        email = input('Mint email: ')

    # This was causing my grief. Let's let it rest for a while.
    # if not password:
    #     password = keyring.get_password(KEYRING_SERVICE_NAME, email)

    if not password:
        password = getpass.getpass('Mint password: ')

    if not email or not password:
        logger.error('Missing Mint email or password.')
        exit(1)

    asyncSpin = AsyncProgress(Spinner('Logging into Mint '))

    mint_client = Mint.create(email, password)

    # On success, save off password to keyring.
    keyring.set_password(KEYRING_SERVICE_NAME, email, password)

    asyncSpin.finish()

    return mint_client
Example #22
    def _perform_spinner_request(self, url, method, method_name, data=None, payload=None, **headers):
        if self.state.verbose:
            click.echo('Sending {} request to {}'.format(method_name, url))
            if payload:
                self._echo_spinner_request_payload(payload)

        performer = AsyncFileUploader(url, method, data=data, payload=payload, **headers)
        performer.start()

        spinner = Spinner()
        while performer.is_alive():
            if self.state.verbose:
                spinner.next()

        response, error = performer.finish()

        # If we have an error and it is an ArcsecondError, raise it.
        # As for now, only ArcsecondError could be returned, and there is no
        # real point of returning both response and error below. But
        # methods in main.py expect them both.

        if error and isinstance(error, ArcsecondError):
            raise error

        if self.state.verbose:
            self._echo_spinner_request_result(error, response)

        return response, error
Example #23
    def wait_all_replication_to_desire(self,
                                       message="Wait for replica sets: ",
                                       namespace="default"):
        """Check all replicasets and need to be running in the given namespace

        :param message: spinner message, defaults to "Wait for replica sets: "
        :type message: str, optional
        :param namespace: namespace for check, defaults to "default"
        :type namespace: str, optional
        """
        filter = {"watch": False, "namespace": namespace}
        need_wait = True
        spinner = Spinner(message)
        while need_wait:
            for i in range(15):
                spinner.next()
                sleep(0.2)

            need_wait = False
            # Can't use filter because can't get replicasets from specific namespace
            api_response = self.get_replica_set()

            for rs in api_response.items:
                if not self.replica_set_status_check(rs):
                    need_wait = True
        print()
Example #24
def draw(zDataCSVName):
    z_data = pd.read_csv(os.path.join(StartingDataDirectory, zDataCSVName))
    print(z_data)
    data = [
        go.Surface(
            z=z_data.to_numpy(),
            contours=go.surface.Contours(
                z=go.surface.contours.Z(
                    show=True,
                    usecolormap=True,
                    highlightcolor="#42f462",
                    project=dict(z=True)
                )
            )
        )
    ]

    layout = go.Layout(
        title='Testing Plotly',
        autosize=False,
        width=500,
        height=500,
        margin={
            'l': 60,
            'r': 50,
            'b': 65,
            't': 90
        }
    )

    fig = go.Figure(data=data, layout=layout)
    spinner = Spinner('Drawing ')
    plotly.iplot(fig, filename='elevations-3d-surface')
    spinner.finish()
Example #25
def get_template_counts(model_id):
    import tensorflow as tf
    import numpy as np
    print('Getting template counts for %s' % model_id)
    graph = tf.Graph()
    with graph.as_default():
        builder = get_builder(model_id)
        features, labels = builder.get_inputs(mode='train', repeat=False)
        spec = builder.get_estimator_spec(features, labels, mode='eval')
        predictions = spec.predictions
        probs = predictions['probs']
        counts = tf.argmax(probs, axis=-1)
        totals = np.zeros((builder.n_templates, ), dtype=np.int32)
        saver = tf.train.Saver()

        with tf.train.MonitoredSession() as sess:
            saver.restore(sess, tf.train.latest_checkpoint(builder.model_dir))
            spinner = Spinner()
            while not sess.should_stop():
                c = sess.run(counts)
                for ci in c:
                    totals[ci] += 1
                spinner.next()
                # break
            spinner.finish()
    return totals
Example #26
def copyTree(src, dst, state, symlinks=False, ignore=None):
    errors = []
    spinner = Spinner('Loading ')
    src_folder_size = get_size_format(get_directory_size(src))
    print("___ Folder Size: ", src_folder_size)

    while state != 'FINISHED':
        for item in os.listdir(src):
            if item in ignore_folders and ignore_flag.lower() == "true":
                continue
            s = os.path.join(src, item)
            d = os.path.join(dst, item)
            try:
                if os.path.isdir(s):
                    if os.path.isdir(d):
                        copyTree(s, d, state, symlinks, ignore)
                    else:
                        shutil.copytree(s, d, symlinks, ignore)
                else:
                    shutil.copy2(s, d)
                spinner.next()
            # catch shutil.Error from the recursive copytree so that we can
            # continue with other files
            except shutil.Error as err:
                errors.extend(err.args[0])
            except (IOError, os.error) as why:
                errors.append((s, d, str(why)))

        state = "FINISHED"
    print("___ Checking Errors: ", errors)
Example #27
def main(input_path, output_filename):
    """
    Google Vision API to CSV
    """

    # collect images from path
    images = []
    spinner = Spinner('collect images from %s  ' % input_path)
    for current_file in os.listdir(input_path):
        if current_file.endswith('.jpg'):
            images.append(current_file)
        spinner.next()
    spinner.finish()

    # analyze images with Google Vision API
    responses = []
    bar = Bar('analyze images', max=len(images))
    for current_img in images:
        request = generate_request(input_path + '/' + current_img)
        response = call_api(request)
        responses.append({'file': current_img, 'response': response})
        bar.next()
    bar.finish()
    #print(json.dumps(responses, indent=4))

    create_csv(responses, output_filename)
Example #28
def fetch_from_slack(token, channel, oldest):
    n_messages = 0
    newest = None
    oldest = float(oldest)
    oldest_ts = str(datetime.fromtimestamp(oldest))
    spinner = Spinner('Fetching history for ' +
                      channel + ' from ' + oldest_ts + ' ')

    url = ("https://slack.com/api/conversations.history?token=" + token +
           "&channel=" + channel +
           "&count=100&inclusive=true&oldest=" + str(round(oldest)))
    # records are paged oldest to newest, however the message order
    # within a single response is newest to oldest
    for message_resp in paged_query(url):
        if not message_resp['ok']:
            raise ValueError("Error fetching channel history from Slack: ",
                             message_resp["error"])
        messages = message_resp['messages']
        newest = messages[0].get('ts', time.time())
        n_messages += len(messages)

        for message in reversed(messages):
            yield message
        spinner.next()
    print("\nFetched {0} total messages from {1} to {2}".format(
                n_messages,
                oldest_ts,
                str(datetime.fromtimestamp(float(newest)))))
Example #29
def searchIP():
    ip = IP()
    count = 0
    global ipList
    if verbose == "0":  # No verbosity
        spinner = Spinner("I'm Computing...")
        while count < desiredIP:
            spinner.next()
            try:
                r = pyping.ping(ip.generateRandomIP4())
                if r.ret_code == 0:
                    dnsReverse(r.destination)
                    ipList.insert(count, r.destination)
                    count = count + 1
            except Exception:
                print("")
    elif verbose == "1":  # Verbosity
        while count < desiredIP:
            ipGenerato = ip.generateRandomIP4()
            print("\nGenerated IP4: " + ipGenerato)
            print("\nTesting it\n")
            try:
                r = pyping.ping(ipGenerato)
                if r.ret_code == 0:
                    print(r.destination + " " + "reachable")
                    dnsReverse(r.destination)
                    ipList.insert(count, r.destination)
                    count = count + 1
                elif r.ret_code == 1:
                    print(r.destination + " " + "unreachable")
            except Exception:
                print("")
    return
Example #30
def get_data(query, variables):
    has_next = True
    cursor = None
    entities = []

    spinner = Spinner('Fetching Github Data')
    while has_next:
        spinner.next()
        variables['cursor'] = cursor

        rate_limit = get_rate_limit(client)
        handle_rate_limit(rate_limit)
        results = json.loads(client.execute(query, variables))

        if results['data'] and results['data']['search']['edges']:
            nodes = [ edge['node'] for edge in results['data']['search']['edges']]
            for node in nodes:
                entities.append(parse_data(node))
            has_next = results['data']['search']['pageInfo']['hasNextPage']
            cursor = results['data']['search']['pageInfo']['endCursor']
        else:
            logger.warning(f'No data found: {results}')
            has_next = False


    spinner.finish()
    print('\n')
    return entities