Example #1
def fetch_all_pages(collection,
                    query,
                    batch_size=1000,
                    projection={},
                    sort=None,
                    progress=True,
                    print_url=False):
    base_url = API_URL
    url = f"{base_url}/{collection}"
    params = {
        'where': json.dumps(query),
        'max_results': batch_size,
        'projection': json.dumps(projection)
    }
    if sort:
        params['sort'] = sort
    data = []
    if print_url:
        print(f"API request: {base_url}/{collection}?where={params['where']}")
    response = requests.get(url, params=params).json()  # get first page
    data.extend(response['_items'])
    if '_links' not in response:
        return data
    num_docs = response['_meta']['total']
    if num_docs <= 0:
        return data
    if progress: bar = Bar('Downloading documents', max=num_docs)
    while 'next' in response['_links']:
        if progress: bar.goto(len(data))
        url = f"{base_url}/{response['_links']['next']['href']}"
        response = requests.get(url).json()
        data.extend(response['_items'])
    if progress: bar.goto(len(data))
    if progress: bar.finish()
    return data
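A minimal usage sketch for the pager above; the collection name, query, and projection are hypothetical, and the surrounding module is assumed to define API_URL and import requests, json, and progress.bar.Bar:

# hypothetical call: fetch all published documents from an Eve-style endpoint
docs = fetch_all_pages('articles',
                       {'status': 'published'},
                       batch_size=500,
                       projection={'title': 1},
                       print_url=True)
print(f"fetched {len(docs)} documents")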
Example #2
def ld_command(opts):
    t_flag = False
    for opt, arg in opts:
        if opt == "-t":
            t_flag = True

    finished_days = datetime.today().date() - config.day_zero
    remaining_days = config.total_days - finished_days
    bar = Bar(
        '[Days]   ',
        max=int(config.total_days.days),
        check_tty=False,
        suffix=
        f'%(index)d/%(max)d | %(percent)d%% | left: {remaining_days.days}',
        width=10)
    bar.goto(finished_days.days)
    bar.finish()

    # approximate business days by removing two weekend days per full week
    finished_business_days = finished_days.days - (finished_days.days // 7) * 2
    total_business_days = config.total_days.days - (config.total_days.days // 7) * 2
    remaining_business_days = total_business_days - finished_business_days
    bar = Bar(
        '[B. Days]',
        max=int(total_business_days),
        check_tty=False,
        suffix=
        f'%(index)d/%(max)d | %(percent)d%% | left: {remaining_business_days}',
        width=10)
    bar.goto(finished_business_days)
    bar.finish()
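ld_command relies on a config object with a start date and an overall span; a hypothetical stand-in (the attribute names come from the code above, the values are invented) could look like:

# hypothetical config assumed by ld_command; a real project would import this
from datetime import date, timedelta

class config:
    day_zero = date(2024, 1, 1)        # countdown start date
    total_days = timedelta(days=120)   # overall span as a timedelta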
Example #3
def wait_for_running(pi_queue, max_running_count=20):
    projects_cnt = pi_queue.qsize()
    bar = Bar('Running',
              fill='#',
              suffix='[%(index)d/%(max)d - %(percent).1f%% - %(elapsed)ds]',
              max=projects_cnt)
    while True:
        projects = get_projects()
        running_count = 0
        if projects:
            for project in projects:
                if project['status'] == 'RUNNING':
                    running_count += 1
        bar.goto(projects_cnt - pi_queue.qsize() - running_count)
        if running_count + pi_queue.qsize() == 0:
            break
        elif running_count < max_running_count and pi_queue.qsize() > 0:
            pi_list = []
            for i in range(max_running_count - running_count):
                if pi_queue.qsize() > 0:
                    pi_list.append(pi_queue.get())
                else:
                    break
            if len(pi_list) > 0:
                rpc_create_projects(pi_list)
                time.sleep(5)
                rpc_run_projects(pi_list)
        time.sleep(30)
    bar.goto(projects_cnt)
    bar.finish()
Example #4
def progress_bar(stream, chunk, file, bytes_remaining):
    global bar
    M = 1000000
    if not bar:
        bar = Bar(" Download",
                  max=stream.filesize / M,
                  suffix='%(index).3f/%(max).3f MB')

    bar.goto((stream.filesize - bytes_remaining) / M)
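This callback matches the older pytube on_progress signature (stream, chunk, file handle, bytes remaining); a hedged sketch of wiring it up, with a placeholder video URL:

# hypothetical download using the callback above (older pytube signature)
from pytube import YouTube

bar = None  # module-level handle that progress_bar creates lazily
yt = YouTube('https://youtu.be/<video-id>', on_progress_callback=progress_bar)
yt.streams.first().download()
bar.finish()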
Example #5
class CloneProgress(RemoteProgress):
    def create_bar(self, max_count):
        self.bar = Bar('Cloning', max=max_count)

    def update(self, op_code, cur_count, max_count=None, message=''):
        if not hasattr(self, "bar"):
            self.create_bar(max_count)
        self.bar.max = max_count
        self.bar.goto(cur_count)
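GitPython calls update() repeatedly during a clone, so the class above can be passed straight to Repo.clone_from; the repository URL and target path below are placeholders:

# hypothetical clone with the progress handler above
import git

git.Repo.clone_from('https://github.com/user/repo.git', '/tmp/repo',
                    progress=CloneProgress())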
Example #6
def main(conf):
    """ main function """
    parser = argparse.ArgumentParser()
    parser.add_argument('-c',
                        '--clear',
                        help='clear previous messages; works only with -f',
                        default=False,
                        action='store_true')
    parser.add_argument('-f',
                        '--follow',
                        help='follow mode',
                        default=False,
                        action='store_true')
    parser.add_argument('-l', '--lines', help='number of lines', default='5')
    parser.add_argument('-r', '--region', help='security region', default='40')
    args = parser.parse_args()

    numlines = args.lines
    url = conf.get('global', 'baseurl') + conf.get('regions',
                                                   'region' + args.region)

    data = libp2000.get_p2000_data(url)
    p2000data = libp2000.convert_to_json(data)

    for _ in range(int(numlines)):
        idx = _
        if args.follow:
            # output the first item last
            idx = int(numlines) - _
        print(p2000_pp(p2000data['p2000'][idx - 1]))

    if args.follow:
        refreshtime = int(conf.get('global', 'refreshtime'))
        bar = Bar('Refresh in: ', max=refreshtime)
        olddata = p2000data['p2000']
        try:
            while True:
                newdata = libp2000.convert_to_json(
                    libp2000.get_p2000_data(url))['p2000']
                newdata.reverse()
                diff = [x for x in newdata if x not in olddata]
                if len(diff) > 0:
                    if args.clear:
                        os.system('clear')
                    for item in diff:
                        print(p2000_pp(item))
                olddata = newdata
                bar.goto(0)
                for t in range(refreshtime):
                    bar.next()
                    time.sleep(1)
                bar.clearln()

        except KeyboardInterrupt:
            bar.finish()
            sys.exit(0)
Example #7
    def wait_with_progress_bar(
            self,
            message: str,
            timeout: Optional[int] = PROGRESS_TIMEOUT) -> None:
        progress_bar = Bar(message, max=self.futures_count, check_tty=False)
        progress_bar.start()

        for progress in self.progress(timeout=timeout):
            progress_bar.goto(progress.completed)

        progress_bar.finish()
Example #8
class Getter:
    def get(self, url, to):
        self.p = None

        def update(blocks, bs, size):
            # urlretrieve reporthook: blocks transferred so far, block size, total size in bytes
            if not self.p:
                self.p = Bar(to, max=size)
            else:
                if size < 0:
                    self.p.update()
                else:
                    self.p.goto(blocks * bs)

        urllib.request.urlretrieve(url, DIR + '/pretrained.tar.gz', update)
        self.p.finish()
Example #10
def threads(casas, logger, nomeJson):
    bar = Bar('Loading', fill='@', suffix='%(percent)d%%', max=1000)

    threads = [
        threading.Thread(target=execute_url_list,
                         args=(
                             casa,
                             logger,
                             nomeJson,
                         )) for casa in casas
    ]

    print("Start")
    for thread in threads:
        thread.start()
        bar.next()
    for thread in threads:
        thread.join()
    bar.goto(1000)
Example #11
    def _build_dynamic_upload_data(self, fields, callback=None):
        # The monitor is the data!
        encoded_data = encoder.MultipartEncoder(fields=fields)

        if self.state.is_using_cli is True and self.state.verbose:
            bar = Bar('Uploading ' + fields['file'][0], suffix='%(percent)d%%')
            return encoder.MultipartEncoderMonitor(encoded_data, lambda m: bar.goto(m.bytes_read / m.len * 100))
        elif self.state.is_using_cli is False and callback:
            return encoder.MultipartEncoderMonitor(encoded_data, lambda m: callback(EVENT_METHOD_PROGRESS_PERCENT,
                                                                                    m.bytes_read / m.len * 100))
        else:
            return encoder.MultipartEncoderMonitor(encoded_data, None)
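The monitor built above comes from requests_toolbelt, so a caller would normally post it with its own content type; a hedged sketch (the endpoint, field layout, and uploader instance are hypothetical):

# hypothetical upload of the MultipartEncoderMonitor returned above
import requests

fields = {'file': ('report.bin', open('report.bin', 'rb'))}
monitor = uploader._build_dynamic_upload_data(fields)
requests.post('https://api.example.com/upload', data=monitor,
              headers={'Content-Type': monitor.content_type})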
Example #12
def run(max_running_count):
    bar = None
    projects_cnt = 0
    while True:
        projects = get_projects()
        if not bar and projects:
            projects_cnt = len(projects)
            bar = Bar(
                'Running',
                fill='#',
                suffix='[%(index)d/%(max)d - %(percent).1f%% - %(elapsed)ds]',
                max=projects_cnt)
        running_count = 0
        todo_list = []
        if projects:
            for project in projects:
                if project['status'] == 'RUNNING':
                    running_count += 1
                elif project['status'] == 'TODO':
                    todo_list.append(project)
        if bar:
            bar.goto(projects_cnt - len(todo_list) - running_count)
        if running_count + len(todo_list) == 0:
            break
        elif running_count < max_running_count and len(todo_list) > 0:
            pi_list = []
            for i in range(max_running_count - running_count):
                if i < len(todo_list):
                    pi_list.append(todo_list[i])
                else:
                    break
            if len(pi_list) > 0:
                rpc_set_projects_status(pi_list, 'RUNNING')
                time.sleep(5)
                rpc_run_projects(pi_list)
        time.sleep(30)
    if bar:
        bar.goto(projects_cnt)
        bar.finish()
    return projects_cnt
Example #13
def generate_with_progress(response, path: str):
    # dtd is resolved via base_url
    stream = response.raw
    parser = etree.XMLPullParser(
        events=['start', 'end'],
        base_url=path,
        load_dtd=True,
        dtd_validation=True,
    )
    suffix = '%(percent)d%% %(elapsed_td)s (ETA: %(eta_td)s)'
    length = int(response.headers['Content-Length'])
    progress = Bar(suffix=suffix, max=length, hide_cursor=False)
    try:
        for line in decompress_gzip_stream(stream):
            parser.feed(line)
            yield from parser.read_events()
            current_pos = stream.tell()
            if current_pos > progress.index:
                # have to check, otherwise ETA is screwed up
                progress.goto(current_pos)
    except KeyboardInterrupt:
        pass
    finally:
        progress.finish()
Example #14
def main():
    al = 0
    bar = Bar('Carga: ', max=100)
    try:
        while int(status()) <= niv:
            if connected() == "DISCHARGING":
                print('\nCargador desconectado: code 1')
                print('\nSonara la alarma...')

                time.sleep(4)

                if int(vol()) != 30:
                    sys('termux-volume music 30')

                while al <= 5:
                    play(
                        '/storage/emulated/0/Download/alarma-de-evacuacin-evacuacion.mp3'
                    )
                    time.sleep(7)
                    al += 1

                break
            bar.goto(int(status()))
            sys('termux-notification -t Carga -c ' + str(status()) +
                '% -id=123')
            time.sleep(ckt)
        else:
            #sys('termux-telephony-call 5581532662')
            bar.next()
            bar.finish()
            sound()
    except KeyboardInterrupt:
        bar.finish()
        play('/storage/emulated/0/Download/trabar-carro-alarma-auto-.mp3')
        sys('cls')
        print('\nAlarma desactivada: code 0')
Example #15
    if args.debug:
        embed()

    # Loop through the epochs
    for epoch in xrange(args.epochs):

        # Loop through the data
        bar = Bar('Epoch {}'.format(epoch + 1), max=len(range(num_samples)))
        for i in xrange(0, num_samples, reading_batch_size):
            img_blobs = image_data['blobs'][i : i + reading_batch_size]
            one_hots  = sentence_data['sentences/token_1hs'][i : i + reading_batch_size]
            img_blobs, one_hots = shuffle(img_blobs, one_hots, random_state=0)
            
            for j in xrange(0, reading_batch_size, args.batch_size):
                bar.goto(i + j) # increment progress bar with increments of batch_size

                t_begin = time.clock()

                # Extract data
                d_images    = img_blobs[j : j + args.batch_size]
                d_sentences = sentence_data['sentences/token_1hs'][j : j + args.batch_size]
                d_sentences = d_sentences[:, 0, :, :]
                d_images    = np.reshape(d_images, (d_images.shape[0] * d_images.shape[1], d_images.shape[2]))

                # Fit image_data
                #results = aligner.combined_model.fit([[d_images], [d_sentences]], [scores])
                loss = aligner.combined_model.train_on_batch([d_images, d_sentences])
                timing = time.clock() - t_begin
                print ' [Loss: {}, in {}s]'.format(loss, timing)
                if len(timings) < 10:
Example #16
        r.set_max_distance(t.max_distance)
        
        if (key_control):
            r.set_control_func(get_key_movement)
        
        robots_list.add(r)
        
        robot_groups[i%threads_count].add(r)

    graphics_enabled = True
    generation = 0
    generation_scores = list()
    bar = Bar('Processing', max=t.max_distance)
    bar.check_tty = False
    for g in range (max_gen):
        bar.goto(0)
        print ("Starting generation {}".format(generation))
        for r in robots_list.sprites():
            r.reset(start_pos, start_heading)
        
        step = 0
        alive = True
        stalled_count = 0
        robots = robots_list.sprites()
        while(alive):
            
    #        screen.blit(t.track_image, (0,0))
            screen.blit(t.distances_image, (0,0))
            
            step += 1
            alive = 0
Example #17
    class Query(object):
        def __init__(self, resp, progress, parent):
            self.__resp = resp
            self.__events = []
            self.__error = False
            self.__parent = parent

            if progress:
                self.__progress = Bar(
                    "Progress",
                    max=100,
                    suffix='%(percent)d%% %(elapsed)ds %(eta_td)s')
            else:
                self.__progress = None

        def poll_query(self):
            while True:
                res = self.__resp.json()
                continue_url = res['links'][0]['href']
                percentage = res['progress']

                if 'events' in res and res['events']:
                    self.__events.extend(res['events'])

                if self.__progress:
                    self.__progress.goto(percentage)
                res = session.get(continue_url, headers=self.__parent.headers)
                self.__resp = res
                if not (200 <= self.__resp.status_code <= 300
                        and 'links' in self.__resp.json()
                        and self.__resp.json()['links'][0]['rel'] == 'Self'):
                    break
                time.sleep(1)

            if self.__resp.status_code == 200:
                res = self.__resp.json()
                if 'events' in res and res['events']:
                    self.__events.extend(res['events'])

                if self.__progress:
                    self.__progress.goto(100)
                    self.__progress.finish()
                return self

            self.__error = True
            raise LogSearch.APIError("There was an api error")

        def get_resp(self):
            return self.__resp

        def events(self):
            if not self.__error:
                return map(lambda x: x['message'], self.__events)
            else:
                raise LogSearch.APIError("There was an api error")

        def count(self):
            if self.__error:
                raise LogSearch.APIError("There was an api error")

            res = self.__resp.json()
            if res['statistics']['groups'] != []:
                raise LogSearch.LSException(
                    "This query does not contain count information")

            calc_type = res['statistics']['type']
            if calc_type == 'count':
                key = 'global_timeseries'
            else:
                key = calc_type
            timeseries = res['statistics']['timeseries']

            if key not in timeseries:
                key = timeseries.keys()[0]

            return calc_type, [
                v[calc_type] for v in res['statistics']['timeseries'][key]
            ]

        def groups(self):
            if self.__error:
                raise LogSearch.APIError("There was an api error")
            table = []
            for group in self.__resp.json()['statistics']['groups']:
                group_name = group.keys()[0]
                value = group.values()[0].values()[0]
                table.append((
                    group_name,
                    value,
                ))
            return table

        def display(self, table_format=None):
            if self.__error:
                raise LogSearch.APIError("There was an api error")
            print(
                json.dumps(self.__resp.json()['leql'],
                           indent=4,
                           sort_keys=True))
            if 'events' in self.__resp.json():
                print(json.dumps(self.__resp.json(), indent=4, sort_keys=True))
                return
            elif 'statistics' in self.__resp.json():
                from_time = self.__resp.json()['statistics']['from']
                to_time = self.__resp.json()['statistics']['to']
                calc_type = self.__resp.json()['statistics']['type']
                key = self.__resp.json()['statistics'].get(
                    'key', 'global_timeseries')
                print('Statistics response', )
                if 'groups' in self.__resp.json(
                )['statistics'] and self.__resp.json()['statistics']['groups']:
                    print(
                        'groupby(%s)' %
                        self.__resp.json()['statistics']['key'], )
                    print('calculate(%s)' % calc_type + ' : ' +
                          key if key != 'global_timeseries' else calc_type)
                    headers = [
                        "Group",
                        self.__resp.json()['statistics']['type']
                    ]
                    table = self.groups()
                    print(
                        tabulate.tabulate(table,
                                          headers=headers,
                                          tablefmt=table_format))
                    return
                if 'timeseries' in self.__resp.json(
                )['statistics'] and self.__resp.json(
                )['statistics']['timeseries']:
                    print(
                        'calculate(', calc_type + ' : ' +
                        key if key != 'global_timeseries' else calc_type, ')')

                    table = []
                    headers = [
                        "Timestamp", calc_type + ' : ' +
                        key if key != 'global_timeseries' else calc_type
                    ]
                    timeseries = self.__resp.json(
                    )['statistics']['timeseries'][key]
                    interval = (to_time - from_time) / len(timeseries)
                    timestamp = from_time
                    for ts in timeseries:
                        table.append((
                            ms_to_date_string(timestamp),
                            str(ts[calc_type]),
                        ))
                        timestamp += interval
                    print(
                        tabulate.tabulate(table,
                                          headers=headers,
                                          tablefmt=table_format))
                    return

                print(json.dumps(self.__resp.json(), indent=4, sort_keys=True))
                return

        @staticmethod
        def __validate_table_format(table_format):
            if table_format not in tabulate.tabulate_formats and table_format != 'csv':
                raise ValueError(
                    "Invalid table format '%s', valid values are '%s'" %
                    (table_format, ', '.join(tabulate.tabulate_formats)))
Example #18
def main(args):
    image_path = args.image_dir.rstrip('/')

    # Use GPU processing, because faster = better
    caffe.set_mode_gpu()

    # Construct CNN from input files
    cnn = caffe.Net(args.prototxt, args.model, caffe.TEST)

    # Setup CNN such that it can process n_samples = batch_size
    cnn.blobs['data'].reshape(args.batch_size, 3, 224, 224)

    failed_images = []
    for split_name in ['train', 'val']:
        with open('{}/{}2014list.txt'.format(image_path, split_name)) as f:
            file_data = f.read()
            lines = file_data.split()

            average_per_batch = []

            bar = Bar('Processing {}'.format(split_name), max=len(lines))
            for i in xrange(0, len(lines), args.batch_size):
                bar.goto(i)

                time_b = time.clock() # Start timing

                image_files = lines[i : i + args.batch_size] # Image filenames for this batch

                # Preprocessing the batch of images
                cnn_in = np.zeros((args.batch_size, 3, 224, 224), dtype=np.float32)
                for img_idx, img in enumerate(image_files):
                    try:
                        cnn_in[img_idx, :] = load_image('{}/{}/{}'.format(image_path, split_name, img))
                    except:
                        # Image is corrupt or missing
                        failed_images.append('{}/{}'.format(split_name, img))
                        print 'Image {} failed.'.format(img)
                out = cnn.forward_all(blobs=['conv5_4'], **{'data': cnn_in})

                # Get features from CNN
                feat = cnn.blobs['conv5_4'].data
                feat = feat.transpose((0, 2, 3, 1))
                if len(image_files) < args.batch_size:
                    feat = feat[:len(image_files), :, :, :]  # keep only rows for real images in the last partial batch

                embed()

                # Store it in a sparse matrix
                if i == 0:
                    feat_flatten_list = csr_matrix(np.array(map(lambda x: x.flatten(), feat)))
                else:
                    feat_flatten_list = vstack([feat_flatten_list, csr_matrix(np.array(map(lambda x: x.flatten(), feat)))])

                average_per_batch.append(time.clock() - time_b) # Book-keeping

                if i % 1000 == 0 and i != 0:
                    print '\n{}s per batch'.format(np.mean(average_per_batch))
                    average_per_batch = [] # Empty it again

            bar.finish()

        # Save it to an .npz file for later use
        out = {
            'data': feat_flatten_list.data,
            'indices': feat_flatten_list.indices,
            'indptr': feat_flatten_list.indptr,
            'shape': feat_flatten_list.shape
        }

        print 'Saving to {}/{}.npz'.format(args.out_dir, split_name)
        np.savez('{}/{}.npz'.format(args.out_dir, split_name), **out)
Example #19
class Fetcher:
    def check_hash(self, filename, dhash='md5'):
        '''Compute file hash
        '''
        # BUF_SIZE is totally arbitrary, change for your app!
        BUF_SIZE = 65536  # let's read stuff in 64 KB chunks!

        if dhash == 'md5':
            fhash = hashlib.md5()
        elif dhash == 'sha1':
            fhash = hashlib.sha1()
        else:
            return -1

        with open(filename, 'rb') as f:
            while True:
                data = f.read(BUF_SIZE)
                if not data:
                    break
                fhash.update(data)

        return fhash.hexdigest()

    def get(self, url, fname=False):
        '''Download file from url using requests library
        '''
        r = requests.get(url, stream=True, verify=False)
        size = r.headers['content-length']
        if not fname:
            fname = url.split('/')[-1]

        if size:
            p = Bar(fname, max=int(size))
        else:
            p = Spinner(fname)

        with open(fname, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024 * 50):
                if chunk:  # filter out keep-alive new chunks
                    p.next(len(chunk))
                    f.write(chunk)

        p.finish()
        return fname

    def get_urllib(self, url, to):
        '''Download file from url using urllib (works for ftp urls)
        '''
        self.p = None

        def update(blocks, bs, size):
            if not self.p:
                if size < 0:
                    self.p = Spinner(to)
                else:
                    self.p = Bar(to, max=size)
            else:
                if size < 0:
                    self.p.update()
                else:
                    self.p.goto(blocks * bs)

        try:
            urlretrieve(url, to, update)
        except SSLCertVerificationError:
            ssl._create_default_https_context = ssl._create_unverified_context
            urlretrieve(url, to, update)

        self.p.finish()
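A hedged usage sketch for the Fetcher above; the download URL is a placeholder:

# hypothetical: stream a file with a progress bar, then verify its checksum
fetcher = Fetcher()
fname = fetcher.get('https://example.com/data.tar.gz')
print(fetcher.check_hash(fname, dhash='sha1'))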
Example #20
if args.init_activity:
    print(f"Initialising activity file {args.activity_file}...")
    activity.init_activity_file(args.destination)
else:
    line_count = 0
    with open(args.source, 'r') as in_file:
        for line in in_file:
            line_count += 1

    with open(args.source, 'r') as in_file:
        with open(args.destination, 'w') as out_file:

            progress_bar = Bar("Reducing", max=line_count, check_tty=False)
            flow.Bin.print_headers(out_file)

            for i, line in enumerate(in_file):
                packet = Packet(line, i)
                device = devices[packet]
                if not device:
                    continue
                devices.flush_bins(out_file, time=packet.time)
                device.update(packet, out_file, activity)
                if not (i % 1000):
                    progress_bar.next(n=1000)

            progress_bar.goto(line_count)
            progress_bar.finish()
            devices.flush_bins(out_file, force=True)
            devices.store()
Example #21
class DnsRazzle():
    def __init__(self, domain, out_dir, tld, dictionary, file, useragent, debug, threads, nmap, recon, browser_name):
        self.domains = []
        self.domain = domain
        self.out_dir = out_dir
        self.tld = tld
        self.dictionary = dictionary
        self.file = file
        self.useragent = useragent
        self.threads = []
        self.jobs = queue.Queue()
        self.jobs_max = 0
        self.debug = debug
        self.nmap = nmap
        self.recon = recon
        self.nameserver = nameserver
        self.browser_name = browser_name



    def gen(self, shouldPrint=False):
        fuzz = dnstwist.DomainFuzz(self.domain, self.dictionary, self.tld)
        fuzz.generate()
        if self.tld is not None:
            for entry in fuzz.domains.copy():
                for tld in self.tld:
                    new_domain = ".".join(entry["domain-name"].split(".")[:-1]) + "." + tld;
                    fuzz.domains.append({"fuzzer": 'tld-swap', "domain-name": new_domain})
            m = getattr(fuzz, "_DomainFuzz__postprocess")
            m()
        if shouldPrint:
            for entry in fuzz.domains[1:]:
                print(entry['domain-name'])
        self.domains = fuzz.domains



    def gendom_start(self, useragent, threadcount=10):
        url = dnstwist.UrlParser(self.domain)

        for i in range(len(self.domains)):
            self.jobs.put(self.domains[i])
        self.jobs_max = len(self.domains)

        for _ in range(threadcount):
            worker = dnstwist.DomainThread(self.jobs)
            worker.setDaemon(True)

            self.kill_received = False
            self.debug = False

            worker.option_extdns = True
            worker.option_geoip = False
            worker.option_ssdeep = False
            worker.option_banners = True
            worker.option_mxcheck = True

            worker.nameservers = [nameserver]
            self.useragent = useragent

            worker.uri_scheme = url.scheme
            worker.uri_path = url.path
            worker.uri_query = url.query

            worker.domain_init = url.domain
            worker.start()
            self.threads.append(worker)

        self.bar =  Bar('Processing domain permutations', max=self.jobs_max - 1)


    def gendom_stop(self):
        for worker in self.threads:
            worker.stop()
            worker.join()
        self.bar.finish()

    def gendom_progress(self):
        self.bar.goto(self.jobs_max - self.jobs.qsize())


    def _whois(self, domains, debug):
        num_doms = len(domains)
        pBar = Bar('Running whois queries on discovered domains', max=num_doms - 1)
        for domain in domains:
            if len(domain) > 2:
                try:
                    whoisq = query(domain['domain-name'].encode('idna').decode())
                except Exception as e:
                    if debug:
                        print_error(e)
                else:
                    if whoisq is not None:
                        if whoisq.creation_date:
                            domain['whois-created'] = str(whoisq.creation_date).split(' ')[0]
                        if whoisq.registrar:
                            domain['whois-registrar'] = str(whoisq.registrar)
            pBar.next()
        pBar.finish()

    def portscan(self, domains, out_dir):
        print_status(f"Running nmap on {domains}")
        nm = nmap.PortScanner()
        nm.scan(hosts=domains, arguments='-A -T4 -sV')
        f = open(out_dir + '/nmap/' + domains + '.csv', "w")
        f.write(nm.csv())
        f.close()

    def recondns(self, domains, out_dir, threads):
        '''
        :param domain: domain to run dnsrecon on
        :param out_dir: output directory to save records to
        general_enum arguments : res, domain, do_axfr, do_bing, do_yandex, do_spf, do_whois, do_crt, zw, thread_num=None
        :return:
        '''
        print_status(f'Running reconDNS report on {domains}!')
        ns_server = [nameserver]
        request_timeout = 10
        proto = 'udp'
        res = DnsHelper(domains, ns_server, request_timeout, proto)
        std_records = general_enum(res, domains, False, False, False, True, False, True, True, threads)
        write_to_file(make_csv(std_records), out_dir , '/reconDNS/' + domains + '.txt')

    def check_domain(self, domains, r_domain, out_dir, nmap, recon, threads):
        '''
        primary method for performing domain checks
        '''

        self.screenshot_domain(domains['domain-name'], out_dir + '/screenshots/')
        ssim_score = compare_screenshots(out_dir + '/screenshots/originals/' + r_domain + '.png',
                            out_dir + '/screenshots/' + domains['domain-name'] + '.png')
        domains['ssim-score'] = ssim_score
        if nmap:
            self.portscan(domains['domain-name'], out_dir)
        if recon:
            self.recondns(domains['domain-name'], out_dir, threads)



    def screenshot_domain(self, domain, out_dir):
        """
        function to take screenshot of supplied domain
        """
        print_status(f"collecting screenshot of {domain}!")
        url = "http://" + str(domain).strip('[]')
        try:
            if self.browser_name == 'chrome':
                options = webdriver.ChromeOptions()
                options.headless = True
                try:
                    from webdriver_manager.chrome import ChromeDriverManager
                    driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
                except Exception as E:
                    print_error(f"Unable to install/update Chrome webdriver because {E}")

            elif self.browser_name == 'firefox':
                options = webdriver.FirefoxOptions()
                options.headless = True
                try:
                    from webdriver_manager.firefox import GeckoDriverManager
                    s = webdriver.firefox.service.Service(executable_path=GeckoDriverManager().install())
                    driver = webdriver.Firefox(service=s, options=options)
                except Exception as E:
                    print_error(f"Unable to install/update Firefox webdriver because {E}")

            else:
                print_status(f"Unimplemented webdriver browser: {self.browser_name}")

            driver.get(url)

            ss_path = str(out_dir + domain + '.png')

            driver.set_window_size(1920, 1080)  # May need manual adjustment
            driver.get_screenshot_as_file(ss_path)
            driver.quit()
            print_good(f"Screenshot for {domain} saved to {ss_path}")
        except WebDriverException as exception:
            print_error(f"Unable to screenshot {domain}!")