Пример #1
0
        def test_vcolor(self):
            """vcolor must append the pattern colours to rows cyclically."""
            from ascii_graph.colordata import vcolor

            values = [600, 500, 400, 400, 300, 200, 100, 50]
            test = [('testval%d' % i, v) for i, v in enumerate(values)]

            pattern = [Gre, Yel, Red]
            # Each row gains the pattern colour at its index, wrapping around.
            expected = [
                (label, value, pattern[i % len(pattern)])
                for i, (label, value) in enumerate(test)
            ]

            data = vcolor(test, pattern)
            assert data == expected
Пример #2
0
        def test_vcolor(self):
            """Check that vcolor assigns the colour pattern round-robin."""
            from ascii_graph.colordata import vcolor

            pattern = [Gre, Yel, Red]

            test = []
            expected = []
            for idx, val in enumerate((600, 500, 400, 400, 300, 200, 100, 50)):
                label = 'testval%d' % idx
                test.append((label, val))
                expected.append((label, val, pattern[idx % 3]))

            assert vcolor(test, pattern) == expected
Пример #3
0
def makeGraph(fList):
    """Print an ascii bar graph of file-type counts for *fList*.

    fList is a sized iterable of objects exposing a ``name`` attribute
    (e.g. ``os.DirEntry``).  Extension matching is case-insensitive;
    the original only lowercased the *first* extension of each group,
    so e.g. '.MKV' or '.PNG' files were silently missed.
    """
    # Category label -> extensions it covers.  str.endswith accepts a
    # tuple, so each category needs exactly one test per file.
    categories = [
        ('docs', ('.pdf', '.docx', '.doc', '.txt')),
        ('videos', ('.mp4', '.mkv', '.avi')),
        ('images', ('.jpeg', '.png', '.jpg', '.gif')),
        ('songs', ('.mp3', '.ogg', '.wav')),
        ('others', ('.apk', '.jar', '.exe', '.iso', '.dmg', '.csv',
                    '.log', '.db')),
        ('compressed', ('.zip', '.7z', '.deb', '.tar.gz', '.rpm')),
        ('codes', ('.c', '.py', '.java', '.cpp')),
    ]
    counts = {label: 0 for label, _ in categories}

    ll = len(fList)
    # `oss` is a module-level flag -- presumably 1 == whole-system scan;
    # defined elsewhere in this file.
    if (oss == 1):
        print("Total no of files in system: ", ll)
    else:
        print("Total no of files in user home: ", ll)

    for file in fList:
        # Lowercase once so every category check is case-insensitive.
        name = file.name.lower()
        # Categories are checked independently (not elif), matching the
        # original behaviour: a file may count in several categories.
        for label, exts in categories:
            if name.endswith(exts):
                counts[label] += 1

    data = [('docs', counts['docs']), ('songs', counts['songs']),
            ('videos', counts['videos']), ('images', counts['images']),
            ('codes', counts['codes']), ("compressed", counts['compressed']),
            ('others', counts['others'])]

    pattern = [Gre, Yel, Red]
    data = vcolor(data, pattern)
    graph = Pyasciigraph()
    for line in graph.graph('Files on PC', data):
        print(line)
Пример #4
0
def make_graph(data, portfolio):
    """Build a coloured ascii bar graph of portfolio holdings.

    data      -- list of dicts with 'id', 'rank', 'price_cad', 'price_usd'
    portfolio -- mapping of coin id -> quantity held
    Returns [graph lines, total value in USD, total value in CAD].
    """
    bar_data = []
    portfolio_total_cad = 0
    portfolio_total_usd = 0
    longest = 0  # longest id, used to align the percentage column
    for row in data:
        id = row['id']
        longest = max(longest, len(id))
        cad_value = float(portfolio[id]) * float(row['price_cad'])
        value = float(portfolio[id]) * float(row['price_usd'])
        bar_data.append((id, value))
        portfolio_total_cad += cad_value
        portfolio_total_usd += value

    def get_rank(elem):
        """Market-cap rank of a bar entry, looked up from *data*."""
        id = elem[0]
        for row in data:
            if row['id'] == id:
                return int(row['rank'])
        # No matching row: fail explicitly instead of the original's
        # accidental UnboundLocalError on `rank`.
        raise KeyError(id)

    bar_data.sort(key=get_rank)

    graph_data = []
    for i, (id, value) in enumerate(bar_data, start=1):
        # Pad the id so every percentage column lines up.
        label = '$  -  ' + id.ljust(longest)
        label += '\t' + str(round(
            (value / portfolio_total_usd) * 100, 2)) + '%'
        # Alternate row colours for readability (first row is cyan).
        label = click.style(label, fg='white' if i % 2 == 0 else 'cyan')
        graph_data.append((label, value))

    pattern = [Cya, Whi]
    graph_data = vcolor(graph_data, pattern)
    graph = Pyasciigraph(graphsymbol=None)
    bar_graph = graph.graph(label=None, data=graph_data)
    return [bar_graph, portfolio_total_usd, portfolio_total_cad]
Пример #5
0
def graph_oneperline(test_data):
    """Render *test_data* with one pattern colour per line and print it.

    Returns the Pyasciigraph instance and the coloured data.
    """
    print('Color example:')
    colour_cycle = [Gre, Yel, Red]
    coloured = vcolor(test_data, colour_cycle)

    grapher = Pyasciigraph()
    for rendered_line in grapher.graph('vcolor test', coloured):
        print(rendered_line)
    return grapher, coloured
Пример #6
0
    def display_graph(self):
        """Print the latest vital-sign readings as a coloured bar graph.

        Serialises output with ``self._graph_lock`` so concurrent threads
        do not interleave their graphs.
        """
        pattern = [Gre, Blu, Red]

        self._graph_lock.acquire(blocking=True)
        try:
            data = vcolor(
                [('Heart Rate', self._cur_pulse),
                 ('SYS mmHg kPa', self._cur_systolic),
                 ('DIA mmHg kPa', self._cur_diastolic),
                 ('Oxygen Saturation', self._cur_oxygen)], pattern)

            for graph_line in self._graph.graph('VITAL READINGS', data):
                print(
                    graph_line
                )
        finally:
            # Release even if vcolor/graph raises; otherwise every later
            # call would deadlock on the never-released lock.
            self._graph_lock.release()
    def emotion_graph(self):
        """Print a colour-coded ascii graph of the tweets' document emotions."""
        from ascii_graph import Pyasciigraph
        import ascii_graph.colors as c
        from ascii_graph.colordata import vcolor

        col_pattern = [c.Gre, c.Yel, c.Cya, c.Red, c.Pur]
        # Emotion scores come back as 0..1 fractions; scale to percent.
        data = []
        for emotion, value in self.__tweets_emotion['docEmotions'].items():
            data.append((str(emotion).capitalize(), float(value) * 100))
        total_emotion_value = vcolor(data, col_pattern)
        graph = Pyasciigraph()
        for line in graph.graph('Emotions Graph', total_emotion_value):
            print('         ', line)
        return
Пример #8
0
    def plot(klass, str, n, action='print'):
        """Yield the lines of an n-gram histogram for *str*.

        klass  -- class object (first positional arg; signature kept as-is)
        str    -- text to parse (name shadows the builtin but is part of
                  the public signature, so it is left unchanged)
        n      -- n-gram size passed through to NgramParser.parse
        action -- unused here; kept for interface compatibility
        """
        bigram = NgramParser.parse(str, n)

        # Setup the histogram: single-char bars drawn with '+'.
        graph = Pyasciigraph(line_length=1,
                             separator_length=4,
                             graphsymbol='+')

        pattern = [Gre, Yel, Red, Blu]
        graph_data = vcolor(bigram.items(), pattern)

        # Yield the histogram line by line.
        for line in graph.graph('bigram count', graph_data):
            yield (line)
Пример #9
0
    def asciiplot(self,
                  row_or_col_name,
                  axis=0,
                  colours=True,
                  num_to_plot=100,
                  line_length=120,
                  min_graph_length=50,
                  separator_length=4,
                  multivalue=False,
                  human_readable='si',
                  graphsymbol='*',
                  float_format='{:,.2f}',
                  **kwargs):
        """Pipe a quick ascii chart of one row/column of results to a pager."""
        from ascii_graph import Pyasciigraph
        from ascii_graph.colors import Gre, Yel, Red, Blu
        from ascii_graph.colordata import vcolor
        from ascii_graph.colordata import hcolor
        import pydoc

        graph = Pyasciigraph(line_length=line_length,
                             min_graph_length=min_graph_length,
                             separator_length=separator_length,
                             multivalue=multivalue,
                             human_readable=human_readable,
                             graphsymbol=graphsymbol)
        # axis 0 selects by column name (via the transpose), else by row.
        if axis == 0:
            series = self.results.T[row_or_col_name]
        else:
            series = self.results[row_or_col_name]
        data = list(zip(series.index, series.values))[:num_to_plot]
        if colours:
            data = vcolor(data, [Gre, Yel, Red])

        rendered = []
        for line in graph.graph(label=None,
                                data=data,
                                float_format=float_format):
            rendered.append(line)
        # less -X -R -S: no screen init, raw colour codes, no line wrap.
        pydoc.pipepager('\n'.join(rendered), cmd='less -X -R -S')
Пример #10
0
def ask_continue_trainig(train_args: TrainArgs) -> (bool, TrainArgs):
    """After the step budget is spent, show a win summary and ask to continue.

    Resets the per-round counters and, if the user answers yes, starts a
    fresh progress bar.  Returns the (mutated) train_args.
    """
    if train_args.train and train_args.train_steps >= train_args.max_train_steps:
        train_args.pbar.close()

        def ask_continue() -> bool:
            # Re-prompt until the answer is a single valid y/n ('q' quits).
            while True:
                input_text = input("Continue training?(y|n|q=quit):")
                if input_text == "q" or input_text == "Q":
                    sys.exit()
                if __check_length(1, input_text) and __check_yes_no(input_text):
                    return input_text in ("y", "Y")

        results = [("AI", int(train_args.ai_wins)),
                   ("Trainer", int(train_args.trainer_wins)),
                   ("Draw", int(train_args.draws))]
        data = vcolor(results, [Red, Gre, Blu])

        # Scale every bar against the step budget so rounds are comparable.
        graph = Pyasciigraph(force_max_value=train_args.max_train_steps)
        for line in graph.graph('Wins:', data):
            print(line)

        # Reset per-round counters before (possibly) starting a new round.
        train_args.ai_wins = 0
        train_args.draws = 0
        train_args.trainer_wins = 0
        train_args.train_steps = 0
        train_args.train = ask_continue()
        if train_args.train:
            train_args.pbar = tqdm(range(0, train_args.max_train_steps),
                                   leave=True)

    return train_args
Пример #11
0
def main():
	"""Poll a device over SNMP and redraw CPU (and optional memory) graphs.

	Runs ten refresh cycles, clearing the terminal and sleeping five
	seconds between them.  Python 2 only (print statements).
	"""

	os.system('clear')

	parser = argparse.ArgumentParser(description='cpu and memory realtime graph')

	parser.add_argument('-r','--routerName', required=True, dest="deviceName", default="localhost")
	parser.add_argument('-c','--community', required=False, dest="deviceComm", default="public")

	parser.add_argument('-m','--memory', required=False, action='store_true', dest="deviceMem", default=None)

	parser.add_argument('-d','--debug', required=False, action='store_true', dest="debug")
	
	args = parser.parse_args()

	# ++++++++++++++++++++
	if args.debug:
		logging.basicConfig(level=logging.DEBUG)
	else:
		logging.basicConfig(level=logging.INFO)
	logger = logging.getLogger(__name__)
	# --------------------	

	# Device presumably wraps SNMP access to the router -- defined elsewhere.
	r1 = Device(args.deviceName, args.deviceComm)

	cpuData = []

	for index in range(10):

		### system infos

		print r1.get_hostname
		print 
		print r1.get_descr
		# print r1.get_objID
		print

		### 5 sec cpu with history

		r1.set_cpuStat()
		logging.debug(r1._cpu_5_sec)

		# Mark the sample belonging to the current iteration with '<=='.
		for inx,r1_5_sec_explode in enumerate(r1._cpu_5_sec):
			if index == inx:
				cpuData.append(('cpmCPUTotal5sec <==', r1_5_sec_explode))
			else:
				cpuData.append(('cpmCPUTotal5sec', r1_5_sec_explode))

		pattern = [Gre, Gre, Gre]
		col_cpuData = vcolor(cpuData, pattern)

		graph = Pyasciigraph(
			graphsymbol='*')
		for line in  graph.graph(label='CPU Graph (sh processes cpu sorted | i ^CPU)', data=col_cpuData):
		    print line

		print
		### 1 and 5 min cpu

		logging.debug(r1._cpu_1_min)
		logging.debug(r1._cpu_5_min)

		cpuData = [ \
			('cpmCPUTotal1min', r1._cpu_1_min[index]), \
			('cpmCPUTotal5min', r1._cpu_5_min[index]), \
			]

		pattern = [Yel, Red]
		col_cpuData = vcolor(cpuData, pattern)

		graph = Pyasciigraph()
		for line in  graph.graph(label='CPU Graph (sh processes cpu sorted | i ^CPU)', data=col_cpuData):
		    print line

		print
		### memory stats

		if args.deviceMem:

			r1.set_memStat()
			logging.debug(r1._mem_Free)
			logging.debug(r1._mem_Used)
			logging.debug(r1._mem_Alloc)

			memData = [ \
				('ciscoMemoryTotal', r1._mem_Free[index] + r1._mem_Used[index]), \
				('ciscoMemoryPoolFree', r1._mem_Free[index]), \
				('ciscoMemoryPoolUsed', r1._mem_Used[index]), \
				('ciscoMemoryPoolLargestFree', r1._mem_Alloc[index]), \
				]

			pattern = [Gre, Yel, Red, Blu]
			col_memData = vcolor(memData, pattern)

			graph = Pyasciigraph(
			human_readable='si',
			graphsymbol='+')
			for line in  graph.graph(label='MEM Graph (sh process memory sorted | i Processor Pool)', data=col_memData):
			    print line

		# Reset the rolling sample list before the next refresh.
		cpuData = []
		time.sleep(5)
		os.system('clear')
Пример #12
0
def main():
    """Poll a device over SNMP and redraw CPU (and optional memory) graphs.

    Ten refresh cycles, clearing the screen and sleeping five seconds
    between them.  Python 2 only (print statements).
    """

    os.system('clear')

    parser = argparse.ArgumentParser(
        description='cpu and memory realtime graph')

    parser.add_argument('-r',
                        '--routerName',
                        required=True,
                        dest="deviceName",
                        default="localhost")
    parser.add_argument('-c',
                        '--community',
                        required=False,
                        dest="deviceComm",
                        default="public")

    parser.add_argument('-m',
                        '--memory',
                        required=False,
                        action='store_true',
                        dest="deviceMem",
                        default=None)

    parser.add_argument('-d',
                        '--debug',
                        required=False,
                        action='store_true',
                        dest="debug")

    args = parser.parse_args()

    # ++++++++++++++++++++
    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)
    # --------------------

    # Device presumably wraps SNMP access to the router -- defined elsewhere.
    r1 = Device(args.deviceName, args.deviceComm)

    cpuData = []

    for index in range(10):

        ### system infos

        print r1.get_hostname
        print
        print r1.get_descr
        # print r1.get_objID
        print

        ### 5 sec cpu with history

        r1.set_cpuStat()
        logging.debug(r1._cpu_5_sec)

        # Mark the sample belonging to the current iteration with '<=='.
        for inx, r1_5_sec_explode in enumerate(r1._cpu_5_sec):
            if index == inx:
                cpuData.append(('cpmCPUTotal5sec <==', r1_5_sec_explode))
            else:
                cpuData.append(('cpmCPUTotal5sec', r1_5_sec_explode))

        pattern = [Gre, Gre, Gre]
        col_cpuData = vcolor(cpuData, pattern)

        graph = Pyasciigraph(graphsymbol='*')
        for line in graph.graph(
                label='CPU Graph (sh processes cpu sorted | i ^CPU)',
                data=col_cpuData):
            print line

        print
        ### 1 and 5 min cpu

        logging.debug(r1._cpu_1_min)
        logging.debug(r1._cpu_5_min)

        cpuData = [ \
         ('cpmCPUTotal1min', r1._cpu_1_min[index]), \
         ('cpmCPUTotal5min', r1._cpu_5_min[index]), \
         ]

        pattern = [Yel, Red]
        col_cpuData = vcolor(cpuData, pattern)

        graph = Pyasciigraph()
        for line in graph.graph(
                label='CPU Graph (sh processes cpu sorted | i ^CPU)',
                data=col_cpuData):
            print line

        print
        ### memory stats

        if args.deviceMem:

            r1.set_memStat()
            logging.debug(r1._mem_Free)
            logging.debug(r1._mem_Used)
            logging.debug(r1._mem_Alloc)

            memData = [ \
             ('ciscoMemoryTotal', r1._mem_Free[index] + r1._mem_Used[index]), \
             ('ciscoMemoryPoolFree', r1._mem_Free[index]), \
             ('ciscoMemoryPoolUsed', r1._mem_Used[index]), \
             ('ciscoMemoryPoolLargestFree', r1._mem_Alloc[index]), \
             ]

            pattern = [Gre, Yel, Red, Blu]
            col_memData = vcolor(memData, pattern)

            graph = Pyasciigraph(human_readable='si', graphsymbol='+')
            for line in graph.graph(
                    label=
                    'MEM Graph (sh process memory sorted | i Processor Pool)',
                    data=col_memData):
                print line

        # Reset the rolling sample list before the next refresh.
        cpuData = []
        time.sleep(5)
        os.system('clear')
Пример #13
0
def scan_directory_folders():
    """Scan a user-chosen directory and graph per-folder size statistics.

    For every immediate child of the selected directory, counts files,
    video files and sub-directories, computes total size / average file
    size / average video size (MB), then presents the sorted results as
    colour-coded ascii graphs through a small interactive sub-menu.
    """
    print(pyfiglet.figlet_format('SPACE_AUDITOR', font='cybermedium'))
    separator_3()

    found_directories_average_file_size_list = []
    found_directories_average_video_size_list = []
    found_directories_total_folder_size_list = []
    found_counts_list = []

    graph = Pyasciigraph(separator_length=2)
    directory_selected_in_function = [get_directory_to_scan()]

    for found_dirs in os.listdir(directory_selected_in_function[0]):
        found_sub_directories_count = 0
        found_files_count = 0
        found_video_files_count = 0
        total_folder_size = 0
        directory_path = directory_selected_in_function[0] + '/' + found_dirs

        if os.path.isdir(directory_path):
            # Classify each entry as sub-directory, video file, or other file.
            for found_items in os.listdir(directory_path):

                if os.path.isdir(directory_path + '/' + found_items):
                    found_sub_directories_count = found_sub_directories_count + 1

                elif os.path.isfile(directory_path + '/' +
                                    found_items) and found_items.endswith(
                                        video_extensions):
                    found_video_files_count = found_video_files_count + 1

                else:
                    found_files_count = found_files_count + 1
            found_counts_list.append([
                found_dirs, 'FILES', found_files_count, 'VIDEOS',
                found_video_files_count, 'SUB-DIRECTORIES',
                found_sub_directories_count
            ])

        else:
            # The entry itself is a plain file, not a directory.
            found_files_count = found_files_count + 1
            found_counts_list.append([
                found_dirs, 'FILES', found_files_count, 'VIDEOS',
                found_video_files_count, 'SUB-DIRECTORIES',
                found_sub_directories_count
            ])

        try:

            # Recursive total size of the folder, in bytes.
            for path, dirs, files in os.walk(directory_path):
                for f in files:
                    fp = os.path.join(path, f)
                    total_folder_size += os.path.getsize(fp)

        except (FileExistsError, FileNotFoundError, OSError, TypeError,
                ValueError) as e:
            print('\n', 'FILE ERROR: ', e)
            separator_3()

        if int(found_files_count) > 0:
            # 1048576 = bytes per MB.
            total_folder_size_in_mb = (int(total_folder_size) / 1048576)
            average_file_size = total_folder_size_in_mb / found_files_count
            # Truncate to at most four digits via the string slice.
            average_file_size_in_mb = str(int(average_file_size))[:4]
            found_directories_total_folder_size_list.append(
                [found_dirs, total_folder_size_in_mb])
            found_directories_average_file_size_list.append(
                [found_dirs, int(average_file_size_in_mb)])

            if int(found_video_files_count) > 0:
                average_video_file_size = total_folder_size_in_mb / found_video_files_count
                average_video_file_size_in_mb = str(
                    int(average_video_file_size))[:4]
                found_directories_average_video_size_list.append(
                    [found_dirs,
                     int(average_video_file_size_in_mb)])
        else:
            # No ordinary files found: record zero averages.
            total_folder_size_in_mb = (int(total_folder_size) / 1048576)
            average_file_size_in_mb = int(0)
            average_video_file_size_in_mb = int(0)
            found_directories_total_folder_size_list.append(
                [found_dirs, total_folder_size_in_mb])
            found_directories_average_file_size_list.append(
                [found_dirs, int(average_file_size_in_mb)])
            found_directories_average_video_size_list.append(
                [found_dirs, int(average_video_file_size_in_mb)])

    # Sort each statistic descending by size before colouring.
    sorted_found_directories_total_list = sorted(
        found_directories_total_folder_size_list,
        reverse=True,
        key=lambda x: x[1])
    sorted_found_directories_average_list = sorted(
        found_directories_average_file_size_list,
        reverse=True,
        key=lambda x: x[1])
    sorted_found_directories_vid_avg_list = sorted(
        found_directories_average_video_size_list,
        reverse=True,
        key=lambda x: x[1])
    graph_color_pattern = [Gre, Blu, Pur, Red]
    color_coded_directory_totals = vcolor(sorted_found_directories_total_list,
                                          graph_color_pattern)
    color_coded_directory_averages = vcolor(
        sorted_found_directories_average_list, graph_color_pattern)
    color_coded_directory_vid_avg = vcolor(
        sorted_found_directories_vid_avg_list, graph_color_pattern)

    def sub_interface():
        """Interactive menu loop for choosing which statistic to display."""
        while True:
            print(
                'PLEASE SELECT AN OPTION: ', '\n', '\n',
                '1) SORT BY TOTAL SIZE'
                '                             2) SORT BY AVERAGE FILE-SIZE',
                '\n', '\n', '3) SORT BY AVERAGE VIDEO FILE-SIZE'
                '                4) SCAN A DIFFERENT DIRECTORY', '\n', '\n',
                '0) MAIN MENU')
            separator_3()
            bct_input = input('ENTER OPTION #: ')
            separator_3()

            try:

                if int(bct_input) == 1:
                    for line in graph.graph(
                            'DIRECTORY TOTALS - (TOTAL SIZE IN MB, DIRECTORY): ',
                            data=color_coded_directory_totals):
                        print(line, '\n', ('-' * 100))
                    print()
                    separator_3()

                elif int(bct_input) == 2:
                    for line in graph.graph(
                            'DIRECTORY TOTALS - (AVERAGE FILE-SIZE IN MB, DIRECTORY): ',
                            data=color_coded_directory_averages):
                        print(line, '\n', ('-' * 100))
                    print()
                    separator_3()

                elif int(bct_input) == 3:
                    for line in graph.graph(
                            'DIRECTORY TOTALS - (AVERAGE VIDEO FILE-SIZE IN MB, DIRECTORY): ',
                            data=color_coded_directory_vid_avg):
                        print(line, '\n', ('-' * 100))
                    print()
                    separator_3()

                elif int(bct_input) == 4:
                    # Re-run the whole scan on a freshly chosen directory.
                    scan_directory_folders()

                elif int(bct_input) == 0:
                    return

            except (TypeError, ValueError, UnicodeDecodeError,
                    ZeroDivisionError) as er:
                print(
                    er, '\n', ('-' * 100), '\n',
                    'INPUT ERROR, PLEASE RETRY SELECTION USING NUMBER KEYS: ')
                return

    sub_interface()
Пример #14
0
def cli():
    """
    Entry Point for tweetle
    """

    click.secho('''
$$$$$$$$\                                 $$\     $$\           
\__$$  __|                                $$ |    $$ |          
   $$ |$$\  $$\  $$\  $$$$$$\   $$$$$$\ $$$$$$\   $$ | $$$$$$\  
   $$ |$$ | $$ | $$ |$$  __$$\ $$  __$$\\_$$  _|  $$ |$$  __$$\ 
   $$ |$$ | $$ | $$ |$$$$$$$$ |$$$$$$$$ | $$ |    $$ |$$$$$$$$ |
   $$ |$$ | $$ | $$ |$$   ____|$$   ____| $$ |$$\ $$ |$$   ____|
   $$ |\$$$$$\$$$$  |\$$$$$$$\ \$$$$$$$\  \$$$$  |$$ |\$$$$$$$\ 
   \__| \_____\____/  \_______| \_______|  \____/ \__| \_______|
   ''',
                fg='bright_cyan')

    #A setup for tweetle interface
    while True:

        #Checking if the user exists in the added accounts
        user = click.prompt(
            'Enter Account User (Type "none" if not setup or if entering a new user)'
        )
        if user.lower() != 'none':
            comm = commands.Commands(user)
            try:
                # read_accs() presumably returns stored Twitter API and SQL
                # credentials for this user -- defined elsewhere.
                api_key, api_key_secret, access_token, access_token_secret, sql_username, sql_pw = comm.read_accs(
                )
                import tweetle.packages.ProjPySQL as ProjPySQL
                sql = ProjPySQL.db(user)
                click.secho('[+] User Found', fg='bright_green')

                break

            except Exception as e:
                print(e)
                click.secho('[-] Invalid User, Try Again.', fg='bright_red')

        #If the user wants to setup a new account
        else:
            comm = commands.Commands(user)
            comm.write()

    #User's profile information
    me = comm.profile()
    my_user = me.name
    my_desc = me.description
    followers = int(me.followers_count)
    following = int(me.friends_count)
    created = me.created_at
    status_count = int(me.statuses_count)
    prof_image = me.profile_image_url
    prof_banner = me.profile_banner_url

    #Stats bar: follower/following/tweet counts as a coloured graph
    stats = [('Followers', followers), ('Following', following),
             ('Tweets', status_count)]
    pattern = [Gre, Yel, Red]
    data = vcolor(stats, pattern)

    #Welcome art
    welcome = text2art('                         ' + 'Welcome' + '  ' +
                       my_user)

    #Welcome animation: print the banner one character at a time
    colorama.init(autoreset=False)
    click.echo('\n')
    for letter in welcome:
        sys.stdout.write(colorama.Fore.CYAN)
        sys.stdout.write(letter)
        sys.stdout.flush()
        time.sleep(0.000075)

    click.echo('\n')

    graph = Pyasciigraph()

    #Statistics animation ([2:] skips the graph's title lines)
    for line in graph.graph(f'Description: {my_desc}', data)[2:]:
        sys.stdout.write('                                 ')
        for letter in line:
            sys.stdout.write(letter)
            sys.stdout.flush()
            time.sleep(0.02)
        print('\n')

    click.secho('''
                                                OPTIONS:
                                                -a : About Tweetle
                                                -c : Get a list of commands
                                                -q : Quit
''',
                bold=True,
                fg='white')

    #Command prompts: dispatch on the entered option until '-q'
    while True:
        click.echo('\n')
        inp = click.prompt('Tweetle')

        if inp.lower() == '-a':
            comm.about()

        elif inp.lower() == '-q':
            break

        elif inp.lower() == '-c':
            comm.commands()

        elif inp.startswith('-tweet'):
            status = inp.split(' ')[1:]
            click.echo(status)
            comm.tweet(status)

        elif inp.startswith('-fetch'):
            num = inp.split(' ')[-1]
            keyword = inp.split(' ')[1:-1]
            comm.fetch(sql, keyword, int(num))

        elif inp.startswith('-clean'):
            comm.CleanDB(sql)

        elif inp.startswith('-retweet'):
            num = inp.split(' ')[1]
            comm.retweet(sql, int(num))

        elif inp.startswith('-data'):
            comm.alldata(sql)

        elif inp.startswith('-row'):
            num = inp.split(' ')[1]
            comm.getrow(sql, int(num))

        elif inp.startswith('-first'):
            num = inp.split(' ')[1]
            comm.top(sql, int(num))

        elif inp.startswith('-latest'):
            comm.bytime(sql)

        else:
            click.echo('Invalid Option')
Пример #15
0
def chart (tochart) :
    """Render *tochart* as an ascii graph using the active theme's style."""
    renderer = Pyasciigraph(graphsymbol=themes[theme]['symbol'])
    coloured = vcolor(tochart, themes[theme]['chart'])
    for rendered_row in renderer.graph(label=None, data=coloured):
        print(rendered_row)
Пример #16
0
        ('testval2', 501, URed),
        ('testval3', 103, IRed),
        ('testval4',  29, BIGre),
        ('testval5',  19, UYel),
        ('testval6',  99, ICya),
        ('testval7', 404, BBlu)]
# Render the per-row-coloured test data defined above.
graph = Pyasciigraph()
for row in graph.graph('test graph', test):
    print(row)


# Coloring data according to a pattern (one color each line)
from ascii_graph.colordata import vcolor

test = [('testval%d' % idx, val)
        for idx, val in enumerate([600, 500, 400, 400, 300, 200, 100, 50])]

pattern = [Gre, Yel, Red]

data = vcolor(test, pattern)
for row in graph.graph('vcolor test', data):
    print(row)

exit(0)
Пример #17
0
        ('testval3', 400, Red), ('testval4', 300, Gre), ('testval5', 200, Yel),
        ('testval6', 100, Cya), ('testval7', 50, Blu)]

graph = Pyasciigraph()
for row in graph.graph('test graph', test):
    print(row)

# Bold and underline colour attributes on individual rows
test = [
    ('testval0', 142),
    ('testval1', 204, BPur),
    ('testval2', 501, URed),
    ('testval3', 103, IRed),
    ('testval4', 29, BIGre),
    ('testval5', 19, UYel),
    ('testval6', 99, ICya),
    ('testval7', 404, BBlu),
]
graph = Pyasciigraph()
for row in graph.graph('test graph', test):
    print(row)

# Coloring data according to a pattern (one color each line)
from ascii_graph.colordata import vcolor

test = [('testval%d' % idx, val)
        for idx, val in enumerate([600, 500, 400, 400, 300, 200, 100, 50])]

pattern = [Gre, Yel, Red]

data = vcolor(test, pattern)
for row in graph.graph('vcolor test', data):
    print(row)

exit(0)
Пример #18
0
def main():
  """Profile a Reddit user's word usage and print an ASCII bar graph.

  Parses CLI arguments, loads Reddit API credentials from ``config.json``,
  downloads the user's newest comments via praw, tallies target-word
  occurrences per subreddit, then prints an optional verbose breakdown,
  summary statistics, and a per-subreddit bar graph.
  """
  parser = argparse.ArgumentParser(
          description='Profiling a Reddit user\'s word usage',
          epilog="The data was there all along.")

  required = parser.add_argument_group('required arguments')
  required.add_argument('-u', '--user', type=str, help="Reddit username to analyze", required=True)
  parser.add_argument('-l', '--limit', type=int, help="Number of comments to profile, defaults to 100 (upper limit of 999 imposed by Reddit)", default=100)
  parser.add_argument('-d', '--dict', type=str, help="Path to target word definitions. Make sure file is in 'lists' directory and include .txt. Each word must be on separate line", default='bad_words.txt')
  parser.add_argument('-s', '--sort', help="Sort graph. Include 'inc' for increasing or 'dec' for decreasing", dest='sort')
  parser.add_argument('-v', '--verbose', help="Include verbose breakdown for each Subreddit", dest='verbose', action='store_true')
  parser.set_defaults(verbose=False)
  parser.add_argument('-c', '--color', help="Included intensity colored graph, must have ANSI color enabled", dest='color', action='store_true')
  parser.set_defaults(color=False)

  # Gather arguments
  args = parser.parse_args()
  username = args.user
  limit = args.limit
  dict_path = args.dict
  verbose_output = args.verbose
  color_output = args.color
  sort = args.sort

  # Create the praw.Reddit object from credentials stored in config.json
  with open("config.json", "r") as file:
    config = json.load(file)

  r = praw.Reddit(client_id=config["client_id"],
                  client_secret=config["client_secret"],
                  user_agent=config["user_agent"])

  # Redditor object for the requested username
  user = r.redditor(username)
  # Load list of targeted words (one word per line)
  with open("lists/" + dict_path) as f:
    targetwords = f.read().splitlines()

  # Per-subreddit tallies: subreddit name -> SubredditData
  data = {}

  # Main loop over the user's newest comments
  comments_affected = 0
  found_in_comment = False
  num_comments = 0
  for comment in user.comments.new(limit=limit):
    # Keep track of actual number of comments fetched
    num_comments += 1
    # Lower-case the body to mitigate case sensitivity
    c_body = comment.body.lower()
    for t_word in targetwords:
      # Count each whole-word match; remove one occurrence per hit so the
      # loop terminates.
      # NOTE(review): str.replace removes the first *substring* occurrence,
      # which may differ from the word-boundary match the regex found, so
      # counts can skew when t_word also appears inside longer words —
      # confirm whether that is acceptable.
      while re.search(r"\b" + re.escape(t_word) + r"\b", c_body):
        found_in_comment = True
        c_body = c_body.replace(t_word, '', 1)
        sr_str = str(comment.subreddit)

        # Catalogue the hit under its subreddit, creating the record on
        # first sight.
        if sr_str in data:
          data[sr_str].add_word(word=t_word)
        else:
          data[sr_str] = SubredditData(sr_str)
          data[sr_str].add_word(word=t_word)
    if found_in_comment:
      # Reset flag; this comment contained at least one target word.
      found_in_comment = False
      comments_affected += 1

  # Sort verbose output if desired
  if sort is not None:
    # Sort subreddits by total count, then sort each word list by
    # number of occurrences.
    sort = sort.lower()
    if sort == 'inc':
      data = OrderedDict(sorted(data.items(), key=lambda x: x[1].total_count, reverse=False))
      for k, v in data.items():
        v.word_dict = OrderedDict(sorted(v.word_dict.items(), key=operator.itemgetter(1), reverse=False))
    elif sort == 'dec':
      data = OrderedDict(sorted(data.items(), key=lambda x: x[1].total_count, reverse=True))
      for k, v in data.items():
        v.word_dict = OrderedDict(sorted(v.word_dict.items(), key=operator.itemgetter(1), reverse=True))
  else:
      # Sort subreddits and word lists alphabetically
      data = OrderedDict(sorted(data.items(), key=lambda x: x[1].name.lower(), reverse=False))
      for k, v in data.items():
        v.word_dict = OrderedDict(sorted(v.word_dict.items(), key=operator.itemgetter(0), reverse=False))

  if verbose_output:
    print("\nBreakdown by individual subreddit\n")
    # Iterate through key, value pairs for results
    for k, v in data.items():
      print("/r/"+k)
      for word in v.word_dict:
        print("  '"+word+"' appears: "+ str(v.word_dict[word])+" time(s).")

  print("\nTotal comments analyzed: "+ str(num_comments))
  print("Number of comments containing target words: " + str(comments_affected))
  if num_comments > 0:
    # BUG FIX: original called undefined floats() (NameError); use float()
    # so the division is true division on every Python version.
    ratio = round((comments_affected / float(num_comments)) * 100, 3)
    print("Percentage of comments containing target words: "+str(ratio) +"%")

  # Build (label, count) rows for the graph
  bar_graph = []
  for k, v in data.items():
    bar_graph.append((k, v.total_count))

  # Determine each row's color if desired
  if color_output:
    pattern = create_color_pattern(data)
    bar_graph = vcolor(bar_graph, pattern)

  # Sort graph if desired
  if sort is not None:
    sort = sort.lower()
    if sort == 'inc':
      bar_graph = sorted(bar_graph, key=lambda value: value[1], reverse=False)
    elif sort == 'dec':
      bar_graph = sorted(bar_graph, key=lambda value: value[1], reverse=True)

  # Create graph
  graph = Pyasciigraph(
    graphsymbol='|',
    human_readable='si',
    multivalue=False,
    min_graph_length=1
    )

  # Title of graph
  print("\n"+username+"'s Graph")
  # Display graph
  for line in graph.graph(label=None, data=bar_graph):
    print(line)

  # Display message if graph was empty
  if not data:
    print("Hmm. Nothing found")
Пример #19
0
def main():
  """Profile a Reddit user's word usage (legacy praw 3.x variant).

  Same flow as the praw 4 version: parse CLI arguments, fetch the user's
  recent comments, tally target-word occurrences per subreddit, then print
  an optional verbose breakdown, summary statistics, and an ASCII graph.

  NOTE(review): r.get_redditor()/user.get_comments() are the pre-4.0 praw
  API and were removed in praw 4 — confirm the installed praw version.
  """
  parser = argparse.ArgumentParser(
          description='Profiling a Reddit user\'s word usage',
          epilog="The data was there all along.")

  required = parser.add_argument_group('required arguments')
  required.add_argument('-u', '--user', type=str, help="Reddit username to analyze", required=True)
  parser.add_argument('-l', '--limit', type=int, help="Number of comments to profile, defaults to 100 (upper limit of 999 imposed by Reddit)", default=100)
  parser.add_argument('-d', '--dict', type=str, help="Path to target word definitions. Make sure file is in 'lists' directory and include .txt. Each word must be on separate line", default='bad_words.txt')
  parser.add_argument('-s', '--sort', help="Sort graph. Include 'inc' for increasing or 'dec' for decreasing", dest='sort')
  parser.add_argument('-v', '--verbose', help="Include verbose breakdown for each Subreddit", dest='verbose', action='store_true')
  parser.set_defaults(verbose=False)
  parser.add_argument('-c', '--color', help="Included intensity colored graph, must have ANSI color enabled", dest='color', action='store_true')
  parser.set_defaults(color=False)

  # Gather arguments
  args = parser.parse_args()
  username = args.user
  limit = args.limit
  dict_path = args.dict
  verbose_output = args.verbose
  color_output = args.color
  sort = args.sort

  # Identifies our script and allows more requests
  user_agent = "Word Profiler v0.2 by i_am_hoenn"
  r = praw.Reddit(user_agent=user_agent)

  # Reddit object for the requested username
  user = r.get_redditor(username)
  # Load list of targeted words (one word per line)
  with open("lists/" + dict_path) as f:
    targetwords = f.read().splitlines()

  # Per-subreddit tallies: subreddit name -> SubredditData
  data = {}

  # Main loop over the user's recent comments
  comments_affected = 0
  found_in_comment = False
  num_comments = 0
  for comment in user.get_comments(limit=limit):
    # Keep track of actual number of comments fetched
    num_comments += 1
    # Lower-case the body to mitigate case sensitivity
    c_body = comment.body.lower()
    for t_word in targetwords:
      # Count each whole-word match; remove one occurrence per hit so the
      # loop terminates.
      # NOTE(review): str.replace removes the first *substring* occurrence,
      # which may differ from the word-boundary match the regex found, so
      # counts can skew when t_word also appears inside longer words —
      # confirm whether that is acceptable.
      while re.search(r"\b" + re.escape(t_word) + r"\b", c_body):
        found_in_comment = True
        c_body = c_body.replace(t_word, '', 1)
        sr_str = str(comment.subreddit)

        # Catalogue the hit under its subreddit, creating the record on
        # first sight.
        if sr_str in data:
          data[sr_str].add_word(word=t_word)
        else:
          data[sr_str] = SubredditData(sr_str)
          data[sr_str].add_word(word=t_word)
    if found_in_comment:
      # Reset flag; this comment contained at least one target word.
      found_in_comment = False
      comments_affected += 1

  # Sort verbose output if desired
  if sort is not None:
    # Sort subreddits by total count, then sort each word list by
    # number of occurrences.
    sort = sort.lower()
    if sort == 'inc':
      data = OrderedDict(sorted(data.items(), key=lambda x: x[1].total_count, reverse=False))
      for k, v in data.items():
        v.word_dict = OrderedDict(sorted(v.word_dict.items(), key=operator.itemgetter(1), reverse=False))
    elif sort == 'dec':
      data = OrderedDict(sorted(data.items(), key=lambda x: x[1].total_count, reverse=True))
      for k, v in data.items():
        v.word_dict = OrderedDict(sorted(v.word_dict.items(), key=operator.itemgetter(1), reverse=True))
  else:
      # Sort subreddits and word lists alphabetically
      data = OrderedDict(sorted(data.items(), key=lambda x: x[1].name.lower(), reverse=False))
      for k, v in data.items():
        v.word_dict = OrderedDict(sorted(v.word_dict.items(), key=operator.itemgetter(0), reverse=False))

  if verbose_output:
    print("\nBreakdown by individual subreddit\n")
    # Iterate through key, value pairs for results
    for k, v in data.items():
      print("/r/"+k)
      for word in v.word_dict:
        print("  '"+word+"' appears: "+ str(v.word_dict[word])+" time(s).")

  print("\nTotal comments analyzed: "+ str(num_comments))
  print("Number of comments containing target words: " + str(comments_affected))
  if num_comments > 0:
    # BUG FIX: under Python 2 (the era of this praw 3 API) `/` is integer
    # division, which always yields 0 here; coerce to float so the ratio
    # is computed correctly on every Python version (and to stay
    # consistent with the praw 4 variant of this script).
    ratio = round((comments_affected / float(num_comments)) * 100, 3)
    print("Percentage of comments containing target words: "+str(ratio) +"%")

  # Build (label, count) rows for the graph
  bar_graph = []
  for k, v in data.items():
    bar_graph.append((k, v.total_count))

  # Determine each row's color if desired
  if color_output:
    pattern = create_color_pattern(data)
    bar_graph = vcolor(bar_graph, pattern)

  # Sort graph if desired
  if sort is not None:
    sort = sort.lower()
    if sort == 'inc':
      bar_graph = sorted(bar_graph, key=lambda value: value[1], reverse=False)
    elif sort == 'dec':
      bar_graph = sorted(bar_graph, key=lambda value: value[1], reverse=True)

  # Create graph
  graph = Pyasciigraph(
    graphsymbol='|',
    human_readable='si',
    multivalue=False,
    min_graph_length=1
    )

  # Title of graph
  print("\n"+username+"'s Graph")
  # Display graph
  for line in graph.graph(label=None, data=bar_graph):
    print(line)

  # Display message if graph was empty
  if not data:
    print("Hmm. Nothing found")