예제 #1
0
def get_creds(n, peers):
    """Resolve the credentials for a node given its name or IP address.

    :param n: node name (credential-file basename) or an IP address.
    :param peers: dict mapping node name -> peer-info dict with keys
        'hname', 'ip', 'pword', 'mac', 'connected'.
    :return: tuple (ip, mac, hname, pword, connected); five empty
        strings when the node cannot be resolved.
    """
    # BUG FIX: the original tested `n in peers.values()` -- comparing the
    # name against the peer-info dicts, which can never match -- and then
    # indexed `peers[n]` anyway. Membership must be tested on the keys.
    if n in peers:
        p = peers[n]
    else:
        found = False
        nc = 'PoolData/Creds/%s.pem' % n
        if os.path.isfile(os.getcwd() + '/' + nc):
            # n is a known node name: load its credential file directly.
            # (pw renamed from `p` so the password no longer shadows the
            # peer-info dict built below.)
            h, i, pw, m = utils.load_credentials(nc.split('/')[-1].split('.')[0], False)
            p = {'hname': h, 'ip': i, 'pword': pw, 'mac': m, 'connected': True}
            found = True
        else:
            # n may be an IP address: scan every credential file for a
            # matching ip field.
            for creds in os.listdir(os.getcwd() + '/PoolData/Creds'):
                cf = os.getcwd() + '/PoolData/Creds/%s' % creds
                if os.path.isfile(cf):
                    h, i, pw, m = utils.load_credentials(cf.split('/')[-1].split('.')[0], False)
                    if i == n:
                        p = {'hname': h, 'ip': i, 'pword': pw, 'mac': m, 'connected': True}
                        found = True
                        break
        if not found:
            print('[!!] cannot find %s' % n)
            return '', '', '', '', ''

    ip = p['ip']
    mac = p['mac']
    hname = p['hname']
    pword = p['pword']
    connected = p['connected']
    return ip, mac, hname, pword, connected
예제 #2
0
def main():
    """Parse command-line flags and dispatch pool administration actions.

    Loads peer credentials when a .env file is present, then runs each
    recognized --flag in turn; prints help() when nothing matched.
    """
    verbose = True
    peers = []
    used = False
    if os.path.isfile('.env'):
        h, e, i, s = setup.load_local_vars()

        peers = setup.test_connections(True)
        print('[*] %d Peers Connected' % count_connected(peers))

    if '--cmd-all' in sys.argv:
        if len(peers):
            # NOTE(review): replies is computed but never shown before
            # exit() -- confirm whether cmd_all prints its own output.
            replies = cmd_all(peers, utils.arr2chr(sys.argv[2:]))

            exit()
        else:
            print('[!!] No peers to command (need creds)')
        used = True

    if '--update' in sys.argv:
        update_all(peers)
        used = True
        exit()

    # BUG FIX: this branch reads sys.argv[2], so it needs at least three
    # arguments; the original only required len(sys.argv) > 1.
    if '--check-peers' in sys.argv and len(sys.argv) > 2:
        rps = dump_peers(sys.argv[2], peers)
        used = True
        if verbose:
            print(rps)

    if '--check-shares' in sys.argv and len(sys.argv) > 2:
        shares = dump_shares(sys.argv[2], peers)
        used = True
        if verbose:
            print(shares)

    if '--exec' in sys.argv and len(sys.argv) > 3:
        reply = exec_rmt(sys.argv[2], peers, utils.arr2chr(sys.argv[3:]))
        used = True
        print(reply)

    if '--show-commands' in sys.argv and len(sys.argv) > 2:
        print(sys.argv[2])
        reply = list_commands(sys.argv[2], peers)
        used = True
        print(reply)

    # BUG FIX: the target is sys.argv[2]; the original passed sys.argv[1],
    # which is the '--file-op' flag itself when the script is invoked as
    # `prog --file-op <target> <op> <file>`.
    if '--file-op' in sys.argv and len(sys.argv) > 4:
        result = query_file(sys.argv[2], peers, sys.argv[3], sys.argv[4])
        used = True
        print(result)

    if '--list-local-peers' in sys.argv:
        # (loop variable renamed from `p`, which the original clobbered
        # with the password from load_credentials on every iteration)
        for node in peers:
            n = node.split('/')[-1].split('.')[0]
            h, i, p, m = utils.load_credentials(n, False)
            print('%s \t %s \t %s' % (n, h, m))
        # BUG FIX: mark the flag as handled so help() is not printed.
        used = True

    if not used:
        # NOTE(review): presumably a project-level help(); shadows the
        # builtin help if not defined elsewhere -- confirm.
        help()
예제 #3
0
def main():
    """Dispatch the traceroute workload based on command-line flags.

    Modes (selected via sys.argv):
      -gen A B      : call ip_block(A, B) (helper not visible here --
                      presumably generates the ip_space.txt seed file)
      -multi-setup  : push trace.py out to every reachable peer node
      -multi-view   : import tracewatch and monitor remote traces
      (default)     : trace shuffled IPs locally, appending to hops.txt
    """
    if '-gen' in sys.argv:
        A = int(sys.argv[2])
        B = int(sys.argv[3])
        ip_block(A, B)

    # Create a way to run this distributed from python
    elif '-multi-setup' in sys.argv:
        import network
        workers = []
        peers = {}
        # Determine which nodes are available to work
        for n in network.get_node_names():
            name = n.split('/')[-1].split('.')[0]
            h, i, p, m = utils.load_credentials(name, False)
            peers[name] = [h, i, p, m]
            # check if online
            if network.check_connected(h, i, p):
                workers.append(name)
            else:
                print '%s@%s offline' % (name, i)

        # Now distribute the assignments
        print '[*] %d Workers Available' % len(workers)
        for w in workers:
            # give them the latest copy of this program
            H, I, P, M = peers[w]
            rpath = '/home/%s/Documents/PoolParty/code/0.6/' % H
            # Remove any stale remote copy of trace.py before uploading.
            if utils.remote_file_exists(H, I, P, rpath + 'trace.py'):
                utils.ssh_exec('rm %strace.py' % rpath, I, H, P, False)
            utils.put_file('trace.py', rpath, H, I, P, False)
            # Now give them a ip_space.txt file and a script to run trace.py
            utils.ssh_exec('rm %shops.txt' % rpath, I, H, P, True)
            # c = 'cd %s; python trace.py 0>&-' % rpath
            # put this in a .sh script and transfer, then execute
            # utils.ssh_exec('cd %s; python trace.py 0>&-' % rpath,H,I,P,False)

    elif '-multi-view' in sys.argv:
        import tracewatch
        print('[*] Monitoring Traces...')

    else:
        # Local mode: trace every IP in ip_space.txt in random order.
        ips = list(utils.swap('ip_space.txt', False))
        if not os.path.isfile(os.getcwd() + '/hops.txt'):
            os.system('touch hops.txt')
        random.shuffle(ips)
        pool = multiprocessing.Pool(10)
        for ip in ips:
            try:
                # One async trace per IP with a 60 s deadline; results are
                # appended to hops.txt as "ip:hops".
                event = pool.apply_async(func=trace, args=(ip, False))
                hopdata, nhops = event.get(timeout=60)
                print ' - %d hops to %s' % (nhops, ip)
                open('hops.txt', 'a').write('%s:%d\n' % (ip, nhops))
            except multiprocessing.TimeoutError:
                # Record '?' for a trace that exceeded the deadline.
                open('hops.txt', 'a').write('%s:?\n' % ip)
                pass
예제 #4
0
def check_for_messages(nodes, channels):
    """Walk every known peer and report which channels list it as a member.

    :param nodes: not referenced by this implementation (kept for
        interface compatibility).
    :param channels: dict mapping channel name -> collection of node
        names subscribed to that channel.
    """
    # Worker pool reserved for the upcoming transfers.
    pool = multiprocessing.Pool(15)
    # Visit the credential files in a random order.
    cred_paths = utils.get_node_names()
    random.shuffle(cred_paths)
    for path in cred_paths:
        short_name = path.split('/')[-1].split('.')[0]
        host, host_ip, host_pass, host_mac = utils.load_credentials(short_name, False)
        # Announce every channel this node belongs to.
        for channel, members in channels.items():
            if short_name in members:
                print('[*] Checking if %s has %s data' % (short_name, channel))
예제 #5
0
    def distribute(self):
        """Push each local share file to its designated recipient peer.

        For every peer in self.peers: if the peer's remote Shares
        directory exists, upload each file from the local
        PoolData/Shares whose self.distro entry names that peer and
        which the peer does not already hold; otherwise create the
        remote PoolData/Shares directory tree so a later pass can
        deliver the files.
        """
        # now make sure all remote hosts have share folders for receiving
        for peer in self.peers:
            h, i, p, k = utils.load_credentials(peer, False)
            if utils.remote_file_exists(
                    h, i, p,
                    '/home/%s/PoolParty/code/0.5/PoolData/Shares' % h) == 1:
                # get their shares
                # NOTE(review): share_names is fetched but never used
                # below -- confirm whether remote hashing was meant to
                # consume it.
                share_names = utils.ssh_exec(
                    'ls /home/%s/PoolParty/code/0.5/PoolData/Shares' % h, i, h,
                    p, False)
                # remote machine needs to hash its shares
                # distribute this peers files too
                # for fs in share_distro[peer]:
                for fs in os.listdir('PoolData/Shares'):
                    recipient = self.distro[fs]
                    rh, ri, rp, rk = utils.load_credentials(recipient, False)
                    f = 'PoolData/Shares/' + fs
                    rf = '/home/%s/PoolParty/code/0.5/PoolData/Shares/' % rh

                    # Only upload when this peer is the intended recipient
                    # and the file is absent remotely.
                    if recipient == peer and utils.remote_file_exists(
                            h, i, p, rf + fs) == 0:
                        print('Giving %s file: %s' % (recipient, fs))
                        # print(rf+fs)
                        utils.put_file(f, rf, rh, ri, rp, True)
                    # else:
                    # 	print('%s has file %s' % (recipient, fs))
            else:
                # Remote Shares dir missing: build PoolData then Shares.
                if utils.remote_file_exists(
                        h, i, p,
                        '/home/%s/PoolParty/code/0.5/PoolData' % h) == 0:
                    utils.ssh_exec(
                        'mkdir /home/%s/PoolParty/code/0.5/PoolData' % h, i, h,
                        p, False)
                utils.ssh_exec(
                    'mkdir /home/%s/PoolParty/code/0.5/PoolData/Shares' % h, i,
                    h, p, False)
예제 #6
0
 def check_disconnected_nodes(self, threadpool, verbose):
     """Re-probe nodes that are unknown to, or marked disconnected in,
     self.workers, and (re)attach the reachable ones to the pool.

     :param threadpool: multiprocessing pool used to bound each SSH
         connectivity probe to a 5 second timeout.
     :param verbose: not referenced in this body -- TODO confirm intent.
     """
     for node in network.get_node_names():
         if node not in self.workers.keys():
             # see if node is now connected
             h, i, p, m = utils.load_credentials(node, False)
             e = threadpool.apply_async(func=network.check_connected,
                                        args=(
                                            h,
                                            i,
                                            p,
                                        ))
             try:
                 # NOTE(review): node is NOT a key of self.workers in this
                 # branch, so this item assignment (and the check below)
                 # looks like it raises KeyError unless self.workers has
                 # defaultdict/__missing__ semantics -- confirm its type.
                 self.workers[node]['connected'] = e.get(timeout=5)
             except multiprocessing.context.TimeoutError:
                 self.disconnect_node_from_pool(node)
                 pass
             except IndexError:
                 self.disconnect_node_from_pool(node)
                 pass
             if self.workers[node]['connected']:
                 self.connect_node_to_pool(node)
         elif not self.workers[node]['connected']:
             # see if node is now connected
             h = self.workers[node]['hname']
             i = self.workers[node]['ip']
             p = self.workers[node]['pword']
             m = self.workers[node]['mac']
             print('[!!] checking if %s [%s] might have a new ip...' %
                   (h, m))
             e = threadpool.apply_async(func=network.check_connected,
                                        args=(
                                            h,
                                            i,
                                            p,
                                        ))
             try:
                 self.workers[node]['connected'] = e.get(timeout=5)
             except multiprocessing.context.TimeoutError:
                 self.disconnect_node_from_pool(node)
                 pass
             except IndexError:
                 self.disconnect_node_from_pool(node)
                 pass
             if self.workers[node]['connected']:
                 self.connect_node_to_pool(node)
             else:
                 # check if mac matches another ip in LAN
                 # NOTE(review): the result is bound but not applied to
                 # self.workers here -- confirm a later step consumes it.
                 updated_client_table = network.find_missing_nodes()
예제 #7
0
def find_missing_nodes():
    """Search the LAN for known nodes whose IP changed since their
    credential file was written.

    Compares each node's recorded MAC against a live nmap scan of
    192.168.1.0/24 (port 22) and returns a dict keyed by MAC:
        mac -> [hostname, new_ip]           for relocated nodes
        mac -> [hostname, old_ip, mac]      for other scanned MACs
    NOTE(review): the two value shapes are inconsistent (2 vs 3 items);
    confirm what callers expect before relying on this format.
    """
    # Look at LAN for whats connected
    # compare to MACs and find new IPs if they match
    nodes = []
    disconnected = []
    for n in get_node_names():
        nodes.append(n.split('/')[-1].split('.')[0])
    nodes = list(set(nodes))  # remove any duplicates if they exist
    node_table = {}
    old_ref = {}
    # Check which of these appear offline (because IP changed)
    for n in nodes:
        h, i, p, m = utils.load_credentials(n, False)
        old_ref[m] = [h, i, m]
        check = False
        try:
            check = check_connected(h, i, p)
        except IndexError:
            pass
        if not check:
            node_table[m] = ''
            disconnected.append(m)
        else:
            node_table[m] = [h, i]

    # fill node_table with new IPs for those MACs if they can be found
    node_ref = {}
    corrected = {}
    print('[*] %d Disconnected Nodes' % len(disconnected))
    if len(disconnected):
        # Scan the subnet for SSH hosts; nmap output is parsed line by
        # line, pairing each "scan report for <host>" with the
        # "MAC Address:" line that follows it.
        test_scan = utils.cmd('sudo nmap -T5 -Pn 192.168.1.0/24 -p 22', False)
        current_host = ''
        for line in test_scan:
            if len(line.split(' scan report for ')) > 1:
                current_host = line.split(' report for ')[1]
                node_ref[current_host] = ''
            if len(line.split('MAC Address: ')) > 1:
                mac = line.split('MAC Address: ')[1].split(' ')[0]
                # node_ref[mac] = current_host
                if mac in node_table.keys() and mac in disconnected:
                    h, oi, m = old_ref[mac]
                    corrected[mac] = [h, current_host]
                else:
                    corrected[mac] = old_ref[mac]

    return corrected
예제 #8
0
def load_workers():
    """Return ([worker names], {name: [host, ip, password, mac]}).

    A node counts as an active worker when it is reachable over SSH and
    already has a hops.txt in its PoolParty 0.6 working directory.
    """
    nodes = network.get_node_names()
    workers = []
    peers = {}
    # Determine which nodes are available to work.
    # BUG FIX: iterate the list fetched above instead of calling
    # network.get_node_names() a second time.
    for n in nodes:
        name = n.split('/')[-1].split('.')[0]
        h, i, p, m = utils.load_credentials(name, False)
        peers[name] = [h, i, p, m]
        # A node is working only if online AND it already has hops.txt.
        rp = '/home/%s/Documents/PoolParty/code/0.6/hops.txt' % h
        if network.check_connected(h, i, p) and utils.remote_file_exists(
                h, i, p, rp):
            workers.append(name)
        else:
            # (single-argument parenthesized print: same output under
            # Python 2 and Python 3)
            print('%s@%s offline' % (name, i))
    # Now distribute the assignments
    print('[*] %d Machines Actively Working' % len(workers))
    return workers, peers
예제 #9
0
def test_connections(debug):
    """SSH `whoami` into every known node and report which ones answer.

    :param debug: passed through to utils.load_credentials.
    :return: dict mapping credential-file path -> peer-info dict with
        keys 'hname', 'ip', 'pword', 'mac', 'connected'.
    """
    n_threads = 10
    peers = {}
    pool = multiprocessing.Pool(n_threads)
    try:
        # Load Peer Credentials, probing in random order.
        peer_list = network.get_node_names()
        random.shuffle(peer_list)
        for node in peer_list:
            n = node.split('/')[-1].split('.')[0]
            host, host_ip, host_pass, host_mac = utils.load_credentials(n, debug)
            # Run `whoami` remotely with a hard 10 s timeout.
            event = pool.apply_async(func=utils.ssh_exec,
                                     args=(
                                         'whoami',
                                         host_ip,
                                         host,
                                         host_pass,
                                         False,
                                     ))
            try:
                test_cnx = event.get(timeout=10).pop()
            except (multiprocessing.TimeoutError, IndexError):
                # BUG FIX: merged the two identical handlers; pop() on an
                # empty result raises IndexError, and a hung connection
                # raises TimeoutError (multiprocessing.TimeoutError is the
                # same class the original named via .context).
                test_cnx = ''
            peer = {
                'hname': host,
                'ip': host_ip,
                'pword': host_pass,
                'mac': host_mac,
                'connected': False
            }
            # A correct `whoami` echo proves login worked.
            if test_cnx.replace('\n', '') == host:
                print('[*] Connected to %s' % node)
                peer['connected'] = True
            else:
                print('[!] Unable to connect to %s' % node)
                # TODO: Search for that MAC address on the network!
            peers[node] = peer
    finally:
        # BUG FIX: the original leaked the pool's worker processes;
        # close() stops the pool accepting work so it can shut down.
        pool.close()
    return peers
예제 #10
0
def test_connections(debug):
    """SSH `whoami` into every known node and report which ones answer.

    :param debug: verbosity flag; also passed to utils.load_credentials.
    :return: dict mapping short node name -> peer-info dict with keys
        'hname', 'ip', 'pword', 'mac', 'connected'.
    """
    n_threads = 15
    peers = {}
    pool = multiprocessing.Pool(n_threads)
    try:
        # Load Peer Credentials, probing in random order.
        peer_list = utils.get_node_names()
        random.shuffle(peer_list)
        for node in peer_list:
            n = node.split('/')[-1].split('.')[0]
            host, host_ip, host_pass, host_mac = utils.load_credentials(n, debug)
            peer = {
                'hname': host,
                'ip': host_ip,
                'pword': host_pass,
                'mac': host_mac,
                'connected': False
            }
            event = pool.apply_async(func=utils.ssh_exec,
                                     args=(
                                         'whoami',
                                         host_ip,
                                         host,
                                         host_pass,
                                         False,
                                     ))
            try:
                test_cnx = event.get(timeout=3).pop().replace(
                    '\n', '')  # a bit on shorter side
            except Exception:
                # BUG FIX: the original had a second `except IndexError`
                # clause after this one, which was unreachable because
                # IndexError subclasses Exception; removed the dead code.
                test_cnx = ''
            # Verify whether command was executed correctly
            if test_cnx == host:
                print('\033[1m\033[33m[*] Connected to %s\033[0m' % node)
                peer['connected'] = True
            elif debug:
                print('\033[1m\033[31m[!] Unable to connect to %s\033[0m' % node)
            peers[n] = peer
    finally:
        # BUG FIX: shut the worker pool down instead of leaking it.
        pool.close()
    return peers
예제 #11
0
            "text": tweet['text']
        }
        collection.insert(raw_tweet, continue_on_error=True)
    try:
        add_latest_tweets(tweeter_id)
    except:
        sys.stdout.write("cant add tweets")
        return True


if __name__ == '__main__':
    # Mongo collection for raw tweets, deduplicated on tweet id via a
    # unique index.
    client = pymongo.MongoClient()
    collection = client.dealtrader.raw_tweets
    collection.create_index([("id", pymongo.ASCENDING)], unique=True)

    # Twitter API handle built from stored credentials.
    credentials = load_credentials()
    auth = tweepy_auth(credentials, user=True)
    api = tweepy_api(auth)

    # Optional screen name to follow; defaults to False (falsy) when the
    # flag is absent.
    ap = argparse.ArgumentParser()
    ap.add_argument("-id",
                    "--follow_id",
                    required=False,
                    default=False,
                    help="Screen name to follow")
    args = vars(ap.parse_args())
    followid = args['follow_id']

    if followid:
        # Resolve the screen name to a numeric id and echo it.
        tweeter_id = return_follow_id(tweeter=followid)
        sys.stdout.write(tweeter_id)
def get_recordings_for_all_users(
    driver_location: str,
    show_driver: bool,
    end_date: str,
    cookies_file: str,
    config_file: str,
    info_file: str,
    output_dir: str,
    user_agent: str,
    user: Optional[str],
    download_duplicates: bool = False,
    system: str = "linux",
) -> None:
    """
    Runs the recording script for all users. If one user errors out, then the script will record the error
    for that user in the correct subdirectory and will move on to the next users.
    The number of users (and their usernames and passwords) are specified in the config file.

    :param driver_location: Location of the WebDriver.
    :param show_driver: Whether to show the driver or run it in the background.
    :param end_date: The earliest date to search for recordings from. The date range of recordings will be from
        this date to the date the script is ran on.
    :param cookies_file: The file location of the previous cookies (if any).
    :param config_file: The credentials file which stores the usernames and passwords for users.
    :param info_file: The file where the recording metadata will be saved.
    :param output_dir: The folder / directory where the recording wav files will be saved.
    :param user_agent: The user agent of the driver.
    :param user: A specific user to run this script for.
    :param download_duplicates: Whether the script should find the metadata for recordings that already exist
        within the config file or not.
    :param system: The OS where the script is running.

    :return: None.
    """
    credentials = load_credentials(credentials_file=config_file, user=user)
    total_users = len(credentials)
    # Refuse to run against an unmodified example credentials file or an
    # empty account list.
    if credentials == load_credentials(
            'credentials.example') or total_users == 0:
        print_log(
            "ERROR: Please modify the credentials.json file and add an account to use."
        )
        return

    today_date = get_today_date_mm_dd_yyyy()
    error_file_name = "errors.json"
    output_dir_name = output_dir.split("/")[-1]

    for i, credentials_for_one_user in enumerate(credentials):
        # A fresh driver per user keeps browser sessions isolated.
        web_driver = create_driver(
            user_agent=user_agent,
            show=show_driver,
            system=system,
            driver_location=driver_location,
        )
        username = credentials_for_one_user["username"]
        password = credentials_for_one_user["password"]
        print_log(
            f"Working on user #{i+1}: {username} (out of {total_users} users)."
        )
        path_where_recordings_are_saved = get_recording_path(
            date=today_date, output_folder=output_dir_name, username=username)
        try:
            get_recordings(
                driver=web_driver,
                end_date=end_date,
                cookies_file=cookies_file,
                username=username,
                password=password,
                info_file=info_file,
                user_agent=user_agent,
                download_duplicates=download_duplicates,
                system=system,
                path_where_recordings_are_saved=path_where_recordings_are_saved,
            )
        except Exception as e:
            # Record the failure for this user and move on to the next.
            print_log(
                f"ERROR: The script has errored out for user {username}. These recordings will be skipped. "
                f"You can check the error file ({error_file_name}) for all errors."
            )
            error_dict = {
                username: {
                    "error message": str(e),
                    "full stack message": get_full_stack(),
                }
            }
            save_errors(
                errors=error_dict,
                recording_path=path_where_recordings_are_saved,
                error_file_name=error_file_name,
            )
        finally:
            # BUG FIX: quit() was duplicated on the success and error
            # paths and could be skipped entirely if save_errors itself
            # raised; `finally` guarantees the driver is always torn down.
            web_driver.quit()

        print("\n")
예제 #13
0
            "text": tweet["text"],
        }
        collection.insert(raw_tweet, continue_on_error=True)
    try:
        add_latest_tweets(tweeter_id)
    except:
        sys.stdout.write("cant add tweets")
        return True


if __name__ == "__main__":
    client = pymongo.MongoClient()
    collection = client.dealtrader.raw_tweets
    collection.create_index([("id", pymongo.ASCENDING)], unique=True)

    credentials = load_credentials()
    auth = tweepy_auth(credentials, user=True)
    api = tweepy_api(auth)

    ap = argparse.ArgumentParser()
    ap.add_argument("-id", "--follow_id", required=False, default=False, help="Screen name to follow")
    args = vars(ap.parse_args())
    followid = args["follow_id"]

    if followid:
        tweeter_id = return_follow_id(tweeter=followid)
        sys.stdout.write(tweeter_id)
        tweeter_id = int(tweeter_id)
        add_latest_tweets(tweeter_id)
    else:
        for tweeter in print_follow_list():
예제 #14
0
def main():
    """Load every user's spreadsheet and produce project-wise spreadsheets."""
    credentials = utils.load_credentials()
    drive, sheets = utils.get_google_apis(credentials)

    print('Loading Worksheet Folders...', end=' ')
    folder_ids = load_user_folders(drive, WORKSHEETS_FOLDER_ID)
    print('Successfully Loaded')

    print()
    print("Loading User Data...")
    user_sheets = load_user_spreadsheets(drive, folder_ids)

    print()
    print("Importing Spreadsheets...")
    sheet_data = import_spreadsheets(sheets, user_sheets)

    print()
    project_ids = get_project_ids(sheet_data)

    print()
    projects_dict, bad_ids = confirm_project_names(project_ids)

    # Abort early: report the offending rows instead of generating output.
    if bad_ids:
        print_rows_with_invalid_ids(sheet_data, bad_ids)
        return

    # SAMPLE: to append rows to a spreadsheet's default sheet, build a
    # body {'range': 'A:I', 'values': ...} and call
    # sheets.spreadsheets().values().append(spreadsheetId=...,
    # range='A:I', valueInputOption='RAW', body=...).execute()

    print()
    print("Processing spreadsheet data...")
    projects_data, current_projects = format_spreadsheets(sheet_data,
                                                          projects_dict)
    print("Process complete.")

    print()
    print("Creating spreadsheets...")
    new_sheet_ids = generate_spreadsheets(drive, sheets, sheet_data,
                                          current_projects)

    print()
    print("Generating records...")
    record_rows = generate_rows(projects_data)

    print()
    print("Inserting records...")
    insert_records(sheets, new_sheet_ids, record_rows, current_projects)

    print()
    print("All done!")
예제 #15
0
    return statuses

 
# Command-line interface: a user-names file, a credentials config, and an
# output path; --nb_days controls how far back to fetch.
parser = argparse.ArgumentParser()
parser.add_argument("--nb_days", type=int, default=1)
parser.add_argument("user_names", type=str, help="Path of text file containing Twitter user screen names (start with '@'), one per line")
parser.add_argument("path_credentials", type=str, help="Path of config file containing Twitter API credentials")
parser.add_argument("path_output", type=str, help="Path of output file")
args = parser.parse_args()
 
# Check args
if args.nb_days < 1:
    raise ValueError("nb_days must be positive")
 
# Load credentials
cred = utils.load_credentials(args.path_credentials)
 
# Set date and time where we stop (morning of the day args.nb_days
# prior to today, which I will arbitrarily set to 12:00 PM UTC, which
# is 7:00 AM EST).
# NOTE(review): `now` is timezone-naive while `that_morning` is
# UTC-aware; comparing the two directly would raise TypeError --
# confirm how downstream code uses them.
now = datetime.datetime.now()
delta = datetime.timedelta(days=args.nb_days)
then = now - delta
that_morning = datetime.datetime(then.year, then.month, then.day, hour=12, tzinfo=datetime.timezone.utc)
 
# Get API instance. Make the API wait when it hits rate limits.
auth = tweepy.OAuthHandler(cred["consumer_key"], cred["consumer_secret"])
auth.set_access_token(cred["access_token"], cred["access_secret"])
api = tweepy.API(auth, wait_on_rate_limit=True)

# Get user names