Example #1
 def handle(self, cmd):
     if cmd == "" or cmd == " ":
         return None
     try:
         base = cmd.split(" ")[0]
     except:
         base = cmd
     if base == "help":
         print(help_menu())
         return None
     elif base == "exit":
         sure = input("Are you sure? %s/%s: " %
                      (c("y", "green"), c("N", "red"))).lower()
         if not sure == "y":
             return None
         exit_sequence()  #TODO
         return "exit"
     elif base == "man":
         try:
             helpobj = cmd.split(" ")[1]
         except IndexError:
             print(error("man: Requires one argument"))
             helpobj = "man"
         if helpobj not in man:
             print(error("man: Command not found"))
             return None
         print("%s : %s" % (helpobj, c(man[helpobj], "green")))
         return None
     else:
         print(error("Command not found / Invalid syntax"))
         return None
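Throughout these snippets, c is assumed to be termcolor's colored; helpers such as error(), help_menu(), and the man dict are project-specific and not shown. A minimal sketch of plausible stand-ins (the error wrapper is a guess based on how it is used above):

from termcolor import colored as c

def error(msg):
    # Hypothetical helper: render error text in red, matching its usage in handle().
    return c(msg, "red")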
Example #2
    def conflict(self, data):
        """
        Handle conflicting and duplicate match data.

        :param data: Scouting data to search for duplicates and conflicts.
        """
        remove = []
        if len(data) > 1:
            for i in range(1, len(data)):
                pre = data[i-1]
                cur = data[i]
                if pre == cur:
                    remove.append(i-1)
                elif pre['match'] == cur['match'] and pre['team'] == cur['team']:
                    print(c('Team #%s was scouted differently twice in match #%s. Here are the two data:' % (pre['team'], cur['match']), 'red'))
                    print(('%s: ' % (i-1)) + str(pre))
                    print(('%s: ' % i) + str(cur))
                    choice = int(input(c('Please select the data you want to remove by typing 1 or 2: ', 'blue')))
                    remove.append(i + (choice - 2))

        for index in reversed(remove):
            print('Removing duplicate data point %s...' % index)
            del data[index]

        print(c('All conflicts and duplication eliminated.' if len(remove) else 'No duplicate or conflicting data found.', 'green'))

        return data
Example #3
def update_con(object):
    '''
    updates any field within contacts.
    will update the field, create the new contact and delete the old contact
    '''
    while True:
        object.view_contacts()
        name = input('Name: ').strip().lower()
        cur_con = object.get_contact(name)
        prev_name = name
        food = cur_con['fav_food']
        color = cur_con['fav_color']
        print(cur_con['name'], cur_con['fav_food'], cur_con['fav_color'])
        print('what would you like to update?\n', c('N', 'red'), 'ame\n',
              c('F', 'red'), 'ood\n', c('C', 'red'), 'olor')
        ch = input().strip().lower()
        if ch.startswith('n'):
            name = input('New name: ').strip().lower()
        elif ch.startswith('f'):
            food = input('New favorite food: ').strip().lower()
        elif ch.startswith('c'):
            color = input('New favorite color: ').strip().lower()
        else:
            print('incorrect input')
            continue

        print(name, food, color)
        ch = input('is this correct? y/n\n').strip().lower()
        if ch.startswith('y'):
            object.remove_contact(prev_name)
            object.update_contacts(name, food, color)
            break
Example #4
def yorn(msg):
    pretty = c("y", "green") + "/" + c("n", "red")
    ch = input(c("[=] ", "yellow") + msg + "? [%s]: " % pretty).lower()
    while ch not in ("y", "n"):
        ch = input(c("[=] ", "yellow") + msg + "? [%s]: " % pretty).lower()
    if ch == "y": return True
    elif ch == "n": return False
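A brief usage sketch for yorn (the prompt text is hypothetical):

if yorn("Overwrite the existing report"):
    print(c("Overwriting...", "green"))
else:
    print(c("Keeping the old report.", "red"))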
Example #5
def print_banner():
    with open("libs/logo.txt", "r") as btxt:
        logo = c(btxt.read(), "blue")
        name = author.name
        github = author.github
        print(logo + "\n\tAuthor: %s\n\tGithub: %s" %
              (c(name, "green"), c(github, "green")))
Example #6
def get_local_ip_information():
    """
    Get local IP Address
    """
    print(c("---", cyan))
    print(c("Your current IP Address is: ", white, attrs=['bold']))
    os.system("wget -O - -q icanhazip.com")
    print(c("---", cyan))
Example #7
def print_results(results):
    """
    Print out results of group operations
    """
    for r in results:
        connection = results[r]
        print(c("---", yellow, attrs=['bold']))
        print(c(r.host, white, attrs=['bold']))
        print(c("---", yellow, attrs=['bold']))
        print(connection)
Example #8
def main():
    passFile = open('password.txt', 'r')
    for line in passFile.readlines():
        if ':' in line:
            user = line.split(':')[0]
            cryptWord = line.split(':')[1].strip(' ').strip('\n')
            print(c("[*] Cracking Password For: " + user, 'blue'))
            if crackPass(cryptWord):
                quit()
            else:
                print(c('[-] Password not found!', 'red'))
Example #9
def main():
    global listen
    global port
    global execute
    global command
    global upload_Destination
    global target

    if not len(sys.argv[1:]):
        usage()
    # Read command line options
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hle:t:p:cu:", [
            "help", "listen", "execute=", "target=", "port=", "command", "upload="
        ])
    except getopt.GetoptError as err:
        print(c(str(err), "white", "on_red"))
        usage()

    for o, a in opts:
        if o in ("-h", "--help"):
            usage()
        elif o in ("-l", "--listen"):
            listen = True
        elif o in ("-e", "--execute"):
            execute = a
        elif o in ("-c", "--commandshell"):
            command = True
        elif o in ("-u", "--upload"):
            upload_Destination = a
        elif o in ("-t", "--target"):
            target = a
        elif o in ("-p", "--port"):
            port = int(a)
        else:
            assert False, "Unhandled Option"

    # Are we going to listen or just send data from stdin?
    if not listen and len(target) and port > 0:

        # Read in the buffer from commandline
        # this will block, so send CTRL-D if not sending input
        # to stdin
        buffer = sys.stdin.read()

        # Send data off
        client_sender(buffer)

    # We are going to listen and potentially
    # upload things, execute commands, and drop a shell back
    # depending on our command line options above
    if listen:
        server_loop()
Example #10
	def handle(self, cmd):
		if cmd == "exit":
			return "exit"
		elif cmd == "help":
			p = []
			for i in climan:
				p.append("%s : %s" % (c(i,"green"),climan[i]))
			print("\n".join(p))
		elif cmd == "info":
			for i in self.srv.clients:
				if self.srv.clients[i] == self.cli:
					cliname = i
			res = self.srv.getclientinfo(cliname)
			for i in res:
				print(c(i,"green")+" : "+res[i])
		elif ' ' in cmd:
			parts = cmd.split(" ")
			if parts[0] == "man":
				try:
					q = parts[1]
					print("%s : %s" % (c(q,"green"),climan[q]))
				except Exception as e:
					alert(str(e))
			elif parts[0] == "sh":
				try:
					cm = '"'.join(cmd.split('"')[1:-1])
					res = utils.force_decode(self.cli.sh(cm).strip())
					print(res)
				except Exception as e:
					alert(e)
			elif parts[0] == "dl":
				try:
					fn = parts[1]
					of = parts[2]
					dat = self.cli.dl(fn)
					with open(of,"wb") as f:
						f.write(dat)
					plus("Success")
				except Exception as e:
					alert(str(e))
			elif parts[0] == "cd":
				dr = parts[1]
				if self.cli.cd(dr):
					plus("Okay")
				else:
					alert("Error")
			else:
				alert("Unknown command")
		else:
			alert("Unknown command")
Example #11
def print_evaluation(targets, predictions, dict):
    p, r, f = get_pricision_recall_fscore(targets, predictions, list(dict.values()))

    avg_p = 0
    avg_r = 0
    avg_f = 0
    for k in list(dict.values()):
        key = [key for key, value in dict.items() if value == k][0]
        cprint("Key: " + c(("  " + key)[-5:], 'red') +
               "\tPrec: " + c("  {:.1f}".format(p[k] * 100)[-5:], 'green') + '%' +
               "\tRecall: " + c("  {:.1f}".format(r[k] * 100)[-5:], 'green')  + '%' +
               "\tF-Score: " + c("  {:.1f}".format(f[k] * 100)[-5:], 'green'))
        avg_p = avg_p + p[k]
        avg_r = avg_r + r[k]
        avg_f = avg_f + f[k]
    return avg_p / len(dict), avg_r / len(dict), avg_f / len(dict)
Example #12
    def consolidate(self, directory):
        """
        Load all JSON files in a directory and consolidate them into one file.

        :param directory: Directory to find JSON files in.
        """
        # TODO: Unbundle all file I/O from this method.
        files = [f for f in os.listdir(directory) if f.endswith('.json')]
        if len(files) and not files[0].endswith('data.json'):
            matches = []

            parse_errors = []
            for f in files:
                print('Parsing %s...' % f)
                try:
                    matches += self.load(f)
                except json.decoder.JSONDecodeError:
                    parse_errors.append(f)
                    print(c('File \'%s\' has parsing errors. Resolve and run again.' % f.split('/')[-1], 'red'))

            for f in files:
                if f not in parse_errors:
                    os.remove(f)

            matches.sort(key=lambda match: (match['team'], match['match']))

            return matches
        else:
            return []
Example #13
def print_pc(o, target):
    """returns: 
        p<recision>, 
        r<ecall>, 
        f<-score>, 
        {"true_p", "p", "all_p"} """
    p, r, _ = precision_recall(o, target)
    f = F_score(p, r)

    for k in p.keys():
        cprint("Key: " + c(("  " + k)[-5:], 'red') +
               "\tPrec: " + c("  {:.1f}".format(p[k] * 100)[-5:], 'green') + '%' +
               "\tRecall: " + c("  {:.1f}".format((r[k] if k in r else 0) * 100)[-5:], 'green') + "%" +
               "\tF-Score: " + ("  N/A" if f[k] is None else (c("  {:.1f}".format(f[k] * 100)[-5:], "green") + "%"))
               )
    return p, r, f, _
Example #14
def tvshow_imdb_search():
    for t in tvshow_titles:
        title_url = imdb.perform_search(t)

        if title_url:
            print(c("IMDb getting info for", "yellow") + f" \"{t}\"\n")
            imdb.get_tv_show_info(title_url)
Example #15
    def log_params(self, path="parameters.pkl", **kwargs):
        key_width = 30
        value_width = 20

        _kwargs = {}
        table = []
        for n, (title, section_data) in enumerate(kwargs.items()):
            table.append('═' * (key_width) + ('═' if n == 0 else '╧') + '═' *
                         (value_width))
            table.append(c('{:^{}}'.format(title, key_width), 'yellow') + "")
            table.append('─' * (key_width) + "┬" + '─' * (value_width))
            if not hasattr(section_data, 'items'):
                table.append(section_data)
                _kwargs[title] = metrify(section_data)
            else:
                _param_dict = {}
                for key, value in section_data.items():
                    _param_dict[key] = metrify(
                        value.v if type(value) is Color else value)
                    value_string = str(value)
                    table.append('{:^{}}'.format(key, key_width) + "│" +
                                 '{:<{}}'.format(value_string, value_width))
                _kwargs[title] = _param_dict

        if "n" in locals():
            table.append('═' * (key_width) + ('═' if n == 0 else '╧') + '═' *
                         (value_width))

        # todo: add logging hook
        # todo: add yml support
        self.print('\n'.join(table))
        self.log_data(path=path, data=_kwargs)
Example #16
    def log_params(self, path="parameters.pkl", **kwargs):
        key_width = 30
        value_width = 20

        table = []
        for n, (title, section_data) in enumerate(kwargs.items()):
            table.append((title, ""))
            self.log_line('═' * (key_width + 1) + ('═' if n == 0 else '╧') + '═' * (value_width + 1))
            self.log_line(c('{:^{}}'.format(title, key_width), 'yellow'))
            self.log_line('─' * (key_width + 1) + "┬" + '─' * (value_width + 1))
            if not hasattr(section_data, 'items'):
                self.log_line(section_data)
            else:
                for key, value in section_data.items():
                    value_string = str(value)
                    table.append((key, value_string))
                    self.log_line('{:^{}}'.format(key, key_width), "│",
                                  '{:<{}}'.format(value_string, value_width))

        if "n" in locals():
            self.log_line('═' * (key_width + 1) + ('═' if n == 0 else '╧') + '═' * (value_width + 1))

        # todo: add logging hook
        # todo: add yml support
        self.log_pkl(path=path, data=kwargs)
Example #17
	def handle(self, cmd):
		if cmd == "exit":
			return "exit"
		elif cmd == "help":
			p = []
			for i in man:
				p.append("%s : %s" % (c(i,"green"),man[i]))
			print("\n".join(p))
		elif ' ' in cmd:
			parts = cmd.split(" ")
			if parts[0] == "man":
				try:
					q = parts[1]
					print("%s : %s" % (c(q,"green"),man[q]))
				except Exception as e:
					alert(str(e))
			elif parts[0] == "show":
				try:
					if parts[1] == "clients" or parts[1] == "client":
						print(c("Clients:"), ', '.join(srv.clients))
				except Exception as e:
					alert(str(e))
			elif parts[0] == "broadcast":
				if '"' not in cmd:
					alert("Command needs to be in double quotation marks")
					return
				try:
					cm = '"'.join(cmd.split('"')[1:-1])
					res = srv.broadcast_sh(cm)
					for i in res:
						print(c(i,"green"),":",res[i].strip())
				except Exception as e:
					alert(str(e))
			elif parts[0] == "interact":
				try:
					if not parts[1] in srv.clients:
						alert("Client [%s] not in client list" % parts[1])
					else:
						hnd = ClientHandler(srv.clients[parts[1]],srv)
						shell.start_shell(hnd)
				except Exception as e:
					alert(str(e))
			else:
				alert("Unknown command")
		else:
			alert("Unknown command")
Example #18
def crackPass(cryptWord):
    salt = cryptWord[:2]
    with open("dictionary.txt", 'r') as dictionary:
        for word in dictionary:
            word = word.strip('\n')
            cryptPass = crypt.crypt(word, salt)
            if cryptWord == cryptPass:
                print(c("[+] Found password: " + word, 'green'))
                return True
    return False
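A minimal, hypothetical smoke test for crackPass, assuming the standard-library crypt module (Unix-only, deprecated since Python 3.11) is imported and that the word "secret" appears in dictionary.txt:

import crypt

test_hash = crypt.crypt("secret", "ab")   # classic DES crypt with salt "ab"
print(crackPass(test_hash))               # True only if "secret" is in dictionary.txt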
Example #19
    def log_params(self, path="parameters.pkl", **kwargs):
        """
        Log namespaced parameters in a list.

        Examples:

            ::
                logger.log_params(some_namespace=dict(layer=10, learning_rate=0.0001))

            generates a table that looks like:

            ::
                ══════════════════════════════════════════
                   some_namespace
                ────────────────────┬─────────────────────
                       layer        │ 10
                   learning_rate    │ 0.0001
                ════════════════════╧═════════════════════

        :param path: the file to which we save these parameters
        :param kwargs: list of key/value pairs, each key representing the name of the namespace,
                       and the namespace itself.
        :return: None
        """
        from termcolor import colored as c
        key_width = 20
        value_width = 20

        _kwargs = {}
        table = []
        for n, (title, section_data) in enumerate(kwargs.items()):
            table.append('═' * (key_width) + ('═' if n == 0 else '╧') + '═' *
                         (value_width + 1))
            table.append(c('{:^{}}'.format(title, key_width), 'yellow') + "")
            table.append('─' * (key_width) + "┬" + '─' * (value_width + 1))
            if not hasattr(section_data, 'items'):
                table.append(section_data)
                _kwargs[title] = metrify(section_data)
            else:
                _param_dict = {}
                for key, value in section_data.items():
                    _param_dict[key] = metrify(
                        value.v if type(value) is Color else value)
                    value_string = str(value)
                    table.append('{:^{}}'.format(key, key_width) + "│ " +
                                 '{:<{}}'.format(value_string, value_width))
                _kwargs[title] = _param_dict

        if "n" in locals():
            table.append('═' * (key_width) + '╧' + '═' * (value_width + 1))

        # todo: add logging hook
        # todo: add yml support
        if table:
            self.log_line(*table, sep="\n")
        self.log_data(path=path, data=_kwargs)
Example #20
def perform_search(title):
    """ Performs search for 'title'
    and tries to match results to 'title'
    returns url of best match
    """
    print(c("IMDb searching for", "yellow") + f" \"{title}\"")

    title_encoded = title.replace(" ", "+")

    search_page = requests.get(
        f"https://www.imdb.com/find?ref_=nv_sr_fn&q={title_encoded}&s=all",
        headers=headers)

    soup = BeautifulSoup(search_page.text, "html.parser")
    title_nodes = soup.find_all("td", class_="result_text")

    print(F"Found {len(title_nodes)} results\n")

    best_match = 0
    best_match_title = ""
    best_match_url = ""

    for n in title_nodes:
        node = next(iter(n.select("a[href*='title']")), None)

        if node:
            title_text = unidecode(node.text)
            match_ratio = fuzz.token_set_ratio(title, title_text)

            print(
                c(F"Matching ", "magenta") + F"\"{title_text}\" = " +
                c(F"{match_ratio}", "green"))

            if match_ratio > best_match:
                best_match = match_ratio
                best_match_title = title_text
                best_match_url = node["href"]

    print(c("\nBest Match", "green") + f" {best_match_title}\n")

    return "https://imdb.com" + best_match_url
Example #21
def addHost(network):
    '''
    Get the input information for a host on the network
    '''
    while True:
        ip = input(c('IP: ', COLOR, attrs=('bold', )) + network + ".")
        hostname = prompt("Hostname: ")
        os = prompt("OS: ")
        isGood = prompt("Correct? [Y/n]", "green")
        if isGood in ("", "y", "yes"):
            # If its right, return the host info
            return {'ip': ip, 'name': hostname, 'os': os}
Example #22
def extract_instances_information(context, environment, filter, tags=None):
    """
    Get information from instances and create hosts pool
    """
    default_attributes = [
        'Name',
        'private_ip_address',
        'public_ip_address',
        'instance_type'
    ]
    attributes = default_attributes if tags is None else tags.split(',')

    # get instances with filter
    instances = get_hosts_from_ec2(environment, filter)

    instance_name = None
    if len(attributes) == 1:
        results = []
    else:
        results = {}

    for instance in instances:
        # gather data from tags
        for tag in instance.tags:
            # set other attributes
            if tag['Key'] in attributes:
                if tag['Key'] == 'Name':
                    instance_name=tag['Value']
                    instance_id=getattr(instance, 'id')
                    instance_name = f'{instance_name}[{instance_id}]'

                # print(tag['Value'])
                results.update({instance_name: {tag['Key']: tag['Value']}})

        # gather data from attributes
        # figure out what properties the instance object has
        # print(instance.__dict__.keys())
        instance_properties = [i for i in dir(instance) if not callable(getattr(instance, i))]
        # print(instance_properties)
        for attribute in attributes:
            if attribute in instance_properties:
                if isinstance(results, list):
                    results.append(getattr(instance, attribute))
                else:
                    results[instance_name].update({attribute: getattr(instance, attribute)})

    # print hosts information
    print(f"Information for attributes: {c(attributes, white, attrs=['bold'])}")
    number_of_hosts = len(results)
    print(c(f"Found {number_of_hosts} host(s)", white, attrs=['bold']))
    print(json.dumps(results, sort_keys=True, indent=4))

    return results
Example #23
def logo():
    print(
        c(
            "██████╗ ███╗   ███╗██╗     ██████╗ █████╗ ██╗      ██████╗██╗   ██╗██╗      █████╗ ████████╗ ██████╗ ██████╗ ",
            "red"))
    print(
        c(
            "██╔══██╗████╗ ████║██║    ██╔════╝██╔══██╗██║     ██╔════╝██║   ██║██║     ██╔══██╗╚══██╔══╝██╔═══██╗██╔══██╗",
            "yellow"))
    print(
        c(
            "██████╔╝██╔████╔██║██║    ██║     ███████║██║     ██║     ██║   ██║██║     ███████║   ██║   ██║   ██║██████╔╝",
            "green"))
    print(
        c(
            "██╔══██╗██║╚██╔╝██║██║    ██║     ██╔══██║██║     ██║     ██║   ██║██║     ██╔══██║   ██║   ██║   ██║██╔══██╗",
            "cyan"))
    print(
        c(
            "██████╔╝██║ ╚═╝ ██║██║    ╚██████╗██║  ██║███████╗╚██████╗╚██████╔╝███████╗██║  ██║   ██║   ╚██████╔╝██║  ██║",
            "blue"))
    print(
        c(
            "╚═════╝ ╚═╝     ╚═╝╚═╝     ╚═════╝╚═╝  ╚═╝╚══════╝ ╚═════╝ ╚═════╝ ╚══════╝╚═╝  ╚═╝   ╚═╝    ╚═════╝ ╚═╝  ╚═╝",
            "magenta"))
Example #24
def ascii_print():
    message = input('What message do you want to print? ')

    while True:
        try:
            colour = input('What colour? ')
            answer = c(f(message), colour)
            break
        except KeyError:
            print(
                'Colour is not valid. Please choose from red, green, yellow, blue, magenta, cyan, white'
            )
    return print(answer)
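ascii_print relies on two helpers that are not shown: c (presumably termcolor.colored) and f, presumably an ASCII-art renderer. A plausible import block, assuming pyfiglet:

from termcolor import colored as c
from pyfiglet import figlet_format as f  # assumption: any str -> ASCII-art str renderer would work here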
Example #25
    def print_maze(self, maze):

        condition = self.process_option != 'Fast'

        if condition:
            if self.steps != 0:
                print(f'\nInd name   : {self.ind_name}')
                print(f"Before     : {self.before}")
                print(f"Actual     : {self.actual}")
                if self.ia_flag:
                    print(f'Key Pressed: {self.ia_steps[self.ia_steps_count]}')

            print(f'Steps      : {self.steps}')
            for line in maze:
                for cell in line:

                    if cell == 'E' or cell == 'S':
                        print(c(cell, 'white'), end=' ')

                    elif cell == '1':
                        print(c(cell, 'red'), end=' ')

                    elif cell == '0':
                        print(c(cell, 'green'), end=' ')

                    elif cell == '@':
                        print(c(cell, 'yellow'), end=' ')

                print()
            print()

            #time.sleep(self.sleeper)

Example #26
    def log_params(self, **kwargs):
        # ======================================
        #        ALTERNATIVE IMPLEMENTATION
        # --------------------------------------
        # value = "Random text"
        # text_tensor = tf.make_tensor_proto(value, dtype=tf.string)
        # meta = tf.SummaryMetadata()
        # meta.plugin_data.plugin_name = "text"
        # summary = tf.Summary()
        # summary.value.add(tag="whatever", metadata=meta, tensor=text_tensor)
        # summary_writer.add_summary(summary)
        # --------------------------------------

        key_width = 30
        value_width = 20

        table = []
        for n, (title, section_data) in enumerate(kwargs.items()):
            table.append((title, ""))
            print('═' * (key_width + 1) + f"{'═' if n == 0 else '╧'}" + '═' *
                  (value_width + 1))
            print(c(f'{title:^{key_width}}', 'yellow'))
            print('─' * (key_width + 1) + "┬" + '─' * (value_width + 1))
            for key, value in section_data.items():
                value_string = str(value)
                table.append((key, value_string))
                print(c(f'{key:^{key_width}}', 'white'), "│",
                      f'{value_string:<{value_width}}')

        if "n" in locals():
            print('═' * (key_width + 1) + f"{'═' if n == 0 else '╧'}" + '═' *
                  (value_width + 1))

        table_tensor = tf.convert_to_tensor(table, dtype=tf.string)
        summary_op = tf.summary.text('experiment_parameters', table_tensor)
        default_sess = tf.get_default_session() or tf.Session()
        with default_sess as sess:
            self.summary_writer.add_summary(sess.run(summary_op), 0)
Example #27
def client_sender(buffer):

    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    try:
        # Connect to target host
        client.connect((target, port))

        if len(buffer):
            client.send(buffer.encode())

        while True:
            # Wait for data back
            recv_len = 1
            response = ""

            while recv_len:
                data = client.recv(4096)
                recv_len = len(data)
                response += data.decode()

                if recv_len < 4096:
                    break
            print(response, end="")

            # Wait for more input
            buffer = input("")
            buffer += "\n"

            # Send it off
            client.send(buffer.encode())

    except Exception:
        print(c("[!] Exception! Exiting.", "white", "on_red"))

        # Tear down connection
        client.close()
Example #28
 def player_in_board(self):
     
     # print(f"Player 1 positions: x -> {self.p1.x}    y -> {self.p1.y}")
     # print(f"Player 2 positions: x -> {self.p2.x}    y -> {self.p2.y}\n")
     
     if self.p1.y > 49:
         self.p1.y = 49
         self.p1.win = True
         
     if self.p2.y > 49:
         self.p2.y = 49
         self.p2.win = True
         
     self.board[self.p1.prev_x][self.p1.prev_y].colored_icon = c(0, self.board[self.p1.prev_x][self.p1.prev_y].color)
     self.board[self.p2.prev_x][self.p2.prev_y].colored_icon = c(0, self.board[self.p2.prev_x][self.p2.prev_y].color)
     
     if self.board[self.p1.x][self.p1.y].color == 'red':
         print("Você caiu em uma casa vermelha! Volte uma posição")
         self.p1.y -= 1
     
     elif self.board[self.p1.x][self.p1.y].color == 'green':
         print("Você caiu em uma casa verde! Avance uma posição" )
         self.p1.y += 1
     
     if self.board[self.p2.x][self.p2.y].color == 'red':
         self.p2.y -= 1
     
     elif self.board[self.p2.x][self.p2.y].color == 'green':
         self.p2.y += 1
         
     self.board[self.p1.x][self.p1.y].colored_icon = c(self.p1.player, 'cyan')
     self.board[self.p2.x][self.p2.y].colored_icon = c(self.p2.player, 'magenta')
     
     self.p1.prev_x = self.p1.x
     self.p1.prev_y = self.p1.y
     self.p2.prev_x = self.p2.x
     self.p2.prev_y = self.p2.y
Example #29
def pretty_doc(name_fn_map=None, groups=None, docstrings=True, exclude_vars=set()):
    s = []
    for name, func in name_fn_map.items():
        if name in groups:
            s.append('\n%s\n%s' % (groups[name], '-'*len(groups[name])))
        args, kwargs = extract_fn_arguments(func)
        kw = []
        for arg in args:
            if arg not in exclude_vars:
                kw.append(c(arg, 'yellow'))
        for arg, val in kwargs.items():
            if arg not in exclude_vars:
                kw.append('%s=%s' % (c(arg, 'yellow'), c(val, 'magenta') ))
        
        fn_name = '%s' % c(name, 'green')
        fn_args = ''
        for k in kw:
            arg = '  ' + k
            if len(fn_args.split('\n')[-1]) > 105:
                fn_args += '\n' + (' '*len(name))
            fn_args += arg
        
        s.append(fn_name + fn_args)
        
        if docstrings:
            docfn = getattr(func, '__docstring__', func.__doc__)
            docstring = docfn or ''
    
            if docstring:
                lines = docstring.split('\n')
                if len(lines) > 1:
                    spaces = len(lines[1]) - len(lines[1].lstrip())
                    r = re.compile(r"^\s{%d}" % spaces, re.MULTILINE)
                    docstring = r.sub("  ", docstring)
                docstring = "  " + docstring.strip()
                s.append(docstring)
    return '\n'.join(s)
Example #30
    [0, 1., 0],
    [0, 0, 1.],
    [0, 0, 1.],
    [0, 0, 1.]
])

features_expanded = tf.reshape(deep_features, shape=[-1, 2, 1])
label_expanded = tf.reshape(labels, shape=[-1, 1, 3])

samples_per_label = tf.reduce_sum(
    label_expanded,
    reduction_indices=[0]
)

centroids = \
    tf.reduce_sum(
        tf.reshape(deep_features, shape=[-1, 2, 1]) * \
        label_expanded,
        reduction_indices=[0]
    ) / samples_per_label

spread = tf.reduce_mean(
    tf.square(
        features_expanded * label_expanded - tf.reshape(centroids, shape=[1, 2, 3])
    )
)

with tf.Session() as sess:
    spread_output = sess.run([spread])
    cprint(c(spread_output, 'red'))
Example #31
def forward_tracer(self, input, output):
    _cprint(c("--> " + self.__class__.__name__, 'red') + " ===forward==> ")
Example #32
        init = tf.initialize_all_variables()

        with tf.Session(config=session_config) as sess, \
                h5py.File(DUMP_FILE, 'a', libver='latest', swmr=True) as h5_file:
            # Merge all the summaries and write them out to /tmp/mnist_logs (by default)
            # to see the tensor graph, fire up the tensorboard with --logdir="./train"
            all_summary = tf.merge_all_summaries()
            train_writer = tf.train.SummaryWriter(SUMMARIES_DIR + '/summaries/train', sess.graph)
            test_writer = tf.train.SummaryWriter(SUMMARIES_DIR + '/summaries/test')

            saver = tf.train.Saver()

            if RESTORE:
                try:
                    saver.restore(sess, SAVE_PATH)
                    cprint(c('successfully loaded checkpoint file.', 'green'))
                except ValueError:
                    cprint(c('checkpoint file not found. Moving on to initializing automatically.', 'red'))
                    sess.run(init)
            else:
                sess.run(init)

            step = global_step.eval()
            cprint(c('global step starts at:', 'grey') + c(step, 'red'))

            for i in range(TOTAL_STEPS + 1):
                batch_xs, batch_labels = mnist.train.next_batch(BATCH_SIZE)
                if i % 50 == 0:
                    eval_labels = mnist.test.labels[:5000]
                    eval_images = mnist.test.images[:5000]
                    summaries, step, logits_outputs, deep_features_outputs, loss_value, accuracy, centroids_output = \
Example #33
def cprint(text, color=None, on_color=None, attrs=None, **kwargs):
  print((c(text, color, on_color, attrs)), **kwargs)
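A short usage sketch of this cprint wrapper; extra keyword arguments are forwarded straight to print:

cprint("All checks passed", "green", attrs=["bold"], end="\n\n")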
def backward_tracer(self, input, output):
    cprint(c("--> " + self.__class__.__name__, 'red') + " ===backward==> ")
            for i in range(20000):
                batch_xs, batch_labels = mnist.train.next_batch(BATCH_SIZE)
                if i % 50 == 0:
                    eval_labels = mnist.test.labels[:5000]
                    eval_images = mnist.test.images[:5000]
                    summaries, step, logits_outputs, deep_features_outputs, loss_value, accuracy = \
                        sess.run(
                            [all_summary, global_step, logits, deep_features, loss_op, eval], feed_dict={
                                input: eval_images,
                                labels: eval_labels
                            })
                    test_writer.add_summary(summaries, global_step=step)

                    cprint(
                        c("#" + str(i), 'grey') +
                        c(" training accuracy", 'green') + " is " +
                        c(accuracy, 'red') + ", " +
                        c("loss", 'green') + " is " +
                        c(loss_value, 'red')
                    )

                    cprint(c('logits => ', 'yellow') + str(logits_outputs[0]))

                    group = h5_file.create_group('step_{}'.format(str(1000000 + step)[-6:]))
                    group.create_dataset('deep_features', data=deep_features_outputs)
                    group.create_dataset('logits', data=logits_outputs)
                    group.create_dataset('target_labels', data=eval_labels)

                if i % 500 == 0 and (accuracy > 0.6):
                    saver.save(sess, SAVE_PATH)
- MNIST

`transforms` contains data transformation functions.

`transforms.ToTensor()` => converts the input images to torch tensors.
`transforms.Normalize(mean, std)` => normalizes each batch with the given mean and standard deviation.


"""
import torch
from torchvision import datasets, transforms

train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../mnist_data', train=False, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=100, shuffle=True
)

from termcolor import cprint, colored as c

for batch_idx, (data, target) in enumerate(train_loader):
    cprint(
        c('batch data has the shape of: ', 'grey') +
        c(str(data.shape), 'green')
    )
    break


config = tf.ConfigProto(allow_soft_placement=True)
with tf.Session(config=config) as sess, tf.device('/gpu:0'):
    xys = get_locations(21)
    interactive_energy = energies.total(xys, static)

    step_size = tf.placeholder(dtype=tf.float32)
    train_op = train(step_size, interactive_energy)

    init = tf.initialize_all_variables()
    # all_summaries = tf.merge_all_summaries()

    sess.run(init)

    total_steps = 70000
    for i in range(total_steps + 1):
        tick = time.clock()
        sess.run(train_op,
                 feed_dict={step_size: 1e-1 * np.min([12.5 * i, 700, 0.01 * (total_steps - i)]) / total_steps})

        if i % 100 == 0:
            lapsed = (time.clock() - tick) / 100.
            current_xys, interactive_energy_result = sess.run([xys, interactive_energy])
            cprint(c('{}sec '.format(str(lapsed)[:7]), 'yellow') + c('#{} '.format(i), 'red') + c('interactive_energy_result ', 'grey') + c(
                interactive_energy_result, 'green') + ' eV')

            # with open('dumps/xys_{}.dump.pkl'.format(str(1000000 + i)[-6:]), 'wb') as f:
            #     pickle.dump(current_xys, f)
        eval = network.evaluation(logits, labels)

        init = tf.initialize_all_variables()

        with tf.Session(config=session_config) as sess:
            # Merge all the summaries and write them out to /tmp/mnist_logs (by default)
            # to see the tensor graph, fire up the tensorboard with --logdir="./train"
            all_summary = tf.merge_all_summaries()
            train_writer = tf.train.SummaryWriter(SUMMARIES_DIR + '/summaries/train', sess.graph)
            test_writer = tf.train.SummaryWriter(SUMMARIES_DIR + '/summaries/test')

            saver = tf.train.Saver()

            try:
                saver.restore(sess, SAVE_PATH)
                cprint(c('successfully loaded checkpoint file.', 'green'))
            except ValueError:
                cprint(c('checkpoint file not found. Moving on to initializing automatically.', 'red'))
                sess.run(init)
            # sess.run(init)

            for i in range(500000):
                batch_xs, batch_labels = mnist.train.next_batch(BATCH_SIZE)
                accuracy = 0
                if i % 100 == 0:
                    summaries, step, logits_output, loss_value, accuracy = \
                        sess.run(
                            [all_summary, global_step, logits, loss_op, eval],
                            feed_dict={
                                input: mnist.test.images[:5000],
                                labels: mnist.test.labels[:5000]
matplotlib.use('Agg')
import matplotlib.pyplot as plt


def train(step, loss):
    global_step = tf.Variable(0, name='global_step', dtype=tf.int32, trainable=False)
    optimizer = tf.train.GradientDescentOptimizer(step)
    train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op


with tf.Session() as sess:
    x = tf.Variable(10, name='x', dtype=tf.float32)
    energy = 0.5 * x ** 2
    train_op = train(0.01, energy)

    init = tf.initialize_all_variables()
    sess.run(init)

    results = sess.run([energy])
    cprint(c('initial total is ', 'grey') + c(results[0], 'green'))

    decay = []
    for i in range(500):
        results = sess.run([energy, train_op])
        # cprint(c('initial total is ', 'grey') + c(results[0], 'green'))

        decay.append(results[0])
    plt.plot(decay)
    plt.savefig('figures/1D_pendulum_energy_decay.png', dpi=300, bbox_inches='tight')
from termcolor import cprint, colored as c
from test_module import constant

cprint(c(constant, 'red') + c(' this is green', 'green'))
            #     sess.run(init)
            sess.run(init)

            for i in range(500000):
                batch_xs, batch_labels = mnist.train.next_batch(BATCH_SIZE)
                if i % 100 == 0:
                    summaries, current_step, logits_output, loss_value, accuracy = \
                        sess.run(
                            [all_summary, global_step, logits, loss_op, eval],
                            feed_dict={
                                input: mnist.test.images,
                                labels: mnist.test.labels
                            })
                    test_writer.add_summary(summaries, global_step=current_step)
                    cprint(
                        c("#" + str(i), 'grey') +
                        c(" training accuracy", 'green') + " is " +
                        c(accuracy, 'red') + ", " +
                        c("loss", 'green') + " is " +
                        c(loss_value, 'red')
                    )
                    print('logits => ', logits_output[0])

                if i % 500 == 0:
                    saver.save(sess, SAVE_PATH)
                    print('=> saved network in checkfile.')

                summaries, current_step, _ = sess.run([all_summary, global_step, train], feed_dict={
                    input: batch_xs,
                    labels: batch_labels
                })
    [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
]

"""
per the discussion here:
http://stackoverflow.com/questions/33712178/tensorflow-nan-bug
"""
with tf.Graph().as_default(), tf.device('/cpu:0'):
    logits = tf.constant(outputs, dtype=tf.float64)
    batch_labels = tf.constant(labels, dtype=tf.float64)
    cross_entropy = - tf.div(
        tf.reduce_mean(
            tf.mul(batch_labels, tf.nn.log_softmax(logits)),
            reduction_indices=[1]
        ),
        tf.reduce_mean(
            logits,
            reduction_indices=[1]
        )
    )

    with tf.Session() as sess:
        print("here is the calculated loss before being summed up.")
        results = sess.run([logits, cross_entropy])
        print("======")
        cprint(c('logits', 'green') + '\n' + str(results[0]))
        print("------")
        cprint(c('cross_entropy', 'green') + '\n' + str(results[1]))
        print("======")
features_expanded = tf.reshape(deep_features, shape=[-1, 2, 1])
labels_expanded = tf.reshape(labels, shape=[-1, 1, 3])

samples_per_label = tf.reduce_sum(
    labels_expanded,
    reduction_indices=[0]
)

centroids = \
    tf.reduce_sum(
        tf.reshape(deep_features, shape=[-1, 2, 1]) * \
        labels_expanded,
        reduction_indices=[0]
    ) / samples_per_label

centroids_expanded = tf.reshape(centroids, shape=[1, 2, 3]) * labels_expanded

spread = \
    tf.reduce_mean(
        tf.reduce_sum(
            tf.square(
                features_expanded * labels_expanded - centroids_expanded
            ),
            reduction_indices=[1, 2]
        )
    )

with tf.Session() as sess:
    result, = sess.run([spread])
    cprint(c(result, 'red'))