Example #1
def test(path, mode, **kwargs):
    if mode == "gan":
        samplefiles = utils.parse_file([kwargs["samples"]], ext="h5")
        trainfiles = utils.parse_file(path, ext="pth")
        agent = gan.GAN(
            lr=PARAMS["GAN"]["lr"],
            x_size=PARAMS["GAN"]["x_size"],
            u_size=PARAMS["GAN"]["u_size"],
            z_size=PARAMS["GAN"]["z_size"],
        )

        for trainfile in trainfiles:
            agent.load(trainfile)
            agent.eval()

            logger = logging.Logger(path="data/tmp.h5", max_len=500)

            dataloader = gan.get_dataloader(samplefiles, shuffle=False)
            for i, (state, action) in enumerate(tqdm.tqdm(dataloader)):
                fake_action = agent.get_action(state)
                state, action = map(torch.squeeze, (state, action))
                fake_action = fake_action.ravel()
                logger.record(state=state,
                              action=action,
                              fake_action=fake_action)

            logger.close()

            print(f"Test data is saved in {logger.path}")
Example #2
def cpu_temp_get():
  temp_vals = parse_file(path_temp, re_temp)
  temp = '--'
  try:
    temp = '%02d%s' % (int(temp_vals['temp'][0]), temp_vals['unit'][0])
  except Exception as e:
    temp = 'error: %s,' % e
  return temp
Example #3
def pepperoni(source, d, backup, write, rules):
    """
    Pepperoni will read the python file SOURCE and modify it to match PEP008 standards
    """

    if source:
        with open(source, "rb") as fh:
            data = fh.readlines()

        if data:
            corrected = utils.parse_file(data, rwd=rules)
            if d:
                dest = d
            else:
                if backup:
                    dest = utils.bak_file(source)
                else:
                    dest = source

            if write:

                with open(source, "wb") as fh:
                    fh.writelines(data)
                with open(dest, "wb") as fh:
                    fh.writelines(corrected)
            else:
                sys.stderr.writelines(corrected)
    else:
        print "Warning: No python file passed. Nothing was changed."
Example #4
def test():
    input_path = 'mp1_ec/inputs/' + 'bigDots' + '.txt'
    maze, states, pacman, dots = parse_file(input_path)
    print("Now searching: {0}.txt".format('bigDots'))
    results = get_distances(states, dots)
    for a in results:
        print(a, results[a])
Example #5
def cpu_freq_get():
  cpu_vals = parse_file(path_cpuinfo, re_cpu)
  cpu = '--'
  try:
    cpu = '/'.join(map(mhz_to_ghz, cpu_vals['mhz']))
  except Exception as e:
    cpu = 'error: %s,' % e
  return cpu
Example #6
def part1_2():
    for file_name in FILE_NAMES_2:
        input_path = 'mp1.2/inputs/' + file_name + '.txt'
        maze, states, pacman, dots = parse_file(input_path)
        print("Now searching: {0}.txt".format(file_name))
        num_nodes_expanded, sol, order = astar_ec(states, pacman, dots)
        output_path = 'mp1.2/outputs/' + file_name + '_sol_astar_multiple.txt'
        print_sol_multiple(output_path, maze, sol, order, num_nodes_expanded)
Example #7
def solve():
    (days, books_num, libraries_num, books_scores, num_of_books_in_library,
     signup_time_for_library, books_per_day_from_lib,
     book_ids_for_library) = parse_file("inputs/d_tough_choices.txt")
    sorted_libs = np.flip(np.argsort([len(x) for x in book_ids_for_library]))
    sorted_books = book_ids_for_library[sorted_libs]
    return (sorted_libs, sorted_books, num_of_books_in_library,
            books_per_day_from_lib, days, signup_time_for_library)
Example #8
def test_parse_file():
    content = parse_file('test1.txt')
    control = get_file_content('test3.txt')

    actual_content = "\n".join(content)

    error_msg = error_msg_tpl % (actual_content, control)    
    assert actual_content == control, error_msg
Example #9
def test_circular_ref():
    content = parse_file('test4.txt')
    actual_content = "\n".join(content)

    control = get_file_content('test5.txt')

    error_msg = error_msg_tpl % (actual_content, control)
    assert actual_content == control, error_msg
Example #10
def part1_ec():
    input_path = 'mp1_ec/inputs/bigDots.txt'
    output_path = 'mp1_ec/outputs/bigDots_sol.txt'
    #input_path = 'mp1.2/inputs/' + 'mediumSearch' + '.txt'

    maze, states, pacman, dots = parse_file(input_path)
    num_expanded, path, order, win = astar_ec_anim(states, pacman, dots, maze)
    draw_sol(win, path, order)
    print(num_expanded, len(path))
Example #11
async def create_group(request):
    data = await request.json()
    employees = parse_file(data['file'])
    async with request.app['db_pool'].acquire() as conn:
        db_group = deserialize_json(await db.get_last_group(conn))
        # print('CALLED  create_group()')
        new_group = create_new_group(*db_group, employees)
        await db.create_data(conn, new_group)
    return web.json_response(employees)
Example #12
def main():
    try:
        in_file_name = sys.argv[1]
    except IndexError:
        sys.stderr.write('Usage: python scc.py ${in_file_name}\n')
        sys.exit(1)  # without this, in_file_name would be undefined below

    graph = parse_file(in_file_name)

    result = scc(graph)
Example #13
def get_image_with_bbox(attrs):
    images = parse_file(config.get('deepfashion', 'attributes_file'),
                        val_type=int, key_item_id=None, validate_fields=False)
    attrs = parse_attr(attrs, _PREDEFINED_ATTR)
    filtered = filter_items(images, attrs)

    image_files = append_path(config.get('deepfashion', 'image_dir'), filtered, key='image_name')
    boxes = bbox(filtered)

    return image_files, boxes
Example #14
def part1_1():
    for file_name in FILE_NAMES_1:
        input_path = 'mp1.1/inputs/' + file_name + '.txt'
        print("Now searching: {0}.txt".format(file_name))
        maze, states, pacman, dots = parse_file(input_path)
        for search in SEARCHES_1:
            visited, num_nodes_expanded = search(states, pacman, dots[0])
            sol = visited_to_path(visited, dots[0])
            output_path = 'mp1.1/outputs/' + file_name + '_sol_' + search.__name__ + '.txt'
            print_sol(output_path, maze, sol, num_nodes_expanded)
Example #15
def matching_helper(f1,
                    f2,
                    lang,
                    ins=None,
                    args=None,
                    entryfnc="main",
                    datadir="data"):
    f1 = get_full_data_filename(f1, reldir=datadir)
    f2 = get_full_data_filename(f2, reldir=datadir)

    parser = getlangparser(lang)
    inter = getlanginter(lang)

    m1 = parse_file(f1, parser)
    m2 = parse_file(f2, parser)

    M = Matching(ignoreio=not ins, ignoreret=not args)
    return bool(M.match_programs(
        m1, m2, inter, ins=ins, args=args, entryfnc=entryfnc))
Example #16
def minimum_spanning_tree(input='', starting_vert=0, verbose=True):
    """Parse the input file (utils.parse_file) and runs the algorithm using
     the edges and the vertices obtained from parsing. Returns the edges list
     of a minimum spanning tree (there may be multiples MST).

    Args:
        input (str): Input file path (.txt format)
        starting_vert (int, optional): Source vertex key
        verbose (bool, optional): Whether, or not, it should print the result

    Returns:
        list: Chosen edges with new values for 'value' and 'parent_key'

    Raises:
        FileNotFoundError: Raised when the input file is not found
    """
    vertices, edges = parse_file(input, verbose)

    # Forest array used for avoiding cyclic trees
    forests = list(range(len(vertices)))
    chosen_edges = []
    sorted_edges = sorted(edges, key=lambda x: x.weight)

    if verbose:
        print('\nForest array: ', forests, end='\n\n')
        print('#### SORTED EDGES ####', *sorted_edges, sep='\n')

    for edge in sorted_edges:

        # If both vertices are not in the same forest
        if forests[edge.vert_pair[0].key] != forests[edge.vert_pair[1].key]:

            chosen_edges.append(edge)

            # Unite the two forests: relabel every vertex in either forest
            # with the first vertex's forest index
            first_forest_val = forests[edge.vert_pair[0].key]
            second_forest_val = forests[edge.vert_pair[1].key]

            for i, forest in enumerate(forests):
                if forest in [first_forest_val, second_forest_val]:
                    forests[i] = first_forest_val

    if verbose:
        print('\nForest array: ', forests, end='\n\n')
        print('#### CHOSEN EDGES ####')
        for e in chosen_edges:
            print(
                f'Weight: {e.weight}  \tVertices: {e.vert_pair[0].key}-{e.vert_pair[1].key}'
            )

    return chosen_edges
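A minimal call sketch for the function above, assuming a hypothetical input file 'graph.txt' in whatever format utils.parse_file expects; the printed fields reuse the edge attributes shown in the verbose output:

# Hypothetical usage; 'graph.txt' is a placeholder, not a verified input file.
mst = minimum_spanning_tree(input='graph.txt', verbose=False)
for e in mst:
    print(e.weight, e.vert_pair[0].key, e.vert_pair[1].key)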
Example #17
def process_inifile(inifile):
    width_size = float(inifile.split("geometry_")[1].split("_")[0])
    trajfile = "trajectories/traj" + inifile.split("ini")[2]
    if not path.exists(trajfile):
        logging.critical("trajfile <%s> does not exist"%trajfile)
        exit(FAILURE)

    fps, N, traj = parse_file(trajfile)
    name = "times" + inifile.split("ini")[2]
    J = rolling_flow(fps, N, traj, 61, name)
    return [N, width_size, J]
Example #18
def solve_c():
    (days, books_num, libraries_num, books_scores, num_of_books_in_library, signup_time_for_library,
     books_per_day_from_lib, book_ids_for_library) = parse_file("inputs/c_incunabula.txt")
    res = parse("inputs/c_incunabula.txt")
    # Sort by (total_book_score/signup_time), then do greedy
    # print (initial_library_scores(res))
    scores = initial_library_scores(res)
    sorted_libs = np.flip(np.argsort([score[0] for score in scores]))
    sorted_books = book_ids_for_library[sorted_libs]
    return (sorted_libs, sorted_books, num_of_books_in_library, books_per_day_from_lib,
            days, signup_time_for_library)
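The comment above describes sorting by total_book_score/signup_time before the greedy pass; a minimal sketch of that ratio sort, with made-up numbers (total_scores and signup_times are hypothetical stand-ins, not fields returned by parse_file):

import numpy as np

total_scores = np.array([9, 40, 14])  # hypothetical total score per library
signup_times = np.array([3, 8, 2])    # hypothetical signup days per library
order = np.flip(np.argsort(total_scores / signup_times))
print(order)  # library indices, best score-per-signup-day first -> [2 1 0]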
Example #19
def process_inifile(inifile):
    width_size = float(inifile.split("geometry_")[1].split("_")[0])
    trajfile = "trajectories/traj" + inifile.split("ini")[2]
    if not path.exists(trajfile):
        logging.critical("trajfile <%s> does not exist" % trajfile)
        exit(FAILURE)

    fps, N, traj = parse_file(trajfile)
    name = "times" + inifile.split("ini")[2]
    J = rolling_flow(fps, N, traj, 61, name)
    return [N, width_size, J]
Example #20
def test_ignore_lines():
    ignore_lines = [
        "^\#.*",
    ]
    content = parse_file('test6.txt', ignore_lines=ignore_lines)
    actual_content = "\n".join(content)

    control = get_file_content('test7.txt')

    error_msg = error_msg_tpl % (actual_content, control)
    assert actual_content == control, error_msg
Example #21
def main():
    try:
        sm = sys.argv[1].lower()
        file_to_read = sys.argv[2]
        file_to_write = sys.argv[3]
    except IndexError:
        print(
            "Enter valid command arguments! Usage: python bw.py <method> <problem file> <solution file>"
        )
        exit(0)

    data_folder = os.path.join("input_files")

    file_to_open = os.path.join(data_folder, file_to_read)
    try:
        with open(file_to_open, 'r') as f:

            objects, begin_config, goal_config = parse_file(f)

            initial_state = BlockState(begin_config, len(begin_config),
                                       objects)
            if sm == "breadth":
                state, nodes, max_depth, running_time = s.bfs_search(
                    initial_state, goal_config)
            elif sm == "depth":
                state, nodes, max_depth, running_time = s.dfs_search(
                    initial_state, goal_config)
            elif sm == "best":
                state, nodes, max_depth, running_time = s.best_first_search(
                    initial_state, goal_config)
            elif sm == "astar":
                state, nodes, max_depth, running_time = s.a_star_search(
                    initial_state, goal_config)
            else:
                print(
                    "Enter valid command arguments! Usage: python bw.py <method> <problem file> <solution file>"
                )
                exit(0)

            moves = s.calculate_path_to_goal(state)
            write_in_file(moves, file_to_write)

            print("cost_of_path:", state.cost)
            print("nodes_expanded:", nodes)
            print("max_search_depth:", max_depth)
            print("running_time:", running_time)

            valid = s.is_valid(initial_state, moves, goal_config)
            if valid:
                print('valid_solution: true')
            else:
                print('valid_solution: false')
    except EnvironmentError:
        print("File not found!")
Example #22
def solve_b():
    (days, books_num, libraries_num, books_scores, num_of_books_in_library,
     signup_time_for_library, books_per_day_from_lib,
     book_ids_for_library) = utils.parse_file("inputs/b_read_on.txt")

    sorted_libs = np.argsort(signup_time_for_library)
    sorted_books = book_ids_for_library[sorted_libs]
    # print(sorted_libs)
    # print(signup_time_for_library[sorted_libs])

    return (sorted_libs, sorted_books, num_of_books_in_library,
            books_per_day_from_lib, days, signup_time_for_library)
Example #23
def main():
    events.extend(parse_file("Files/Trading/"))
    events.extend(parse_file("Files/Deposits/"))
    events.extend(parse_file("Files/Buy/"))
    events.extend(parse_file("Files/Staking/"))
    events.extend(parse_file("Files/Conversions/"))
    events.extend(parse_file("Files/Airdrops/"))
    generate_file(events)
Example #24
def minimum_spanning_tree(input='', starting_vert=0, verbose=True):
    """Parse the input file (utils.parse_file) and runs the algorithm using
     the edges and the vertices obtained from parsing. Returns the modified
     vertices list ('value' and 'parent_key' have new values).

    Args:
        input (str): Input file path (.txt format)
        starting_vert (int, optional): Source vertex key
        verbose (bool, optional): Whether, or not, it should print the result

    Returns:
        list: Chosen vertices with new values for 'value' and 'parent_key'

    Raises:
        FileNotFoundError: Raised when the input file is not found
    """
    vertices, edges = parse_file(input, verbose)

    # settings
    vertices[starting_vert].value = 0
    queue = vertices[:]  # temporary vertex queue list
    chosen_vertices = []

    while queue:
        # queue = sorted(queue, key=lambda v: v.value)
        min_heapify(queue)
        vertex = queue.pop(0)
        chosen_vertices.append(vertex)
        next_edges = search_vertex_edges(edges=edges,
                                         vertex_key=vertex.key,
                                         excluded_verts=chosen_vertices)

        # checking the adjacent new edges
        for edge in next_edges:

            other_vertex = edge.get_other_vertex(vertex.key)

            if edge.weight < other_vertex.value:
                other_vertex.parent_key = vertex.key
                other_vertex.value = edge.weight

    if verbose:
        print_vertex_tree(chosen_vertices)

    return chosen_vertices
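The loop above re-heapifies the whole queue with min_heapify and then pops index 0 on every pass; a minimal sketch of the same extract-min step using the standard-library heapq, with hypothetical (value, key) tuples standing in for the Vertex objects:

import heapq

queue = [(3, 'b'), (0, 'a'), (7, 'c')]  # (value, key) stand-ins for vertices
heapq.heapify(queue)                    # O(n), done once
while queue:
    value, key = heapq.heappop(queue)   # O(log n) extract-min
    print(key, value)

A drop-in replacement would still need to handle the decrease-key updates in the inner loop, e.g. by pushing updated entries and skipping stale ones on pop.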
Example #25
def test_list_comp():
    f = get_full_data_filename("comp.py")
    parser = getlangparser("py")
    inter = getlanginter("py")

    m = parse_file(f, parser)
    inter = inter(entryfnc="main")

    ios = [([], []), ([1], [2]), ([1, 2, 3], [2, 3, 4])]

    retvar = prime(VAR_RET)

    for i, o in ios:
        trace = inter.run(m, args=[i])
        print(trace)
        value = trace[-1][2][retvar]
        assert value == o
Example #26
def get_mem_stat():
  stat = parse_file(path_meminfo, re_all)
  mem = {k: get_int(v) for k, v in stat.items()}
  mem_free = mem['free'] + mem['buffers'] + mem['cached']
  mem_used = mem['total'] - mem_free
  mem_usage = mem_used / mem['total'] * 100.0

  if mem['swap_total'] == 0:
    swap_usage = 0
  else:
    swap_usage = 100 - (mem['swap_free'] / mem['swap_total'] * 100.0)
    swap_used = (mem['swap_total'] - mem['swap_free']) / 2 ** 20

  # meminfo values are in KiB, so dividing by 2 ** 20 converts to GiB
  s = 'ram: %.2fG (%02d%%)' % (mem_used / 2 ** 20, mem_usage)
  if swap_usage > 0:
    s += ' swap: %.1fG (%02d%%)' % (swap_used, swap_usage)
  return s
Example #27
def update_output(list_of_contents, model_type, n_init, max_iter, n_clusters,
                  eps, min_samples, column1, column2, list_of_names,
                  list_of_dates):
    if list_of_contents:
        if column1 == column2:
            return html.P("Select different columns!")

        contents = list_of_contents[0]
        content_type, content_string = contents.split(',')
        decoded = base64.b64decode(content_string)
        try:
            df = parse_file(decoded, content_type, list_of_names)
        except Exception as e:
            print(e)
            return html.Div(['There was an error processing this file.'])

        if len(df.columns) > 8:
            return html.P("Dataframe has too many columns!")

        model = Modeller(
            df, model_type,
            n_init=n_init,
            max_iter=max_iter,
            n_clusters=n_clusters,
            eps=eps,
            min_samples=min_samples)
        try:
            labels = model.set_up_model([column1, column2])
        except Exception as e:
            return html.P(str(e))

        if not labels:
            return html.P("Please, fill in all the parameters in green.")
        return dcc.Graph(id="chart",
                         figure=labels,
                         style={
                             'display': 'block',
                             'height': 600,
                             'width': 900,
                             'margin-left': 'auto',
                             'margin-right': 'auto'
                         })
    return html.P("")
Example #28
def cpu_stat_next():
  try:
    stat_vals = parse_file(path_stats, re_stats)
    stat_vals = {k: [int(w) for w in v] for k, v in stat_vals.items()}
    total = list()
    for i in stat_vals['cpu']:
      dtotal = \
          stat_vals['user'][i]   - old_stats['user'][i]   + \
          stat_vals['system'][i] - old_stats['system'][i] + \
          stat_vals['nice'][i]   - old_stats['nice'][i]   + \
          stat_vals['idle'][i]   - old_stats['idle'][i]   + \
          stat_vals['iowait'][i] - old_stats['iowait'][i]
      if dtotal == 0:
        total.append("00%")
      else:
        # busy share scaled to 0-99, keeping the '%02d' field two digits wide
        idle = stat_vals['idle'][i] + stat_vals['iowait'][i] - old_stats['idle'][i] - old_stats['iowait'][i]
        total.append('%02d%%' % (99 - (idle * 99 / dtotal)))
    old_stats.update(stat_vals)
    return ','.join(total)
  except Exception as e:
    return 'error: %s,' % e
Example #29
def plot(path, mode, **kwargs):
    import figures

    if mode == "sample":
        files = utils.parse_file(path)
        canvas = []
        for file in tqdm.tqdm(files):
            canvas = figures.plot_single(file, canvas=canvas)
    if mode == "hist":
        figures.plot_hist(path)
    if mode == "gan":
        figures.plot_gan(path)
    if mode == "copdac":
        figures.train_plot(path)

    # if kwargs["train"]:
    #     figures.train_plot(path)
    # else:
    #     dataset = logging.load(path)
    #     figures.plot_mult(dataset)

    figures.show()
Example #30
def run(path, **kwargs):
    logger = logging.Logger(log_dir=".", file_name=kwargs["out"], max_len=100)
    data = logging.load(path)
    expname = os.path.basename(path)
    envname, agentname, *_ = expname.split("-")
    env = getattr(envs, envname)(initial_perturb=[1, 0.0, 0,
                                                  np.deg2rad(10)],
                                 dt=0.01,
                                 max_t=40,
                                 solver="rk4",
                                 ode_step_len=1)
    agent = getattr(agents, agentname)(env,
                                       lrw=1e-2,
                                       lrv=1e-2,
                                       lrtheta=1e-2,
                                       w_init=0.03,
                                       v_init=0.03,
                                       theta_init=0,
                                       maxlen=100,
                                       batch_size=16)
    agent.load_weights(data)

    print(f"Runnning {expname} ...")
    _run(env, agent, logger, expname, **kwargs)

    logger.close()

    if kwargs["with_plot"]:
        import figures

        files = utils.parse_file(kwargs["out"])
        canvas = []
        for file in tqdm.tqdm(files):
            canvas = figures.plot_single(file, canvas=canvas)

        figures.show()
Example #31
def main():
    try:
        file_to_read = sys.argv[1]
    except IndexError:
        print("Enter valid command arguments !" + '\n' +
              "Usage : python fa.py <automaton description>")
        exit(0)

    data_folder = os.path.join("automaton descriptions")

    file_to_open = os.path.join(data_folder, file_to_read)
    try:
        with open(file_to_open, 'r', encoding="utf8") as f:
            # parsing of file
            num_of_states, initial, final_states, transitions = parse_file(f)
            # initialization of automaton
            automaton = FiniteAutomaton(num_of_states, initial, final_states,
                                        transitions)
            # initialize Insert GUI
            root = tk.Tk()
            insert_gui = InsertGUI(root, automaton)
            root.mainloop()
    except EnvironmentError:
        print("File not found!")
                    if X_test[i][j] != 0:
                        _prob += math.log(
                            self.prob[int(c) - 1][j]) * X_test[i][j]
                if _prob > _max:
                    _max = _prob
                    _c = c
            y_pred.append(_c)
        return y_pred

    def accuracy(self, y_test, y_pred):
        count = 0
        for i in range(len(y_test)):
            if str(y_test[i]) == str(y_pred[i]):
                count += 1
        print('Acc: %.2f' % (count * 100 / len(y_test)), end=' %')


if __name__ == '__main__':
    X_, y = utils.parse_file('training_data.txt')
    utils.build_dict(X_)
    DICT = utils.load_dict()
    X = np.zeros((len(X_), len(DICT)))
    for i in range(len(X_)):
        X[i] = utils.bag_of_word(X_[i], DICT)
    X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8)
    model = MultinomialNB(0.1)
    model.fit(X_train, y_train)
    print(X_test.shape[1])
    y_pred = model.predict(X_test)
    model.accuracy(y_test, y_pred)
Example #33
def cpu_stat_init():
  stat_vals = parse_file(path_stats, re_stats)
  # convert values to int's
  stat_vals = {k: [int(w) for w in v] for k, v in stat_vals.items()}
  old_stats.update(stat_vals)
Example #34
import argparse

from models import Grid, Position, LawnMower
from utils import parse_file

parser = argparse.ArgumentParser(description='Lawn mowers processor.')
parser.add_argument('lm_data_file',
                    metavar='lm-data-file',
                    type=str,
                    help='Lawn mowers data file locations.')
args = parser.parse_args()

if __name__ == '__main__':
    parse_data = parse_file(args.lm_data_file)
    grid = Grid(*parse_data['grid'])
    lm_actions = []
    for lm in parse_data['lawn_mowers']:
        position = Position(*lm['position'], grid=grid)
        orientation = lm['orientation']
        lm_actions.append([LawnMower(position, orientation), lm['actions']])
    for lawn_mower, actions in lm_actions:
        lawn_mower.perform_actions(actions)
        print(lawn_mower.position.x, lawn_mower.position.y,
              lawn_mower.orientation)
Example #35
def plot_accuracies(accs, names):
    """ Plot different accuracies using 'matplotlib'. """
    for i in range(len(accs)):
        plt.plot([0.0, 0.1, 0.2, 0.5], accs[i], label=names[i])
    plt.grid()
    plt.ylabel('Accuracy (%)')
    plt.xlabel('Probability of a removed value (p)')
    plt.title('Decision Tree Learning Accuracy - Various Datasets')
    plt.legend()
    plt.show()
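A minimal call sketch for the helper above; the accuracy values are made up, and each series needs four points to line up with the hard-coded x-axis probabilities [0.0, 0.1, 0.2, 0.5]:

# Hypothetical accuracies for a single dataset, purely for illustration.
plot_accuracies([[92.0, 88.5, 84.1, 70.2]], ["car"])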


if __name__ == '__main__':

    # CAR DATASET
    car = parse_file("datasets/car.txt")
    car_attrnames = [
        "buying", "maint", "doors", "persons", "lug_boot", "safety", "class"
    ]
    car_dataset = DataSet(name="car",
                          examples=car,
                          attrnames=car_attrnames,
                          target="class")
    car_accuracy = test(car_dataset)

    # PHISHING DATASET
    phishing = parse_file("datasets/phishing.txt")
    phishing_attrnames = [
        "SFH", "popUpWindow", "SSLfinal_State", "Request_URL", "URL_of_Anchor",
        "web_traffic", "URL_Length", "age_of_domain", "having_IP_Address",
        "Result"
Example #36
def main():
    args = get_argparser()
    attributes = utils.parse_file(args.attribute)
    attribute = args.model_weight.split("/")[-2]
    attributes = attributes[attribute]
    attribute_model = backbone.Resnet.get_model(
        args.version, len(attributes), False, args.model_weight, 7,
        train=False)
    preprocess = utils.preprocess

    total = 0
    correct = 0
    total_per_attribute = {}
    correct_per_attribute = {}
    preds, labels = [], []
    if args.cuda:
        attribute_model.cuda()
    test_set = utils.parse_val_file(args.val_file)
    with torch.no_grad():
        for image in test_set:
            total += 1
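            # Each line is assumed to look like "<image path> <label digit>":
            # strip()[-1] grabs the label and strip()[:-2] drops " <digit>".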
            label = image.strip()[-1]
            rgb = Image.open(image.strip()[:-2]).convert("RGB")
            preprocessor = preprocess()
            tensor = preprocessor(rgb).view(1, 3, 224, 224)
            if args.cuda:
                tensor = tensor.cuda()
            output = attribute_model(tensor)
            pred = torch.max(output, 1)[1]
            print(attributes[pred], attributes[int(label)],
                  attributes[pred] == attributes[int(label)])
            if attributes[int(label)] not in total_per_attribute.keys():
                total_per_attribute[attributes[int(label)]] = 1
            else:
                total_per_attribute[attributes[int(label)]] += 1

            if attributes[pred] == attributes[int(label)]:
                if attributes[pred] not in correct_per_attribute.keys():
                    correct_per_attribute[attributes[int(label)]] = 1
                else:
                    correct_per_attribute[attributes[int(label)]] += 1

                correct += 1

            print(float(correct) / total, total, correct)
            preds.append(pred.cpu().numpy()[0])
            labels.append(int(label))

            save_path = os.path.join(args.save_dir, attribute)
            # +"/"+image.split("/")[-1][:-2]
            os.makedirs(save_path, exist_ok=True)
            save_img = save_path + "/" + image.split("/")[-1][:-3]

            if args.save_img and not os.path.exists(save_img):
                utils.save_img(save_img, rgb, attribute,
                               {attributes[int(label)]: attributes[int(pred)]})
    save_plt = save_path + "/cmatrix.jpg"
    utils.plot_confusion_matrix(
        labels, preds, np.array(attributes), True, "Confusion Matrix")
    plt.savefig(save_plt)
    print(total_per_attribute.keys(), np.array(
        list(correct_per_attribute.values())) / np.array(list(total_per_attribute.values())))
Example #37
    def test_sample_5(self):
        graph = parse_file("sample-5.txt")
        result = scc(graph)
        answer = [6, 3, 2, 1]

        self.assertEqual(result, answer)
Example #38
from collections import Counter


# In[2]:


data_dir = "./data/"
train_filename = "topicclass/topicclass_train.txt"
valid_filename = "topicclass/topicclass_valid.txt"
test_filename = "topicclass/topicclass_test.txt"


# In[3]:


train_X, train_y = utils.parse_file(data_dir + train_filename)
valid_X, valid_y = utils.parse_file(data_dir + valid_filename)
test_X = utils.parse_file(data_dir + test_filename, has_labels=False)


# In[4]:


vocab_size = len(utils.word2index) - 1


# In[5]:


sparse_train_X = sp.dok_matrix((len(train_X), vocab_size), dtype=np.int8)
sparse_valid_X = sp.dok_matrix((len(valid_X), vocab_size), dtype=np.int8)
Example #39
def train(sample, mode, **kwargs):
    samplefiles = utils.parse_file(sample, ext="h5")

    if mode == "gan" or mode == "all":
        torch.manual_seed(0)
        np.random.seed(0)

        gandir = kwargs["gan_dir"]
        histpath = os.path.join(gandir, "train-history.h5")

        print("Train GAN ...")

        agent = gan.GAN(
            lr=kwargs["gan_lr"],
            x_size=PARAMS["GAN"]["x_size"],
            u_size=PARAMS["GAN"]["u_size"],
            z_size=PARAMS["GAN"]["z_size"],
            use_cuda=kwargs["use_cuda"],
        )

        if kwargs["continue"] is not None:
            epoch_start = agent.load(kwargs["continue"])
            logger = logging.Logger(path=histpath,
                                    max_len=kwargs["save_interval"],
                                    mode="r+")
        else:
            epoch_start = 0
            logger = logging.Logger(path=histpath,
                                    max_len=kwargs["save_interval"])

        t0 = time.time()
        for epoch in tqdm.trange(epoch_start,
                                 epoch_start + 1 + kwargs["max_epoch"]):
            dataloader = gan.get_dataloader(samplefiles,
                                            shuffle=True,
                                            batch_size=kwargs["batch_size"])

            loss_d = loss_g = 0
            for i, data in enumerate(tqdm.tqdm(dataloader)):
                agent.set_input(data)
                agent.train()
                loss_d += agent.loss_d.mean().detach().numpy()
                loss_g += agent.loss_g.mean().detach().numpy()

            logger.record(epoch=epoch, loss_d=loss_d, loss_g=loss_g)

            if (epoch % kwargs["save_interval"] == 0
                    or epoch == epoch_start + kwargs["max_epoch"]):
                savepath = os.path.join(gandir, f"trained-{epoch:05d}.pth")
                agent.save(epoch, savepath)
                tqdm.tqdm.write(f"Weights are saved in {savepath}.")

        print(f"Elapsed time: {time.time() - t0:5.2f} sec")

    if mode == "copdac" or mode == "all":
        np.random.seed(1)

        env = envs.BaseEnv(initial_perturb=[0, 0, 0, 0.2])

        copdacdir = kwargs["copdac_dir"]

        agentname = "COPDAC"
        Agent = getattr(agents, agentname)
        agent = Agent(
            env,
            lrw=PARAMS["COPDAC"]["lrw"],
            lrv=PARAMS["COPDAC"]["lrv"],
            lrtheta=PARAMS["COPDAC"]["lrtheta"],
            w_init=PARAMS["COPDAC"]["w_init"],
            v_init=PARAMS["COPDAC"]["v_init"],
            theta_init=PARAMS["COPDAC"]["theta_init"],
            maxlen=PARAMS["COPDAC"]["maxlen"],
            batch_size=PARAMS["COPDAC"]["batch_size"],
        )

        expname = "-".join([type(n).__name__ for n in (env, agent)])
        if kwargs["with_gan"]:
            expname += "-gan"
            agent.set_gan(kwargs["with_gan"], PARAMS["COPDAC"]["lrg"])

        if kwargs["with_reg"]:
            expname += "-reg"
            agent.set_reg(PARAMS["COPDAC"]["lrc"])

        histpath = os.path.join(copdacdir, expname + ".h5")
        if kwargs["continue"] is not None:
            epoch_start, i = agent.load(kwargs["continue"])
            logger = logging.Logger(path=histpath, max_len=100, mode="r+")
        else:
            epoch_start, i = 0, 0
            logger = logging.Logger(path=histpath, max_len=100)

        print(f"Training {agentname}...")

        epoch_end = epoch_start + kwargs["max_epoch"]
        for epoch in tqdm.trange(epoch_start, epoch_end):
            dataloader = gan.get_dataloader(samplefiles,
                                            keys=("state", "action", "reward",
                                                  "next_state"),
                                            shuffle=True,
                                            batch_size=64)

            for data in tqdm.tqdm(dataloader, desc=f"Epoch {epoch}"):
                agent.set_input(data)
                agent.train()

                if i % kwargs["save_interval"] == 0 or i == len(dataloader):
                    logger.record(epoch=epoch,
                                  i=i,
                                  w=agent.w,
                                  v=agent.v,
                                  theta=agent.theta,
                                  loss=agent.get_losses())

                i += 1

        logger.close()
Example #40
    def test_sample_1(self):
        graph = parse_file("sample-1.txt")
        result = scc(graph)
        answer = [3, 3, 3]

        self.assertEqual(result, answer)
Example #41
def main():
    execution_times = []
    nodes_problem = []
    list_files = []
    movements = []

    all_files = []
    data_for_excel = []

    while True:
        try:
            print(
                "\nChoose which type of algorithm to run: "
                "\n[D] Uninformed: Breadth-First Search & Depth-First Search"
                "\n[I] Informed: A-star (3 heuristics)")
            enter = input().lower()

            if enter == "d":
                busqueda = ["breadth", "depth"]
                path = "bd-input_files/"
                hoja = "Desinformed"
                break
            elif enter == "i":
                busqueda = ["heuristica_1", "heuristica_2", "ambas"]
                path = "astar-input_files/"
                hoja = "A-star"
                break

        except Exception:
            print("Solo ingrese una letra, porfavor")
            exit(0)

    for sm in busqueda:
        for file in listdir(path):

            # get the problem's initial data
            objects, begin_config, goal_config = parse_file(path + file)

            # get the initial state
            initial_state = BlockState(begin_config, len(begin_config),
                                       objects)

            # run the specified search type
            if sm == "breadth":
                state, nodes, running_time = s.bfs_search(
                    initial_state, goal_config)
            elif sm == "depth":
                state, nodes, running_time = s.dfs_search(
                    initial_state, goal_config)
            elif enter == "i":
                state, nodes, running_time = s.a_star_search(
                    initial_state, goal_config, sm)

            # check whether our solution is valid
            moves = s.calculate_path_to_goal(state)
            valid = s.is_valid(initial_state, moves, goal_config)

            # print the problem's results to the console
            print("\nFile: ", file.replace(".pddl", ""))
            print("Search method: ", sm)
            print("Goal state reached?: ", "Yes" if valid else "No")
            print("Number of moves (maximum depth traversed): ",
                  len(moves))
            print("Nodes expanded: ", nodes)
            print("Execution time: {0:.06f} seconds\n".format(
                running_time))

            # write out the moves that solve the problem
            write_in_file("solution_files/" + file.replace(".pddl", ".txt"),
                          moves)

            # data for the plots
            list_files.append(file.replace(".pddl", "")[11:])
            movements.append(len(moves))
            nodes_problem.append(nodes)
            execution_times.append(running_time)

            # data for the Excel sheet
            all_files.append(file.replace(".pddl", "")[11:])
            fila = [running_time, nodes, len(moves), sm]
            data_for_excel.append(fila)

        # print the three plot types for each search type
        print_plot(list_files, execution_times, 'File vs Time',
                   'Time (seconds)')
        print_plot(list_files, nodes_problem, 'File vs Space',
                   'Number of nodes')
        print_plot(list_files, movements, 'File vs Moves',
                   'Number of moves')

        # reset the arrays
        list_files = []
        execution_times = []
        nodes_problem = []
        movements = []

    # upload the data to an Excel file
    columna = ["Time", "Nodes", "Moves", "Search"]
    create_Excel(data_for_excel, all_files, columna, hoja)

    # print data to compare search types
    print_box("A-star", "Time")
    print_box("A-star", "Nodes")
    print_box("A-star", "Moves")
Example #42
def get_image_files(attrs):
    images = parse_file(config.get('celeba', 'attributes_file'))
    attrs = parse_attr(attrs, _PREDEFINED_ATTR)
    filtered = filter_items(images, attrs)
    return append_path(config.get('celeba', 'image_dir'), filtered)
Example #43
    def test():
        def loadCar():
            for i in car.index:
                CARDICT[i] = Car(i, *(car.loc[i, :]))

        def loadRoad():
            for i in road.index:
                ROADDICT[i] = Road(i, *(road.loc[i, :]))
                if road.loc[i].isDuplex == 1:
                    ROADDICT[-i] = Road(-i, *(road.loc[i, :]))
                    # swap FROM and TO for the reverse road
                    ROADDICT[-i].FROM, ROADDICT[-i].TO = \
                        ROADDICT[-i].TO, ROADDICT[-i].FROM
        def loadCross():
            for i in cross.index:
                CROSSDICT[i] = Cross(i, *(cross.loc[i, :]))

        PATH_CAR = car_path
        PATH_ROAD = road_path
        PATH_CROSS = cross_path
        #PATH_ANSWER = './toyconfig/answer.txt'

        road = parse_file(PATH_ROAD)
        cross = parse_file(PATH_CROSS)
        car = parse_file(PATH_CAR)
        # ans = parse_answer(PATH_ANSWER)
        # create road object
        loadRoad()
        # create cross object
        loadCross()
        # create car object
        loadCar()

        # load route to car
        # =============================================================================
        #     for i in ans.index:
        #         CARDICT[i].initRoute(ans.loc[i].StartTime, ans.loc[i].values[1:][ans.loc[i].values[1:]>0]) # del 0
        # =============================================================================
        def caculateOneCAR(CarID):
            car = CARDICT[CarID]

            testDijkstra = Dijkstra(car.FROM, car.TO, CROSSDICT, ROADDICT)
            route = testDijkstra.getdis()
            routeRoad = testDijkstra.crossToRoad(route)
            disDic = testDijkstra.disDic
            length = disDic[car.TO]

            roadSpeed = []
            for i in routeRoad:
                roadSpeed.append(ROADDICT[i].SPEED)

            speed = min(min(roadSpeed), car.SPEED)

            time = length // speed + 1  # ceil is better
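            # Note: length // speed + 1 overshoots by one whenever length is an
            # exact multiple of speed; math.ceil(length / speed), i.e.
            # -(-length // speed), would be the exact ceiling.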

            return np.abs(np.array(routeRoad)).tolist(), time

        answer = [[], [], []]

        latestPlanTime = 0
        for carID in CARDICT.keys():
            if CARDICT[carID].PLANTIME > latestPlanTime:
                latestPlanTime = CARDICT[carID].PLANTIME

        thisTime = latestPlanTime

        carlist = []
        for i in CARDICT.keys():
            carlist.append(i)

        batchsize = 200
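        # Cars are dispatched in fixed-size batches below; start times are
        # staggered by the slowest estimated travel time within each batch.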
        for i in range(len(carlist) // batchsize):
            batch = carlist[i * batchsize:(i + 1) * batchsize]
            batchAnswer = [[], [], []]
            for carID in batch:
                routeRoad, time = caculateOneCAR(carID)
                batchAnswer[0].append(CARDICT[carID].ID)
                batchAnswer[1].append(time)
                batchAnswer[2].append(routeRoad)
            #print(i)
            #print(batchAnswer[2])
            thisTime += max(batchAnswer[1])

            answer[0].extend(batchAnswer[0])
            answer[1].extend(np.array([thisTime] * batchsize) + 1)
            answer[2].extend(batchAnswer[2])

        if len(carlist) % batchsize != 0:
            lastBatch = carlist[-(len(carlist) % batchsize):]

            batchAnswer = [[], [], []]
            for carID in lastBatch:
                routeRoad, time = caculateOneCAR(carID)
                batchAnswer[0].append(CARDICT[carID].ID)
                batchAnswer[1].append(time)
                batchAnswer[2].append(routeRoad)

            thisTime += max(batchAnswer[1])

            answer[0].extend(batchAnswer[0])
            answer[1].extend(np.array([thisTime] * len(lastBatch)) + 1)
            answer[2].extend(batchAnswer[2])

# =============================================================================
#
#         for i in CARDICT.keys():
#
#             routeRoad, time = caculateOneCAR(i)
#             answer[0].append(CARDICT[i].ID)
#             answer[1].append(np.array(thisTime) + 1)
#             answer[2].append(routeRoad)
#
#             thisTime += time
# =============================================================================

        def writeAnswer(answer):
            seq = []

            for i in range(len(answer[0])):
                route = ', '.join(map(str, answer[2][i]))
                seq.append('(%d, %d, %s)\n' %
                           (answer[0][i], answer[1][i], route))

            # use a context manager so the file is closed after writing
            with open(answer_path, 'w') as fp:
                fp.writelines(seq)

        writeAnswer(answer)