Example #1
def testMutatingSegments():
    verticalLine = Path()
    verticalLine.startingPoint = (13, 13)
    verticalLine.segments.append(Segment(Direction.UP, 5))

    horizontalLine = Path()
    horizontalLine.startingPoint = (2, 2)
    horizontalLine.segments.append(Segment(Direction.RIGHT, 6))

    trickyPath = Path()
    trickyPath.startingPoint = (4, 5)
    trickyPath.segments = [Segment(Direction.RIGHT, 1), Segment(Direction.UP, 1), Segment(Direction.RIGHT, 3),
                           Segment(Direction.DOWN, 1)]

    entity = PopulationEntity()
    entity.paths = [verticalLine, horizontalLine, trickyPath]

    board = Board(16, 16)
    visualize(entity, board,
              'C:\\Users\\Staszek\\PycharmProjects\\GeneticAlgorithmsPCB\\testresults\\testmutating-before.png')

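    # Mutate one segment in each path, then render again to compare with the
    # "before" image.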
    verticalLine.mutateSegment(0, 1)
    horizontalLine.mutateSegment(0, 1)
    trickyPath.mutateSegment(2, 1)

    visualize(entity, board,
              'C:\\Users\\Staszek\\PycharmProjects\\GeneticAlgorithmsPCB\\testresults\\testmutating-after.png')
Example #2
    def run(self):
        if (self.options.compute_metrics()
                or self.options.compute_derived_metrics()
                or self.options.normalize_results()):
            self._get_input_file_meta_info()

        if self.options.compute_metrics():
            self._compute_metrics()
        else:
            self.metrics = self.results.fetch_most_recent()

        if self.options.compute_derived_metrics():
            self._compute_derived_metrics()

        if self.options.store():
            self._store_results()

        if self.options.normalize_results():
            self._normalize_metrics()

        if self.options.show_table():
            table = self.results._get_most_recent_table()
            print(
                pandas.read_sql_query(
                    "SELECT * FROM '{}'".format(table),
                    self.results.conn).to_string(index=False))

        if self.options.show_graph():
            visualizer.visualize(self.metrics)
Example #3
def main(dataset):
    np_dataset = np.loadtxt(dataset)
    k = 10
    accuracies = []
    pruned_accuracies = []
    agg_confusion_matrix = np.zeros((4, 4))
    agg_pruned_confusion_matrix = np.zeros((4, 4))
    # Generate training and test dataset arrays.
    training_sets, test_sets = generate_test_training(np_dataset, k)

    # Evaluation on pre-pruned tree
    for i in range(k):
        training_db = training_sets[i]
        test_db = test_sets[i]
        # Train
        trained_tree, depth = train(training_db, 1)
        # Evaluate
        (accuracy, confusion_matrix) = evaluate(test_db, trained_tree)
        agg_confusion_matrix += confusion_matrix
        accuracies.append(accuracy)
    # Calculate average accuracy
    agg_confusion_matrix /= k
    print(agg_confusion_matrix)
    calculate_measures(agg_confusion_matrix)
    average_accuracy = np.average(accuracies)

    # Tree pruning
    inner_training_sets, validation_sets = generate_test_training(
        training_sets, k - 1)
    # Evaluation on pruned tree
    for i in range(k):
        test_db = test_sets[i]
        for j in range(k - 1):
            training_db = inner_training_sets[j, i]
            validation_db = validation_sets[j, i]
            # Train
            trained_tree, depth = train(training_db, 1)
            # Evaluation
            (accuracy, confusion_matrix) = evaluate(validation_db,
                                                    trained_tree)
            # Prune
            pruned_tree, pruned_depth = prune_tree(trained_tree, validation_db,
                                                   accuracy)
            # Evaluate on now pruned tree
            (pruned_accuracy,
             pruned_confusion_matrix) = evaluate(test_db, pruned_tree)
            pruned_accuracies.append(pruned_accuracy)
            agg_pruned_confusion_matrix += pruned_confusion_matrix
    agg_pruned_confusion_matrix /= (k * (k - 1))
    print(agg_pruned_confusion_matrix)
    calculate_measures(agg_pruned_confusion_matrix)
    avg_pruned_accuracy = np.average(pruned_accuracies)

    print("Average Accuracy: ", average_accuracy)
    print("Average Accuracy of Pruned Decision Tree: ", avg_pruned_accuracy)

    # Train on the entire dataset
    tree, depth = train(np_dataset)
    # Visualize this, saving it to an aptly named file
    visualize(tree, depth, dataset[:dataset.rfind('.')] + '.png')
Example #4
File: main.py Project: kpj/ShRec3D
def main():
    """ Main function
    """
    if len(sys.argv) == 1:
        # simple example
        coords = np.array([[1.0, 0, 0], [1.0, 1.0, 0], [1.5, 0, 0],
                           [1.5, 0.5, 0], [1.5, 1.0, 0], [2.0, 0, 0],
                           [2.0, 1.0, 0], [2.5, 0, 0], [2.5, 1.0, 0],
                           [3.0, 0, 0], [3.0, 0.5, 0], [3.0, 1.0, 0],
                           [3.5, 0.5, 0]])

        #coords = np.array([
        #    [1,0,0],
        #    [1.5,0,0],
        #    [2,0,0],
        #    [2.5,0,0],
        #    [3,0,0]
        #])

        contacts = deconstruct(coords, epsilon=0.51)
        rec_coords = apply_shrec3d(contacts)

        visualize([(coords, 'original points'),
                   (rec_coords, 'reconstructed points')])
    else:
        fname = sys.argv[1]

        contacts = np.loadtxt(fname)
        rec_coords = apply_shrec3d(contacts)

        np.save('%s.ptcld' % fname, rec_coords)
Example #5
def test(data_path, label_path, valid_frame_path, vid=None, local=True):
    import matplotlib.pyplot as plt
    norm = 'default' if args.norm else 'none'
    loader = torch.utils.data.DataLoader(
        dataset=Feeder(data_path,
                       label_path,
                       valid_frame_path,
                       normalization=norm),
        batch_size=64,
        shuffle=False,
        num_workers=2,
    )

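    # When a video id is given, look up its sample, reshape the pose into
    # 20 (x, y, z) joints, and plot it.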
    if vid is not None:
        sample_name = loader.dataset.sample_name
        sample_id = [name.split('.')[0] for name in sample_name]
        index = sample_id.index(vid)
        data, label, frame_num = loader.dataset[index]
        data = np.transpose(np.reshape(data[0, :], (20, 3)), (1, 0))  # (3, 20)
        from visualizer import visualize
        if not local:
            plt.switch_backend('agg')
            visualize(data, False,
                      './figures/{}{}.png'.format(vid, args.modality))
        else:
            visualize(data, True)
Example #6
def testGenRandomPopulation():
    board: Board = loadFromFile('textTests/zad1.txt')
    population = generateRandomPopulation(10, board)
    for index, element in enumerate(population):
        visualize(element, board,
                  f'C:\\Users\\Staszek\\PycharmProjects\\GeneticAlgorithmsPCB\\testresults\\randpop-{index}.png')
    print()
Example #7
def run_csp(board):
    # CSP WITHOUT BACKTRACKING
    time_csp = time.time()
    board_csp, expanded_nodes_csp, path_csp = copy.deepcopy(board).solve(CSP())
    time_csp = time.time() - time_csp
    print_result('CSP', expanded_nodes_csp, board_csp, time_csp)
    visualize(path_csp)
    draw_grid(path_csp)
Example #8
def main():
    start = time.time()
    board = Grid(Meta.board_height, Meta.board_width)
    fonts = Fonts(Meta.board_height)
    key = None
    run = True
    mistakes = 0

    while run:
        clock.tick(FPS)
        play_time = time.time() - start

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False

            # If the mouse was clicked somewhere on board
            if event.type == pygame.MOUSEBUTTONDOWN:
                pos = pygame.mouse.get_pos()
                board.click(pos)

            # A key was pressed
            if event.type == pygame.KEYDOWN:
                # Map the digit keys to the values 1-9; any other key clears
                # the selection (key stays None).
                digit_keys = {
                    pygame.K_1: 1, pygame.K_2: 2, pygame.K_3: 3,
                    pygame.K_4: 4, pygame.K_5: 5, pygame.K_6: 6,
                    pygame.K_7: 7, pygame.K_8: 8, pygame.K_9: 9,
                }
                key = digit_keys.get(event.key)
                board.place(key)
                if board.isFinished():
                    print("Game Over")
                    run = False

                # Delete the incorrect selected entry
                if event.key == pygame.K_DELETE:
                    board.delete()

                #  Visualize Backtracking algorithm
                if event.key == pygame.K_SPACE:
                    visualize(board, win, fonts, play_time, clock)
                    run = False

        board.draw(win, fonts, play_time)
Example #9
    def test_visualization():
        wins_and_losses = []
        for i in range(100):
            wins_and_losses.append(
                test_iteration(iterations=3000, summary=False, total=False))
        wins_data = [(idx, num[0]) for idx, num in enumerate(wins_and_losses)]
        losses_data = [(idx, num[1]) for idx, num in enumerate(wins_and_losses)]

        visualize(wins_data, losses_data)
Example #10
def run_hill(board):
    # Hill Climbing
    time_hill = time.time()
    board_hill, explored_nodes_hill, path_hill = copy.deepcopy(board).solve(
        HillClimb())
    time_hill = time.time() - time_hill
    print_result('HillClimb', explored_nodes_hill, board_hill, time_hill)
    visualize(path_hill)

    draw_grid(path_hill)
Example #11
def crawl():
    """
    Main method for executing a report. Calls other modules to provide a result
    :return: void
    """
    crawler = Crawler()
    run(crawler)
    authors, articles = get(crawler.get_authors(), crawler.get_articles())
    save(authors, articles)
    visualize(authors, articles)
Example #12
def main():
    """Main function."""

    # Print program info
    print('AlfheimDataset features program.')

    # Specify the list of files to read
    files = [
        '2013-11-03_tromso_stromsgodset_first_ONLY_ONE_MINUTE.csv',
        # '2013-11-03_tromso_stromsgodset_first.csv',
        # '2013-11-03_tromso_stromsgodset_second.csv',
    ]

    # Initializing list of frames
    frames = []

    # Reading data from files
    for file in files:
        data = load_from(file)
        frames.append(data)

    # Merging data from all files
    data = pandas.concat(frames)
    if data is None:
        return None

    # Extract features
    [alfheim_mean_speed, alfheim_mean_distances] = extract_features_from(data)

    # Features retrieved from full-game extraction:
    # [FEATURES] The mean velocity of the players is: 3.8 m/s.
    # [FEATURES] The mean distance traveled by the players is: 8.4 Km.
    # [FEATURES] The mean distance traveled with low-intensity by the players is: 2.8 Km.
    # [FEATURES] The mean distance traveled with high-intensity by the players is: 5.6 Km.

    # Mean speed (m/s),
    #game_mean_speed = [3.8]

    # Mean distance (km), mean distance at low speed (km), mean distance at high speed (km)
    #game_mean_distances = [8.4, 2.8, 5.6]

    # FOR ONLY ONE MINUTE OF GAME
    # Mean speed (m/s),
    game_mean_speed = [3.7]

    # Mean distance (km), mean distance at low speed (km), mean distance at high speed (km)
    game_mean_distances = [0.211, 0.064, 0.147]

    y_max = 7
    labels = ('Speed (m/s)', '')
    visualizer.visualize(game_mean_speed, alfheim_mean_speed, labels, y_max)

    y_max = 1
    labels = ('Distance (Km)', 'Low speed distance (Km)',
              'High speed distance (Km)')
    visualizer.visualize(game_mean_distances, alfheim_mean_distances, labels,
                         y_max)
Example #13
def run_solution(lib, dataset, max_time, visual=False):
    res = {'posns': [], 'passed': False}

    def helper(res):
        res['posns'] = lib.find_solution(dataset)
        res['passed'] = True

    # Run the solver in a worker thread so it can be abandoned after max_time.
    t = threading.Thread(target=helper, args=(res,))
    t.start()
    t.join(max_time)

    if visual:
        visualizer.visualize(dataset, res['posns'])

    return res
Example #14
def tryRandomSearch():
    board = loadFromFile('textTests/zad1.txt')
    result = RandomSearch.RandomSearch(board, 10, printOutput=True)
    if result is None:
        print("Could not find a solution")
    else:
        print("Found a solution:")
        visualize(
            result, board,
            'C:\\Users\\Staszek\\PycharmProjects\\GeneticAlgorithmsPCB\\testresults\\randomsearch.png'
        )
        for path in result.paths:
            print(path)
Example #15
    async def post(self):
        db = self.settings['db']

        try:
            # parse request body
            data = tornado.escape.json_decode(self.request.body)

            # query task dataset from database
            pipeline = data['pipeline'].lower()
            tasks = await db.task_query_pipeline(pipeline)
            tasks = [task['trace'] for task in tasks]
            tasks_process = [
                task for task in tasks if task['process'] == data['process']
            ]

            df = pd.DataFrame(tasks_process)

            # prepare visualizer args
            args = data['args']
            args['plot_name'] = str(bson.ObjectId())

            if args['selectors'] == '':
                args['selectors'] = []
            else:
                args['selectors'] = args['selectors'].split(' ')

            # append columns from merge process if specified
            if 'merge_process' in args:
                # load merge data
                tasks_merge = [
                    task for task in tasks
                    if task['process'] == args['merge_process']
                ]
                df_merge = pd.DataFrame(tasks_merge)

                # remove duplicate columns
                dupe_columns = set(df.columns).intersection(df_merge.columns)
                dupe_columns.remove(args['merge_key'])
                df_merge.drop(columns=dupe_columns, inplace=True)

                # append merge columns to data
                df = df.merge(df_merge,
                              on=args['merge_key'],
                              how='left',
                              copy=False)

            # create visualization
            outfile = Visualizer.visualize(df, args)

            # encode image file into base64
            with open(outfile, 'rb') as f:
                image_data = base64.b64encode(f.read()).decode('utf-8')

            self.set_status(200)
            self.set_header('content-type', 'application/json')
            self.write(tornado.escape.json_encode(image_data))
        except Exception as e:
            self.set_status(404)
            self.write(message(404, 'Failed to visualize data'))
            raise e
Example #16
    def solve(self):
        dim = int(input('Enter the dimension of the maze: '))
        p = float(input(
            'Enter the probability at which the blocks will be generated: '))
        self.init_maze(dim, p)
        heu = input(
            "Enter type of heuristic ('m' for manhattan / 'e' for euclidean): ")
        # Add the start cell to the fringe.
        self.fringe.put((self.start.f, 0, self.start))
        self.max_fringe_size = self.fringe.qsize()
        self.cost[self.start] = 0
        i = 1
        while not self.fringe.empty():
            f, x, cell = self.fringe.get()
            # Add cells to the visited set as and when they are popped.
            self.visited.add(cell)
            # If the popped cell is the target cell, a path has been found.
            if cell is self.end:
                self.finish = True
                self.max_nodes = i
                return self.display_path()
                #return self.finish
            adj_cells = self.get_adjacent_cells(cell)
            for adj_cell in adj_cells:
                if adj_cell.open and adj_cell not in self.visited:
                    c = self.cost[cell] + 10  # g score: uniform step cost of 10
                    if adj_cell not in self.cost or self.cost[adj_cell] > c:
                        #if cell hasn't been seen before or has worse cost than the new cost
                        self.cost[adj_cell] = c
                        #update to new cost
                        adj_cell.parent = cell
                        f = c + self.get_h(adj_cell, heu)  # f = g + h
                        # Push into the fringe with f as the priority.
                        self.fringe.put((f, i, adj_cell))
                        self.max_fringe_size = self.fringe.qsize()
                        i += 1

        if not self.finish:
            viz.visualize(self.a)
            print('No Solution!')
Example #17
def remove_blocks(maze, q):
    viz.visualize(maze)

    dim = maze.shape[0]
    num_blocks = (maze == SearchUtils.CELL_BLOCKED).sum()  # number of blocked cells
    to_remove = round(num_blocks * q)  # number of blocks to remove

    # Sweep the maze repeatedly, opening blocked cells with probability q,
    # until the removal quota has been used up.
    while to_remove >= 1:
        for row in range(dim):
            for col in range(dim):
                if maze[row][col] == SearchUtils.CELL_BLOCKED and to_remove >= 1:
                    rand_prob = random.uniform(0, 1)
                    if rand_prob < q:
                        maze[row][col] = SearchUtils.CELL_OPEN
                        to_remove -= 1

    viz.visualize(maze)
    return maze
Example #18
def main(args):

    cfg = setup(args)
    show = True

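    # Register the OpenLogo train/test splits before building the trainer.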
    register_openlogo(cfg.DATASETS.TRAIN[0], "datasets/data/openlogo",
                      "trainval", "supervised_imageset")
    register_openlogo(cfg.DATASETS.TEST[0], "datasets/data/openlogo", "test",
                      "supervised_imageset")
    trainer = DefaultTrainer(cfg)

    evaluator = OpenLogoDetectionEvaluator(cfg.DATASETS.TEST[0])

    if args.eval_only:

        model = trainer.build_model(cfg)
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)

        if show:
            visualize(cfg, amount=20)

        res = trainer.test(cfg, model, evaluators=[evaluator])

        if comm.is_main_process():
            verify_results(cfg, res)
        if cfg.TEST.AUG.ENABLED:
            res.update(trainer.test_with_TTA(cfg, model))

        return res

    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=args.resume)

    if cfg.TEST.AUG.ENABLED:
        trainer.register_hooks([
            hooks.EvalHook(0,
                           lambda: trainer.test_with_TTA(cfg, trainer.model))
        ])

    return trainer.train()
Example #19
def main():
    original_data = pd.read_csv(r'../data/data_with_readme_topics.csv')

    data = preprocess(original_data)

    data = normalize(data)

    # visualizer.save_graphs(data, numeric_columns)

    # Determine clusters
    n_clusters = 4
    print(data.head())
    test_predictions = clusterize(data, n_clusters)

    # Reduce dimensionality for visualizing clusters
    test_reduced = reduce_dimensionality(data)

    # Add column for cluster predictions
    test_reduced['cluster'] = pd.Series(test_predictions)

    visualizer.visualize(test_reduced, n_clusters)

    get_statistics(original_data, test_predictions)
Example #21
def plot():
    payload = request.get_json()
    if payload['accnum'] is not None and payload['gtype'] is not None:
        details = {
            'acnum': payload['accnum'],
            'gtype': payload['gtype'],
            'genes': payload['genes'],
            'sample': payload['sample'],
            'number': payload['number']
        }
        # Obtain the JavaScript script, HTML DOM element and description
        # corresponding to the plot.
        script, div, description = visualize(details)
        # Assemble the pieces to be sent as a JSON object.
        data = {
            'div': div,
            'description': description,
            'script': script
        }
        print(data)
        # Write the new script into the file in the client assets folder.
        fwrite(script)
        return jsonify(data)
    else:
        result = {'error': 'Invalid Accession Number'}
        return jsonify(result)
Example #22
def testCrossover():
    board = Board(16, 16)
    verticalLine = Path()
    verticalLine.startingPoint = (13, 13)
    verticalLine.segments.append(Segment(Direction.UP, 5))

    horizontalLine = Path()
    horizontalLine.startingPoint = (2, 2)
    horizontalLine.segments.append(Segment(Direction.RIGHT, 6))

    entity = PopulationEntity()
    entity.paths = [verticalLine, horizontalLine]

    verticalLine.mutateSegment(0, 1)
    horizontalLine.mutateSegment(0, 1)

    visualize(entity, board,
              'C:\\Users\\Staszek\\PycharmProjects\\GeneticAlgorithmsPCB\\testresults\\crossover-entity1.png')

    verticalLine2 = Path()
    verticalLine2.startingPoint = (13, 13)
    verticalLine2.segments.append(Segment(Direction.UP, 5))

    horizontalLine2 = Path()
    horizontalLine2.startingPoint = (2, 2)
    horizontalLine2.segments.append(Segment(Direction.RIGHT, 6))

    entity2 = PopulationEntity()
    entity2.paths = [verticalLine2, horizontalLine2]

    visualize(entity2, board,
              'C:\\Users\\Staszek\\PycharmProjects\\GeneticAlgorithmsPCB\\testresults\\crossover-entity2.png')

    resultEntity = crossover(entity, entity2, 0.5)

    visualize(resultEntity, board,
              'C:\\Users\\Staszek\\PycharmProjects\\GeneticAlgorithmsPCB\\testresults\\crossover-child.png')
Example #23
strategy = strategies[int(strategy_choice) - 1]
portfolio = (OptimizedGreedyPortfolio(bars, events, datetime.date(2017, 1, 1))
             if portfolio_choice == 2 else
             NaiveGreedyPortfolio(bars, events, datetime.date(2017, 1, 1)))
broker = SimulatedExecutionHandler(events, symbols)

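# Advance the data feed one bar at a time until the backtest is exhausted.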
while True:
    if bars.continue_backtest:
        bars.update_bars()
    else:
        print('\n'.join([
            '{}: {}'.format(column, value)
            for column, value in portfolio.output_summary_stats()
        ]))
        export_all(bars, portfolio, broker, portfolio.simulation)
        visualize(bars.latest_symbol_data, portfolio.all_holdings,
                  broker.history, portfolio.simulation)
        break

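    # Drain the event queue, routing market events to the strategy and
    # portfolio, and signal events back to the portfolio.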
    while True:
        try:
            event = events.get(False)
        except queue.Empty:
            break
        else:
            if event is not None:
                if event.type == 'MARKET':
                    strategy.calculate_signals(event)
                    portfolio.update_time_index(event)
                elif event.type == 'SIGNAL':
                    portfolio.update_signal(event)
                    print(event)
Example #24
import numpy as np
import queue
import visualizer as viz
import SearchUtils

# Generate Maze
test_dim = 50
maze = SearchUtils.generate_maze(test_dim, 0.2)

# Show Maze before solving
viz.visualize(maze)

# Perform DFS Search
print("-----------------------------\nDFS Search:\n-----------------------------")
result = SearchUtils.dfs_search(maze)
if result['status']:
    print("num moves: %d" % result['num_moves'])
    print("maximum fringe size: %d" % result['max_fringe_size'])
    print("path length: ", len(result['path']))
    print("path: ", result['path'])
    viz.visualize(maze, result['path'])
else:
    print("num moves: %d" % result['num_moves'])
    print("Maze is not solvable.")

# Perform BFS Search
print("\n\n-----------------------------\nBFS Search:\n-----------------------------")
Example #25
# sg ({0, 1}, optional) – Training algorithm: 1 for skip-gram; otherwise CBOW.
settings = {'workers': 3, 'min_count': 1, 'sg': 1}

model5 = Word2Vec(sentences, window=5, **settings)
model20 = Word2Vec(sentences, window=20, **settings)
model5.save(model_window_5_file)
model20.save(model_window_20_file)

# Sample search
sample_queries = ['one', 'flaw', 'reason']
for query in sample_queries:
    results = model5.most_similar(query)[:3]
    print('Results for query', query, ':', results)

# Visualization
"""
visualize(model5, 'Study in scarlett, window=5, no split', 'model5.png')
visualize(model5, 'Study in scarlett, window=20, no split', 'model20.png')
"""

# Precision@K
k = 30
search_word = 'holmes'
expectations = [
    'sherlock', 'asked', 'remarked', 'mr', 'observed', 'seemed', 'followed'
]
precision_at_k(model5, search_word, None, k, expectations)

# Experimenting with different window size
window_range = list(range(5, 11))
precision_per_window = []
Example #26
def analyze_job(entry, logscan, is_being_removed, live):
    tprfile = _filename(entry, "tpr")
    xtcfile = _filename(entry, "xtc")
    xvgfile = _filename(entry, "xvg")

    resname = entry.attr[ATTR.RESNAME]
    dt = float(entry.attr[ATTR.LOG_DELTA])
    frames_per_ns = int(1000 / dt)

    nstep = int(entry.attr[ATTR.LOG_NSTEP])
    last = int(entry.attr[ATTR.OLD_STEPS]) / nstep
    now = int(entry.attr[ATTR.NUM_STEPS]) / nstep
    delta = now - last
    nanoseconds = int(now / frames_per_ns)  # how many ns have passed?
    end = nanoseconds * frames_per_ns  # how many frames for this many ns?
    # this rounds the current end value to the nearest ns

    if ATTR.VIS_MARK not in entry.attr:
        entry.attr[ATTR.VIS_MARK] = "0"

    print(
        (
            "Analysis, step numbers: dt|{0:0.2f} fpn|{1:0.0f} log#steps|"
            + "{2:0.0f} last|{3:0.0f} now|{4:0.0f} ns|{5:0.0f} vis|{6:0.0f}"
            + " end|{7:0.0f}"
        ).format(dt, frames_per_ns, nstep, last, now, nanoseconds, int(entry.attr[ATTR.VIS_MARK]), end)
    )

    message1 = None
    message2 = None
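    # Summarize per-state sample counts from the log: raw counts (message1)
    # and their ratios relative to the smallest count (message2).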
    if "Count" in logscan.log_entries:
        data = logscan.log_numbers["Count"]
        count = []
        for number in data:
            count.append(int(number))
        if len(count) > 0:
            count = numpy.array(count, numpy.float32)
            message1 = "[" + numpy.array2string(count, precision=0, separator=", ") + "]"
            avg = numpy.average(count)
            count /= avg
            count /= numpy.min(count)
            message2 = "[" + numpy.array2string(count, precision=1, separator=", ") + "]"

    if message1 is not None:
        print("Current number of samples:")
        print(message1)
        print("Current ratio of samples:")
        print(message2)

    if delta <= 0:
        print("Entry " + entry.jobname + " appears to have made no progress")
        print("Old: {0}, New: {1}, Delta: {2}".format(last, now, delta))

    if (int(entry.attr[ATTR.VIS_MARK]) < end) or is_being_removed:
        workdir = os.path.join("daemon-vis", "")
        fldrname = os.path.basename(os.path.dirname(xtcfile))
        newfldr = os.path.join(workdir, fldrname, "")

        if not os.path.exists(workdir):
            if live:
                os.mkdir(workdir)
            else:
                print("DRYRUN: Would create folder " + workdir)

        if not os.path.exists(newfldr):
            if live:
                os.mkdir(newfldr)
            else:
                print("DRYRUN: Would create folder " + newfldr)

        for fname in [tprfile, xtcfile, xvgfile]:
            cmd = "cp " + fname + " " + newfldr
            if live:
                print(cmd)
                os.system(cmd)
            else:
                print("DRYRUN: Would " + cmd)

        tpr2 = os.path.join(newfldr, os.path.basename(tprfile))
        xtc2 = os.path.join(newfldr, os.path.basename(xtcfile))
        xvg2 = os.path.join(newfldr, os.path.basename(xvgfile))

        start = int(entry.attr[ATTR.VIS_MARK])
        if is_being_removed:
            end = now
        length = end - start

        if live:
            visualizer.visualize(
                tpr2, xtc2, xvg2, resname, nstart=start, nlength=length, doCenter=True, doVMD=True, timedelta=dt
            )
        else:
            print("DRYRUN: Would make a call to visualizer to generate vmd" + " instructions using " + xtc2)
        entry.attr[ATTR.VIS_MARK] = str(end)
    else:
        print("Waiting for a nanosecond of simulation to pass before" + " visualizing.")
Example #27
File: main.py Project: haverzard/CFF
    n = int(input("Enter number of countries: "))
    for i in range(n):
        name, allocation, incoming = input(
            "Country Data ('Name Max_Alloc Incoming_Connections'): ").split(' ')
        total += int(allocation)
        nodes[name] = Node(name)
        nodes[source].addEdge(name, int(incoming))
        nodes[name].addEdge(dest, int(allocation))

    k = input("Enter number of edges: ")
    print("Enter <source> <target> <capacity>: ")
    for i in range(int(k)):
        line = input("")
        line = line.split(' ')
        if (len(line) == 3):
            nodes[line[0]].addEdge(line[1], int(line[2]))

    # Copy nodes
    before_nodes = copy.deepcopy(nodes)

    # Dinic's algorithm
    flow = dinic(source, dest, nodes)
    print("The number of servers allocated statically: {}".format(flow))
    print("The number of servers allocated dynamically: {}".format(total -
                                                                   flow))

    # Save the graph for visualization
    visualize(before_nodes, nodes, "output/problem.png", "output/solution.png")
except:
    print("Bad Input!")
Example #28
                      workers=TILER_WORKERS,
                      tile_processor_queue=tile_processor_pool.get_queue())
        tiler.run()

        results = tile_processor_pool.gather_results()

        print(json.dumps({'tilingComplete': args.slidepath}))

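        # find_hpfs groups the detected points into high-power fields (HPFs).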
        total_points_found, hpf_centers, hpf_points = find_hpfs(results)
        hpfs = list(zip(hpf_centers, hpf_points))

        basename = os.path.basename(args.slidepath)
        with OpenSlide(args.slidepath) as slide:
            hpf_data = visualize(slide,
                                 hpf_centers,
                                 hpf_points,
                                 dir='.',
                                 basename=basename)
        elapsed = time.time() - start

        tiler.cleanup()

        shutil.rmtree(TMP_DIR)

        print(json.dumps({
            'processingComplete': args.slidepath,
            'elapsedTime': time.strftime("%H:%M:%S", time.gmtime(elapsed)),
            'eosinophilCount':
Example #29
def loop(i):
    for j in range(vis_steps):
        update_func(k, curr_alpha=alpha_stepper(i * vis_steps + j),
                    curr_diameter=diameter_stepper(i * vis_steps + j))
    visualize(k, points, lines_collection)
    return points, lines_collection
Example #30
 
# forward evaluation (loop over batches)
loss_testing = 0.
for bb in range(args.batch_size):
    x0 = network.initialize(input_data[bb:bb + 1, ...].to(device), device=device)
    xN_test, _ = evaluate(network.network, x0, testFlag=True, device=device)
    loss_tmp = loss_func(output_data[bb:bb + 1, ...].to(device), xN_test)
    with torch.no_grad():
        loss_testing += loss_tmp.cpu().numpy()

# tensorboard writer
if args.tensorboard:
    # visualizing
    with torch.no_grad():
        fig = visualizer.visualize(network.grad.C.data.cpu().numpy(), metadata)
    os.makedirs(exp_dir + '/tmp/', exist_ok=True)
    img_file_path = exp_dir + '/tmp/leds_{0:4d}.png'.format(ii)
    fig.savefig(img_file_path, transparent=True, dpi=150)
    plt.close()
    led_img = mpimg.imread(img_file_path)[..., :3]

    # writing to tensorboard
    writer.add_scalar('Loss/test', loss_testing / args.batch_size, ii)
    writer.add_scalar('Loss/train', loss_training / args.batch_size, ii)
    writer.add_image('Visual/leds', led_img, ii, dataformats='HWC')

    # saving checkpoints
    saveDict = {'model_state_dict': network.network.state_dict(),
Example #31
import numpy

class Field:
    def __init__(self, filename=None):
        if filename:
            # Parse each line's digit characters and scale them to 0-255 grey values.
            self.data = numpy.array(
                [[int(x) * 255 for x in line.rstrip("\n")]
                 for line in open(filename)
                 if len(line) > 1])

            print(self.data)

if __name__ == "__main__":
    import visualizer
    fieldObj = Field("test01.fld")
    visualizer.visualize((10, 10), None, None, None, fieldObj)
    visualizer.mainloop()
Example #32
from visualizer import visualize
from PIL import Image
import numpy as np
from utils import plot_confusion_matrix
import matplotlib.pyplot as plt


'''
imagePath = '.\\predict\\test1.jpg'
maskPath = '.\\predict\\mask1.png'
prediPath = '.\\predict\\predict1.png'

imageBase = Image.open(imagePath).convert('RGB')
imageBase = imageBase.resize(size=(256, 256))

mask = Image.open(maskPath)
mask = mask.resize(size=(256, 256))

visualize(imageBase, mask)
'''



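# A small hand-made 3x3 confusion matrix to exercise plot_confusion_matrix.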
matrix_data = np.array([[2, 0, 0], [0, 0, 1], [1, 0, 2]])

plot_confusion_matrix(cm=matrix_data, normalize=True, target_names=['Background', 'Carry'],
                      title='Confusion Matrix', cmap=plt.cm.Blues)
Example #33
def main():

    audio = "DTTM.ogg"
    features = ads.split(audio, SIGMA)
    vis.visualize(features, THEME_GREEN, IMAGE_SIZE)
Example #34
            memory.push(state, action, next_state, reward)

        joint_state = joint_next_state

        #with open("array.txt", "wb") as f:
        #    f.write("%s\n" % np.array(temp_state.cpu())) # %
        #f.write('\n')

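        # Begin parameter updates only once the replay memory holds several batches.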
        if len(memory) > args.batch_size * 5:
            for _ in range(args.updates_per_step):
                transitions = memory.sample(args.batch_size)
                batch = Transition(*zip(*transitions))
                #agent.update_parameters_dpp(batch) # todo: need to see how update_parameters_dpp is different from other
                agent.update_parameters(batch)
    #path_reward = calc_dpp_path(env.rover_path, poi_vals, env.poi_pos) # calculate D++ reward for the entire trajectory

    #if i_episode % args.test_frequency == 0:
    if i_episode % 10 == 0:
        #env.render()
        tracker.update([episode_reward], i_episode)
        #print(i_episode, episode_reward/args.num_timesteps)
        print(
            'Episode: {}, noise: {:.5f}, reward: {:.5f}, average reward: {:.5f}'
            .format(i_episode, ounoise.scale, float(episode_reward),
                    float(episode_reward / args.num_timesteps)))

###### once the training is over, test the policies ###############
if args.visualization:
    visualize(env, episode_reward)

input("Press Enter to continue...")
Example #35
import visualizer
import stochastic
# Note: Parabola is used below but not imported in the original snippet;
# it presumably comes from a project-local module.


def getdata():
    parabolas = []
    n = int(input())
    for i in range(n):
        a, move_x, move_y, rotation = map(float, input().split())
        p = Parabola(a, move_x, move_y, rotation)
        parabolas.append(p)
    return parabolas


def test(par):
    p = (5, 5)
    p1 = par.transform(p[0], p[1])
    print(p1)
    p2 = par.back_transform(p1[0], p1[1])
    print(p2)


if __name__ == '__main__':
    parabolas = getdata()
    test(parabolas[0])
    visualizer.visualize(parabolas)
    print(stochastic.area(parabolas, -10, 10, -10, 10))
    print(stochastic.integrate(lambda x, y: x**2 * y**2,
                               parabolas, -10, 10, -10, 10))