Beispiel #1
0
def main():
    """Load the configured lanelet map, read its critical areas file,
    and dump the areas to stdout as YAML."""
    cli_args = argparser.parse_arguments(ROOT_PATH)

    print('loading map')
    origin = lanelet2.io.Origin(cli_args['lat_origin'], cli_args['lon_origin'])
    utm_projector = lanelet2.projection.UtmProjector(origin)
    lanelet_map = lanelet2.io.load(cli_args['lanelet_map'], utm_projector)
    critical_areas = readCriticalAreas(cli_args['critical_areas_file'],
                                       lanelet_map)
    print('critical areas read: ')
    # Serialize each area via its own dict representation.
    print(yaml.safe_dump(
        [area.toDict() for area in critical_areas.critical_areas]))
Beispiel #2
0
def main():
    """Load the configured lanelet map and display it in a matplotlib window.

    Reads CLI arguments (origin coordinates and map path), builds a UTM
    projector from the configured origin, loads the map, and draws it.
    """
    args = argparser.parse_arguments(ROOT_PATH)

    print('loading map')
    projector = lanelet2.projection.UtmProjector(
        lanelet2.io.Origin(args['lat_origin'], args['lon_origin']))
    laneletmap = lanelet2.io.load(args['lanelet_map'], projector)

    print('drawing map')
    fig, axes = plt.subplots(1, 1)
    # canvas.set_window_title was deprecated in matplotlib 3.4 and removed in
    # 3.6; the manager-level call below works on both old and new versions.
    fig.canvas.manager.set_window_title("Map Visualization")
    drawing_utils.draw_lanelet_map(laneletmap, axes)

    plt.show()
def load_code():
    """Read the Befunge program named on the command line.

    Returns the file's contents as a list of lines; a completely empty
    file yields a single one-space line so the interpreter always has a
    non-empty playfield.  Any failure to read the file is reported
    through the argument parser's error handler.
    """
    cli_args = ap.parse_arguments()
    try:
        with open(cli_args.befunge_file, "r") as source:
            lines = source.read().splitlines()
    except Exception as err:
        # Deliberately broad: any read problem becomes a CLI usage error.
        ap.parser.error(str(err))
    else:
        return lines if lines else [" "]
Beispiel #4
0
def main():
    """Compute all critical areas of the configured lanelet map and
    write them to the configured output file."""
    cli_args = argparser.parse_arguments(ROOT_PATH)

    print('loading map')
    origin = lanelet2.io.Origin(cli_args['lat_origin'], cli_args['lon_origin'])
    utm_projector = lanelet2.projection.UtmProjector(origin)
    lanelet_map = lanelet2.io.load(cli_args['lanelet_map'], utm_projector)
    traffic_rules = lanelet2.traffic_rules.create(
        lanelet2.traffic_rules.Locations.Germany,
        lanelet2.traffic_rules.Participants.Vehicle)
    routing_graph = lanelet2.routing.RoutingGraph(lanelet_map, traffic_rules)

    print('analyzing map')
    critical_areas = map_analyzer.getAllCriticalAreas(
        lanelet_map, routing_graph, cli_args['critical_area_sim_thresh'])

    print('writing map analysis')
    writeOutCriticalAreas(critical_areas, cli_args['critical_areas_file'])
Beispiel #5
0
def main():
    """Draw the configured lanelet map and overlay its critical areas."""
    cli_args = argparser.parse_arguments(ROOT_PATH)

    print('loading map')
    origin = lanelet2.io.Origin(cli_args['lat_origin'], cli_args['lon_origin'])
    utm_projector = lanelet2.projection.UtmProjector(origin)
    lanelet_map = lanelet2.io.load(cli_args['lanelet_map'], utm_projector)
    traffic_rules = lanelet2.traffic_rules.create(
        lanelet2.traffic_rules.Locations.Germany,
        lanelet2.traffic_rules.Participants.Vehicle)
    routing_graph = lanelet2.routing.RoutingGraph(lanelet_map, traffic_rules)

    print('drawing map')
    fig, axes = plt.subplots(1, 1)
    fig.canvas.set_window_title("Map Visualization")
    drawing_utils.draw_lanelet_map(lanelet_map, axes)

    print('analyzing map')
    # Similarity threshold 3.0 is hard-coded here; other entry points in
    # this project read it from the CLI arguments.
    critical_areas = map_analyzer.getAllCriticalAreas(lanelet_map,
                                                      routing_graph, 3.0)
    drawing_utils.draw_critical_areas(critical_areas, axes)

    plt.show()
Beispiel #6
0
from prep import Preprocessor
import pandas as pd

from argparser import parse_arguments
from dataset import Dataset
from model import DeepPunctuation, DeepPunctuationCRF
from config import *
import augmentation
from yttm import YTTM
from lstm_model import BiLSTM_CNN_CRF
from pqrnn import PQRNN
from transformers import BertForSequenceClassification, BertConfig

# NOTE(review): torch, np and MODELS are not imported above; presumably they
# come in via `from config import *` -- confirm against config.py.
# 'file_system' sharing is usually chosen to avoid fd-limit problems with
# many DataLoader workers -- TODO confirm that is the intent here.
torch.multiprocessing.set_sharing_strategy('file_system')

args = parse_arguments()

# for reproducibility: seed both torch and numpy and force deterministic
# (non-benchmarked) cuDNN kernels
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(args.seed)

# tokenizer: either a pretrained tokenizer looked up in MODELS, or a YTTM
# (presumably YouTokenToMe) BPE model, selected by the --yttm argument
if args.yttm == 'false':
    tokenizer = MODELS[args.pretrained_model][1].from_pretrained(
        args.pretrained_model)
else:
    tokenizer = YTTM(args.yttm)

# share the chosen tokenizer with the augmentation module
augmentation.tokenizer = tokenizer
def get_inputv(inp):
    """Copy *inp* into a fresh GPU float tensor and wrap it as a Variable.

    The input is cast to float and copied, so the caller's tensor is
    never mutated.  Requires a CUDA device.
    """
    gpu_buffer = torch.FloatTensor().cuda()
    gpu_buffer.resize_as_(inp.float()).copy_(inp)
    return Variable(gpu_buffer)

# Flask application serving the model over HTTP.
app = Flask(__name__)
CORS(app)  # enable cross-origin requests from browser clients

# select device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)

# code for TextureGAN
# NOTE(review): the option string below hard-codes machine-specific paths
# (save/load dirs, data path); several options are overridden right after.
command = '--display_port 7770 --load 0 --load_D -1 --load_epoch 105 --gpu 2 --model texturegan --feature_weight 1e2 --pixel_weight_ab 1e3 --global_pixel_weight_l 1e3 --local_pixel_weight_l 0 --style_weight 0 --discriminator_weight 1e3 --discriminator_local_weight 1e6  --learning_rate 1e-4 --learning_rate_D 1e-4 --batch_size 36 --save_every 50 --num_epoch 100000 --save_dir /home/psangkloy3/skip_leather_re/ --load_dir /home/psangkloy3/skip_leather_re/ --data_path ../../training_handbags_pretrain/ --learning_rate_D_local  1e-4 --local_texture_size 50 --patch_size_min 20 --patch_size_max 40 --num_input_texture_patch 1 --visualize_every 5 --num_local_texture_patch 1'
args = parse_arguments(command.split())

# override selected options: batch of 1 and fixed 256x256 images
args.batch_size = 1
args.image_size = 256
args.resize_max = 256
args.resize_min = 256
# args.data_path = '/home/psangkloy3/training_handbags_pretrain/' #change to your data path
args.data_path = '/media/jay2019/DATA/Study/8.创新杯/3.现有代码/clothes_data/clothes_data'
#args.data_path = './dataset/training_handbags_pretrain'
#args.data_path = './dataset/training_shoes_pretrain'
#args.data_path = './dataset/j_test_shoes'

transform = get_transforms(args)
# val = make_dataset(args.data_path, 'val')
# valDset = ImageFolder('val', args.data_path, transform)
# val_display_size = 1
Beispiel #8
0
            feat_model.features,
            [layers_map[x.strip()] for x in args.style_layers.split(',')])

        model = {
            "netG": netG,
            "netD": netD,
            "netD_local": netD_local,
            "criterion_gan": criterion_gan,
            "criterion_pixel_l": criterion_pixel_l,
            "criterion_pixel_ab": criterion_pixel_ab,
            "criterion_feat": criterion_feat,
            "criterion_style": criterion_style,
            "criterion_texturegan": criterion_texturegan,
            "real_label": real_label,
            "fake_label": fake_label,
            "optimizerD": optimizerD,
            "optimizerD_local": optimizerD_local,
            "optimizerG": optimizerG
        }

        for epoch in range(args.load_epoch, args.num_epoch):
            train(model, train_loader, val_loader, input_stack, target_img,
                  target_texture, segment, label, label_local, extract_content,
                  extract_style, loss_graph, vis, epoch, args)
            #break


# Script entry point: parse the command-line arguments and run main().
if __name__ == '__main__':
    args = argparser.parse_arguments()
    main(args)
Beispiel #9
0
def main():
    """Parse the command-line options and start the processing pipeline."""
    from argparser import parse_arguments
    cli_args = parse_arguments(ROOT_PATH)
    print(ROOT_PATH)
    processing_pipeline.startProcessingPipeline(cli_args)
Beispiel #10
0
    "g": lambda x1, x2: stackstack[-1].append(ord(the_field.get_char(x2, x1)))}

# Global constants related to pygame
CHAR_WIDTH = 12
CHAR_HEIGHT = 28
SCREEN_HEIGHT_MODIFIER = 300
# Playfield size in cells (the_field.X/Y) scaled to pixels, plus extra room
# for the stack/output panels.
SCREEN_HEIGHT = the_field.Y * CHAR_HEIGHT + SCREEN_HEIGHT_MODIFIER
SCREEN_WIDTH = the_field.X * CHAR_WIDTH + 500
BG_COLOR = (52, 52, 52)
STACK_BG_COLOR = (0, 0, 0, 100)
STACK_OUTPUT_COLOR = (230, 200, 70)
SOSS_OUTPUT_COLOR = (70, 230, 200)
POINTER_COLOR = (255, 255, 255, 130)
STACK_CHAR_HEIGHT = 16
STACK_CHAR_WIDTH = 10
ARGS = ap.parse_arguments()
# Interpreter control flags, toggled from the event loop.
_paused = False
_step_once = False
_reset = False

# Pygame surface inits
# Skipped when OUTPUT_MODE is set -- presumably a headless/text-only mode;
# confirm against the argument parser.
if not ARGS.OUTPUT_MODE:
    screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
    background = pygame.Surface(screen.get_size()).convert()
    # Semi-transparent rectangle marking the instruction pointer.
    pointer_rect = pygame.Surface((CHAR_WIDTH, CHAR_HEIGHT), pygame.SRCALPHA)
    pointer_rect.fill(POINTER_COLOR)
    stacksurf = pygame.Surface((SCREEN_WIDTH, SCREEN_HEIGHT_MODIFIER),
                               pygame.SRCALPHA)
    outsurf = pygame.Surface((int(float(SCREEN_WIDTH) / 2.0),
                             SCREEN_HEIGHT_MODIFIER),
                             pygame.SRCALPHA)
def main():
    """Replay an interaction trackfile on a lanelet map, with visualization.

    Loads the map, computes its critical areas, reads the recorded tracks,
    and steps through the recording in 100 ms increments, maintaining a
    Vehicle state (with its possible lanelet paths) for every visible
    track.  When visualization is enabled, the map plus vehicles are
    animated and a second figure plots orientation / arc-distance data for
    one selected track.
    """
    from argparser import parse_arguments
    args = parse_arguments(ROOT_PATH)
    print(ROOT_PATH)
    print('loading map...')
    projector = lanelet2.projection.UtmProjector(
        lanelet2.io.Origin(args['lat_origin'], args['lon_origin']))
    laneletmap = lanelet2.io.load(args['lanelet_map'], projector)
    trafficRules = lanelet2.traffic_rules.create(
        lanelet2.traffic_rules.Locations.Germany,
        lanelet2.traffic_rules.Participants.Vehicle)
    graph = lanelet2.routing.RoutingGraph(laneletmap, trafficRules)

    print('analyzing map...')
    criticalAreas = map_analyzer.getAllCriticalAreas(
        laneletmap, graph, args['critical_area_sim_thresh'])
    laneletCaDict = map_analyzer.createLaneletCriticalAreaDict(criticalAreas)

    print('loading trackfiles')
    track_dictionary = dataset_reader.read_tracks(
        args['interaction_trackfile'])

    # Overall time span covered by all tracks, stepped at 100 ms.
    timestamp_min = 1e9
    timestamp_max = 0
    timestamp_delta_ms = 100
    for key, track in iter(track_dictionary.items()):
        timestamp_min = min(timestamp_min, track.time_stamp_ms_first)
        timestamp_max = max(timestamp_max, track.time_stamp_ms_last)
    timestamp = timestamp_min
    patchesDict = dict()
    textDict = dict()

    visualize = args['visualize']

    # Pick a random id that actually exists in the track dictionary.
    while True:
        random_trackid = random.randint(1, len(track_dictionary))
        if random_trackid in track_dictionary:
            print('trackid %s is found' % (random_trackid))
            break

    # NOTE(review): debug override -- the randomly chosen id above is
    # discarded and track 7 is always used.
    random_trackid = 7
    print(random_trackid)

    if visualize:
        fig, axes = plt.subplots(1, 1)
        fig.canvas.set_window_title("Prediction Visualization")
        drawing_utils.draw_fancy_lanelet_map(laneletmap, axes)
        drawing_utils.draw_critical_areas(criticalAreas, axes)
        title_text = fig.suptitle("")
        fig2, axes2 = plt.subplots(1, 1)
        # axes2.set_title("Data Visualization for random track: ",random_trackid)
        fig2.canvas.set_window_title("Data Visualization for track %s " %
                                     (random_trackid))
        plt.xlim(track_dictionary[random_trackid].time_stamp_ms_first,
                 track_dictionary[random_trackid].time_stamp_ms_last)
        #plt.ylim(-3.2, 3.2)
        plt.plot([
            track_dictionary[random_trackid].time_stamp_ms_first,
            track_dictionary[random_trackid].time_stamp_ms_last
        ], [0, 0],
                 c='k')
        colors = ['g', 'b', 'c', 'm', 'y', 'k', 'orange', 'aqua', 'lime']
        markers = ['x', '+', 'v', '^', '1', '2', '3', '4', '5']
        plt.ion()
        plt.show()

    # Main replay loop: one iteration per 100 ms timestep.
    activeObjects = dict()
    while timestamp < timestamp_max:
        if visualize:
            start_time = time.time()

        currentTracks = interaction_dataset_utilities.getVisibleTracks(
            timestamp, track_dictionary)

        possiblePathParams = lanelet2.routing.PossiblePathsParams()
        possiblePathParams.includeShorterPaths = True
        possiblePathParams.includeLaneChanges = False
        for track in currentTracks:
            currentMs = track.motion_states[timestamp]
            if track.track_id not in activeObjects:
                # First time this track becomes visible: build its vehicle
                # state and enumerate its possible lanelet paths.
                vehicleState = predictiontypes.Vehicle(objectId=track.track_id,
                                                       motionState=currentMs,
                                                       width=track.width,
                                                       length=track.length)
                possiblePathsWithInfo = []
                matchings = prediction_utilities.matchMotionState(
                    laneletmap,
                    currentMs)  # match the car to several possible lanelets
                for match in matchings:  # for each start lanelet
                    possiblePathParams.routingCostLimit = lanelet2.geometry.approximatedLength2d(
                        match.lanelet) + 150
                    paths = map(
                        lambda x: predictiontypes.PathWithInformation(
                            laneletPath=x, caDict=laneletCaDict),
                        # caDict means conflict
                        graph.possiblePaths(match.lanelet, possiblePathParams))
                    possiblePathsWithInfo.extend(paths)
                vehicleState.pathsWithInformation = possiblePathsWithInfo
                activeObjects[track.track_id] = vehicleState
            else:
                vehicleState = activeObjects[track.track_id]
            vehicleState.update(currentMs)

        prediction_utilities.removeInactiveObjects(activeObjects, timestamp)

        # TODO: continue here - calculate matching, build lanelet->critical area dictionary, associate track -> next ca, estimate state

        if visualize:
            plt.sca(axes)
            # plt.axis('off')
            # drawing_utils.draw_motion_states(track_dictionary, timestamp, axes, patchesDict, textDict)
            drawing_utils.draw_vehicle_states(activeObjects, axes, patchesDict,
                                              textDict)
            prediction_utilities.cleanDrawingDicts(activeObjects, patchesDict,
                                                   textDict)
            # fig.canvas.draw()
            title_text.set_text("\nts = {}".format(timestamp))
            if random_trackid in activeObjects.keys():
                plt.sca(axes2)
                plt.title('Arc distance for all paths')
                basic_point = lanelet2.core.BasicPoint2d(
                    track_dictionary[random_trackid].motion_states[timestamp].
                    x, track_dictionary[random_trackid].
                    motion_states[timestamp].y)
                # Black marker when the tracked vehicle is inside any
                # critical area, red marker otherwise.
                in_area = 0
                for i in range(len(criticalAreas.critical_areas)):
                    area_center = lanelet2.core.BasicPoint2d(
                        criticalAreas.critical_areas[i].x,
                        criticalAreas.critical_areas[i].y)
                    if lanelet2.geometry.distance(
                            basic_point, area_center
                    ) <= criticalAreas.critical_areas[i].radius:
                        plt.scatter(timestamp,
                                    track_dictionary[random_trackid].
                                    motion_states[timestamp].psi_rad,
                                    c='k',
                                    s=10,
                                    label='vehicle %s in critical area' %
                                    random_trackid)
                        in_area = 1
                        break
                if in_area == 0:
                    plt.scatter(timestamp,
                                track_dictionary[random_trackid].
                                motion_states[timestamp].psi_rad,
                                c='r',
                                s=1,
                                label='vehicle %s orientation' %
                                random_trackid)

                for i in range(
                        len(activeObjects[random_trackid].
                            arcCoordinatesAlongPaths)):  # for each path
                    arc_distance = activeObjects[
                        random_trackid].arcCoordinatesAlongPaths[i].distance
                    plt.scatter(timestamp,
                                arc_distance,
                                c=colors[i],
                                s=10,
                                label='path arcCoordinate distance %s' % (i),
                                marker=markers[i])

            end_time = time.time()
            if timestamp == track_dictionary[
                    random_trackid].time_stamp_ms_first:
                plt.legend()
            # Pause so the animation runs in (approximately) real time.
            plt.pause(
                max(0.001,
                    timestamp_delta_ms / 1000. - (end_time - start_time)))
        timestamp += timestamp_delta_ms
    if visualize:
        plt.ioff()
    return
def main():
    """Compare a random track's recorded heading with its candidate paths.

    Repeatedly picks a random track id (1..115) that exists in the
    trackfile, computes the possible lanelet paths matching the track's
    first motion state, then plots the recorded orientation against each
    path's orientation over the track's lifetime.  Loops forever; each
    iteration waits for a button press before picking the next track.
    """
    args = argparser.parse_arguments(ROOT_PATH)

    print('loading map')
    projector = lanelet2.projection.UtmProjector(lanelet2.io.Origin(args['lat_origin'], args['lon_origin']))
    laneletmap = lanelet2.io.load(args['lanelet_map'], projector)
    trafficRules = lanelet2.traffic_rules.create(lanelet2.traffic_rules.Locations.Germany,
                                                 lanelet2.traffic_rules.Participants.Vehicle)
    graph = lanelet2.routing.RoutingGraph(laneletmap, trafficRules)

    print('analyzing map')
    criticalAreas = map_analyzer.getAllCriticalAreas(laneletmap, graph, args['critical_area_sim_thresh'])
    laneletCaDict = map_analyzer.createLaneletCriticalAreaDict(criticalAreas)

    print('loading trackfiles')
    track_dictionary = dataset_reader.read_tracks(args['interaction_trackfile'])
    while True:
        # NOTE(review): the upper bound 115 is hard-coded; presumably the
        # maximum track id in the expected trackfiles -- confirm.
        randomTrackId = randint(1, 115)
        if randomTrackId not in track_dictionary:
            print('trackid %s not found' % (randomTrackId))
            continue
        track = track_dictionary[randomTrackId]
        timestamp = track.time_stamp_ms_first

        # Build the vehicle state and its candidate paths from the first
        # motion state of the chosen track.
        vehicleState = predictiontypes.Vehicle(objectId=track.track_id, motionState=track.motion_states[timestamp],
                                               width=track.width, length=track.length)
        matchings = prediction_utilities.matchMotionState(laneletmap, track.motion_states[timestamp])
        pathsWithInfo = []

        possiblePathParams = lanelet2.routing.PossiblePathsParams()
        possiblePathParams.includeShorterPaths = True
        possiblePathParams.includeLaneChanges = False

        for match in matchings:
            possiblePathParams.routingCostLimit = lanelet2.geometry.approximatedLength2d(match.lanelet) + 150
            paths = map(lambda x: predictiontypes.PathWithInformation(laneletPath=x, caDict=laneletCaDict),
                        graph.possiblePaths(match.lanelet, possiblePathParams))
            pathsWithInfo.extend(paths)
        vehicleState.pathsWithInformation = pathsWithInfo

        # Sample the recorded orientation and each path's orientation at
        # the projected arc length, every 100 ms.
        trackOrientations = []
        timestamps = []
        pathOrientations = [[] for j in range(len(pathsWithInfo))]

        dt = 100
        while timestamp <= track.time_stamp_ms_last:
            timestamps.append(timestamp)
            trackOrientations.append(track.motion_states[timestamp].psi_rad)
            for i in range(len(pathsWithInfo)):
                basicPoint = lanelet2.core.BasicPoint2d(track.motion_states[timestamp].x,
                                                        track.motion_states[timestamp].y)
                arcCoordinates = lanelet2.geometry.toArcCoordinates(pathsWithInfo[i].centerline, basicPoint)
                pathOrientations[i].append(pathsWithInfo[i].getOrientationAtArcLength(arcCoordinates.length))
            timestamp += dt

        # Plot track orientation (red) and each candidate path's
        # orientation (one color/marker per path).
        fig, axes = plt.subplots(1, 1)
        fig.canvas.set_window_title("Orientation of Track %s and its matching paths" % (randomTrackId))
        plt.xlim(track.time_stamp_ms_first, track.time_stamp_ms_last)
        plt.ylim(-math.pi, math.pi)
        colors = ['g', 'b', 'c', 'm', 'y', 'k', 'orange', 'aqua', 'lime']
        markers = ['x', '+', 'v', '^', '1', '2', '3', '4', '5']
        plt.ion()
        plt.show()
        plt.sca(axes)
        plt.scatter(timestamps, trackOrientations, c='r', s=2, label='Track orientation')
        plt.plot(timestamps, trackOrientations, c='r')
        for i in range(len(pathsWithInfo)):
            plt.scatter(timestamps, pathOrientations[i], c=colors[i], label='Path number %s' % (i), marker=markers[i])
            plt.plot(timestamps, pathOrientations[i], c=colors[i])
        plt.legend()
        plt.waitforbuttonpress()
        plt.ioff()
def main():
    """Replay a trackfile and visualize inter-vehicle dependencies.

    Like the other prediction entry points: loads the map, computes its
    critical areas, and steps through the recording in 100 ms increments
    maintaining a Vehicle state per visible track.  Additionally, each
    step derives a dependency graph between the active vehicles and draws
    it with networkx in a second figure when visualization is enabled.
    """
    from argparser import parse_arguments
    args = parse_arguments(ROOT_PATH)
    print(ROOT_PATH)
    print('loading map...')
    # Short map / trackfile names used only in window titles.
    map_name = (args['lanelet_map'].split('/')[-1]).split('.')[0]
    trackfile = (args['interaction_trackfile'].split('/')[-1]).split('_')[-1]
    projector = lanelet2.projection.UtmProjector(
        lanelet2.io.Origin(args['lat_origin'], args['lon_origin']))
    laneletmap = lanelet2.io.load(args['lanelet_map'], projector)
    trafficRules = lanelet2.traffic_rules.create(
        lanelet2.traffic_rules.Locations.Germany,
        lanelet2.traffic_rules.Participants.Vehicle)
    graph = lanelet2.routing.RoutingGraph(laneletmap, trafficRules)

    print('analyzing map...')
    criticalAreas = map_analyzer.getAllCriticalAreas(
        laneletmap, graph, args['critical_area_sim_thresh'])
    laneletCaDict = map_analyzer.createLaneletCriticalAreaDict(criticalAreas)

    print('loading trackfiles')
    track_dictionary = dataset_reader.read_tracks(
        args['interaction_trackfile'])

    # Overall time span covered by all tracks, stepped at 100 ms.
    timestamp_min = 1e9
    timestamp_max = 0
    timestamp_delta_ms = 100
    for key, track in iter(track_dictionary.items()):
        timestamp_min = min(timestamp_min, track.time_stamp_ms_first)
        timestamp_max = max(timestamp_max, track.time_stamp_ms_last)
    timestamp = timestamp_min
    patchesDict = dict()
    textDict = dict()

    #parameter setting
    # NOTE(review): thresholds below are passed to dependency_calculate /
    # hidden_intentions_initialize; units (meters/seconds) are not stated
    # here -- confirm against those helpers.
    visualize = args['visualize']
    Deviation_alongarcCoordinate = 4
    Time_difference_max = 3
    Time_gap = 5
    Default_gap = 40
    Distance_difference_max = 20
    PathLengthLimit = 500

    if visualize:
        fig, axes = plt.subplots(1, 1)
        fig.canvas.set_window_title("Prediction Visualization in %s for %s" %
                                    (map_name, trackfile))
        drawing_utils.draw_fancy_lanelet_map(laneletmap, axes)
        drawing_utils.draw_critical_areas(criticalAreas, axes)
        title_text = fig.suptitle("")
        fig2, axes2 = plt.subplots(1, 1)
        fig2.canvas.set_window_title("Dependency Visualization in %s for %s" %
                                     (map_name, trackfile))
        G = nx.DiGraph()
        plt.ion()
        plt.show()

    # Main replay loop: one iteration per 100 ms timestep.
    activeObjects = dict()
    while timestamp < timestamp_max:
        if visualize:
            start_time = time.time()

        currentTracks = interaction_dataset_utilities.getVisibleTracks(
            timestamp, track_dictionary)

        # possiblePathParams = lanelet2.routing.PossiblePathsParams()
        # possiblePathParams.includeShorterPaths = True
        # possiblePathParams.includeLaneChanges = False
        for track in currentTracks:
            currentMs = track.motion_states[timestamp]
            if track.track_id not in activeObjects:
                # First time this track becomes visible: build its vehicle
                # state and enumerate its possible lanelet paths.
                vehicleState = predictiontypes.Vehicle(
                    objectId=track.track_id,
                    motionState=currentMs,
                    width=track.width,
                    length=track.length,
                    timestamp_first=track.time_stamp_ms_first)
                possiblePathsWithInfo = []
                matchings = prediction_utilities.matchMotionState(
                    laneletmap,
                    currentMs)  # match the car to several possible lanelets
                for match in matchings:  # for each start lanelet
                    #possiblePathParams.routingCostLimit = lanelet2.geometry.approximatedLength2d(match.lanelet) + 300   # a very important value. it means how far(meters) to consider
                    # paths = map(lambda x: predictiontypes.PathWithInformation(laneletPath=x, caDict=laneletCaDict),
                    #             # caDict means conflict
                    #             graph.possiblePaths(match.lanelet, possiblePathParams))
                    #possiblePathsWithInfo.extend(paths)
                    Pathset = possiblepath_calculate(
                        matching=match,
                        map_graph=graph,
                        pathLengthLimit=PathLengthLimit)
                    paths2 = map(lambda x: predictiontypes.PathWithInformation(
                        laneletPath=x, caDict=laneletCaDict),
                                 Pathset)  # caDict means conflict
                    possiblePathsWithInfo.extend(paths2)
                vehicleState.pathsWithInformation = possiblePathsWithInfo
                activeObjects[track.track_id] = vehicleState
            else:
                vehicleState = activeObjects[track.track_id]
            vehicleState.update(currentMs)

        prediction_utilities.removeInactiveObjects(activeObjects, timestamp)
        # TODO: continue here - calculate matching, build lanelet->critical area dictionary, associate track -> next ca, estimate state

        if visualize:
            plt.sca(axes)
            drawing_utils.draw_vehicle_states(activeObjects, axes, patchesDict,
                                              textDict)
            prediction_utilities.cleanDrawingDicts(activeObjects, patchesDict,
                                                   textDict)
            title_text.set_text("\nts = {}".format(timestamp))

            # Derive the dependency graph between active vehicles for this
            # timestep and redraw it from scratch.
            dependency_node, dependency_edges, rightofway_info, front_car_pairs, conflicting_car_pairs = dependency_calculate(
                activeObjects, track_dictionary, timestamp, Default_gap,
                Deviation_alongarcCoordinate, Time_gap, Time_difference_max,
                Distance_difference_max)
            hidden_intention = hidden_intentions_initialize(
                activeObjects, front_car_pairs, conflicting_car_pairs,
                Deviation_alongarcCoordinate, Distance_difference_max)
            plt.sca(axes2)
            G.clear()
            axes2.cla()
            G.add_nodes_from(dependency_node)
            G.add_edges_from(dependency_edges)
            nx.draw_circular(G, with_labels=True)

            end_time = time.time()
            # Pause so the animation runs in (approximately) real time.
            plt.pause(
                max(0.001,
                    timestamp_delta_ms / 1000. - (end_time - start_time)))
        timestamp += timestamp_delta_ms
    if visualize:
        plt.ioff()
    return