Example #1
import pytictoc


def main():

    print('\npytictoc file:')
    print(pytictoc.__file__ + '\n')

    t = pytictoc.TicToc()

    t.tic()          # start the timer
    waste_time()
    t.toc()          # print elapsed time since the last tic()

    with pytictoc.TicToc():  # context manager form: tic on enter, toc on exit
        waste_time()

    t.toc('It has been', restart=True)
    t.toc()

    spam = t.tocvalue()  # return elapsed seconds instead of printing them
    print(spam)

    waste_time()

    spam = t.tocvalue(restart=True)
    print(spam)
    t.toc()
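
The listing times a waste_time() helper it never defines; a minimal stand-in, purely hypothetical (any CPU-bound work will do):

def waste_time():
    # hypothetical busy-work so tic/toc has something to measure
    sum(i * i for i in range(1_000_000))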
Example #2
import configparser
import multiprocessing as mp
import pytictoc
from itertools import combinations


def main():

    t = pytictoc.TicToc()

    t.tic()

    maps_stats = [
        "München-Ost", "München-Pasing", "Fürstenfeldbruck", "Landshut",
        "Nürnberg", "Augsburg-Rathausplatz", "Rosenheim", "München-Marienplatz"
    ]

    statconns = combinations(maps_stats, 2)

    keyfile = "/home/ubuntu/sbmd/gapi.txt"
    with open(keyfile) as f:
        ak = f.readline().strip()  # strip the trailing newline; the with-block already closes the file (the stray f.close did nothing)

    credfile = "/home/ubuntu/sbmd/dwh.cfg"
    config = configparser.ConfigParser()
    config.read(credfile)

    s3k = config['AWS']['KEY']
    s3ks = config['AWS']['SECRET']

    pool = mp.Pool(mp.cpu_count())

    # pool.apply blocks until each call returns, so the original comprehension
    # ran the queries serially; starmap distributes them across the worker pool
    pool.starmap(gmap_query_all, [(co, s3k, s3ks, ak) for co in statconns])

    pool.close()

    t.toc()
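
The apply-vs-starmap distinction above in a self-contained sketch (the worker f is hypothetical):

import multiprocessing as mp

def f(a, b):  # hypothetical worker
    return a + b

if __name__ == "__main__":
    with mp.Pool() as pool:
        serial = [pool.apply(f, args=(i, i)) for i in range(4)]  # blocks per call
        parallel = pool.starmap(f, [(i, i) for i in range(4)])   # runs in parallel
        print(serial, parallel)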
Example #3
def main():
    # relies on module-level names not shown in this excerpt: PATH, WORK_MODE,
    # PLOT, FOV and the project imports (LIDAR, IMU, Server_Client, utilities, ...)
    if not os.path.isdir(PATH):
        print("Error - the PATH you have entered doesn't exist")
        exit(0)
    timeout = int(input(
        'Please enter a desired recording time (in seconds): '))  # seconds
    feeder = joint_feeder()
    lidar_decoder = LIDAR.vlp16_decoder(feeder.lidar_feeder)
    imu_decoder = IMU.vn200_decoder(feeder.imu_feeder)

    try:
        if WORK_MODE:  # if online, then we need to send data to the server - create the client object on the car
            client = Server_Client.Client()

        N_frames = timeout * 10  # 10 frames per sec (lidar working freq is 10Hz)
        t = pytictoc.TicToc()
        time_val = 0

        if PLOT:
            plt.ion()
            fig = plt.figure(1)
            warnings.filterwarnings("ignore", ".*GUI is implemented.*")
            mng = plt.get_current_fig_manager()
            mng.full_screen_toggle()

        for frame in range(N_frames):
            t.tic()
            # utilities.print_progress(packet, N_frames-1)
            decoded_frame, timestamp_list = lidar_decoder.get_full_frame()
            xyz_time_cones, fov_frame, cones_reflectivity = cone_finder(
                decoded_frame, timestamp_list, FOV)
            if xyz_time_cones is None:  # the original np.all(...) == None test could never fire; compare with `is None`
                continue
            xyzCones_world, ypr, y_car, x_car = imu_decoder.get_world_coords(
                xyz_time_cones)

            # choose one of the three plotting options below and uncomment it
            if PLOT:
                # utilities.plot_3d(fig, xyz_time_cones)
                utilities.plot_2d(xyz_time_cones)
                # lidar_decoder.plot_lidar_cones(fig, fov_frame, xyz_time_cones, frame_time)

            if WORK_MODE:  # if online, then send the frame data to the server
                # note: DataFrame.append was removed in pandas 2.0; pd.concat is its replacement
                data = xyzCones_world.iloc[:, 0:2].append([[x_car, y_car],
                                                           [ypr[0], ypr[1]],
                                                           [ypr[2], 0]])
                data = np.array(data.values)
                data = data.copy(order='C')
                client.send_packet(data)

            time_val += t.tocvalue()

    except (KeyboardInterrupt, SystemError, OSError, TypeError, ValueError):
        # the original `except A or B or ...` evaluated the boolean first and caught only KeyboardInterrupt
        feeder.close_joint_feeder(N_frames)

    print('Average frame decoding time: ' +
          str(round((time_val / N_frames), 3)) + ' sec')
    feeder.close_joint_feeder(N_frames)
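
Why the tuple in the except clause matters: `except A or B or C:` evaluates the or-expression first and therefore catches only the first class. A runnable sketch (risky() is a hypothetical stand-in):

def risky():
    raise OSError("device gone")  # stand-in failure

try:
    risky()
except (KeyboardInterrupt, OSError) as exc:  # a tuple catches each listed type
    print(f"caught: {exc!r}")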
Example #4
def test_execution_speed(tmpdir):
    """Manual test for how fast we can write to a VCD file

    See https://github.com/SanDisk-Open-Source/pyvcd/issues/9

    """
    fptr = open(os.path.join(str(tmpdir), 'test.vcd'), 'w+')  # VCDWriter comes from the pyvcd package
    t = pytictoc.TicToc()
    t.tic()
    with VCDWriter(fptr, timescale=(10, 'ns'), date='today') as writer:
        counter_var = writer.register_var('a.b.c',
                                          'counter',
                                          'integer',
                                          size=8)
        for i in range(1000, 300000, 300):
            for timestamp, value in enumerate(range(10, 200, 2)):
                writer.change(counter_var, i + timestamp, value)
    fptr.close()
    t.toc()
Example #5

    def run(self):  # thread body; the surrounding reader class (not shown) supplies self.segments, self.classes, self.queue, self.stopped, ...
        sample_no = 0

        X_batch = []
        Y_batch = np.zeros((self.output_batch_size, self.num_classes))
        id_batch = []

        t = pytictoc.TicToc()
        if self.do_timings:
            t.tic()

        while True:
            for seg in self.segments:
                # should the thread terminate?
                if self.stopped.is_set():
                    print('VideoSegmentReader thread exiting')
                    return

                # load the data for this video segment
                Y = self.classes.index(seg[1])  # transform from a class label to a class index
                idb = seg[2] if self.return_sample_id else []
                X = []
                for pth in seg[0]:
                    dt = np.load(pth)
                    dt = np.array(dt['X'])  # has shape (1, width, height, channels)
                    X.append(dt[0, ...])
                X = np.array(X)
                sample_no += 1

                # append to our batch
                X_batch.append(X)
                Y_batch[sample_no - 1, Y] = 1  # one-hot encoding of the class label
                id_batch.append(idb)

                assert sum(sum(Y_batch)) == sample_no, \
                    "Class labels for current batch are not properly one-hot-encoded!"

                # do we have a complete batch?
                if sample_no == self.output_batch_size:
                    try:
                        X_batch = np.array(X_batch)
                        if self.do_timings:
                            t.toc('batch construction')

                        placed_on_queue = False
                        while not placed_on_queue:
                            try:
                                self.queue.put(
                                    (X_batch, Y_batch,
                                     id_batch) if self.return_sample_id else
                                    (X_batch, Y_batch),
                                    block=True,
                                    timeout=10)
                                placed_on_queue = True
                            except queue.Full:
                                placed_on_queue = False

                            if self.stopped.is_set():
                                print('VideoSegmentReader thread exiting')
                                return
                    except Exception:  # a bare except would also swallow KeyboardInterrupt
                        print('ERROR ENCOUNTERED WITH BATCH')
                        # something is wrong with this batch - skip it

                    # reset
                    X_batch = []
                    Y_batch = np.zeros((self.output_batch_size, self.num_classes))
                    id_batch = []
                    sample_no = 0
                    if self.do_timings:
                        t.tic()

            if self.do_shuffle:
                # shuffle each time we do a full iteration through the dataset
                random.shuffle(self.segments)
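
For context, a hypothetical wiring of the reader above (its constructor is not shown in the source, so every name here is an assumption):

import queue
import threading

q = queue.Queue(maxsize=4)   # bounded queue that the reader's put() blocks on
stop = threading.Event()     # cooperative shutdown flag checked inside the loop
# reader = VideoSegmentReader(..., queue=q, stopped=stop)  # hypothetical constructor
# reader.start()                         # run() executes on the worker thread
# X_batch, Y_batch = q.get(timeout=30)   # consume one batch
# stop.set(); reader.join()              # ask the loop to exit, then wait for it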
Example #6

import boto3
import configparser
import pandas as pd
import os
import s3fs
import json
import pytictoc
import datetime
import logging
from DeArchive import dearchive

t = pytictoc.TicToc()
t.tic()

logpath = "/home/ubuntu/sbmd/logs/"
normlogfilename = "sb03blog_" \
      + datetime.datetime.now().strftime("%Y-%m-%d_%H-%M") + ".log"
logging.basicConfig(filename=logpath+normlogfilename, level=logging.DEBUG)

config = configparser.ConfigParser()
config.read("/home/ubuntu/sbmd/dwh.cfg")

rdsid = config['RDS']['ID1']
rdspw = config["RDS"]["PW"]

os.environ['AWS_ACCESS_KEY_ID'] = config['AWS']['KEY']
os.environ['AWS_SECRET_ACCESS_KEY'] = config['AWS']['SECRET']

s3r = boto3.resource("s3")
BUCKET = "sbmd1db2"
bucket = s3r.Bucket(BUCKET)
Example #7
def main():

    pool = mp.Pool(mp.cpu_count())

    t = pytictoc.TicToc()

    t.tic()

    potential_stations = [
        "München",
        "Puchheim",
        "Germering",
        "Fürstenfeldbruck",
        "Olching",
        "Gröbenzell",
        "Wolfratshausen",
        "Starnberg",
        "Gernlinden",
        #"Maisach", "Mammendorf", "Schöngeising",
        #"Geltendorf", "Buchenau", "Eichenau", "Murnau",
        #"Hackherbrücke", "Holzkirchen", "Ebersberg",
        #"Grafing",  "Haar",  "Zorneding", "Freising",
        #"Rosenheim", "Augsburg", "Miesbach", "Eichstätt",
        "Ingolstadt",
        "Donauwörth",
        "Unterhaching"
    ]
    #"Geretsried", "Taufkirchen", "Erding", "Dachau",
    #"Steinebach", "Tutzing", "Feldafing",
    #"Mühldorf am Inn", "Deggendorf", "Landsberg",
    #"Landshut", "Nürnberg", "Grafrath", "Gräfelfing",
    #"Markt Schwaben", "Icking", "Kempten", "Planegg",
    #"Stockdorf", "Possenhofen", "Gauting", "Gilching",
    #"Türkenfeld", "Petershausen", "Röhrmoos",
    #"Hallbergmoos", "Ismaning", "Bayrischzell",
    #"Unterföhring", "Daglfing", "Unterschleißheim",
    #"Heimstetten", "Tegernsee", "Lenggries",
    #"Aying", "Vaterstetten", "Baldham", "Steinebach",
    #"Weßling", "Deisenhofen", "Sauerlach", "Otterfing",
    #"Kreuzstraße", "Ottobrunn", "Hohenbrunn",
    #"Mittenwald", "Oberschleißheim", "Eching",
    #"Neufahrn", "Altomünster", "Schwabhausen",
    #"Kolbermoor", "Bad Aibling", "Wasserburg am Inn",
    #"Waldkraiburg", "Schrobenhausen",
    #"Garmisch-Partenkirchen", "Schliersee", "Gersthofen"]

    real_stations = pool.map(all_station, potential_stations)
    real_stations = list(chain(*real_stations))

    pool.close()

    # drop empty results and stations that are clearly out of scope
    exclude = (",", ";", "Berlin", "Attnang", "Konstanz", "Kindsbach")
    real_stations = [x for x in real_stations
                     if x and not any(e in x for e in exclude)]

    #real_stations.remove("Taufkirchen an der Pram")
    #real_stations.remove("Steinebach an der Wied Ort")
    #real_stations.remove('Mittenwalde b Templin Dorf')
    #real_stations.remove("Haarhausen")

    add_stats = [
        "Puchheim Bahnhof Alpenstraße, Puchheim", "Bahnhofstraße, Eichenau",
        "Buchenau, Fürstenfeldbruck", "Bahnhof, Olching",
        "Am Zillerhof, Gröbenzell"
    ]

    real_stations.extend(add_stats)

    stations_iter = itertools.combinations(real_stations, 2)

    stations_iter_list = list(stations_iter)  # materialise the station pairs

    stations_iter_parts_list = np.array_split(stations_iter_list, 10)

    # pickle the list: the combinations iterator is exhausted at this point
    # and, depending on the Python version, may not be picklable at all
    fileobj = [real_stations, stations_iter_list, stations_iter_parts_list]

    with open("/home/ubuntu/sbmd/station", "wb") as sf:
        pickle.dump(fileobj, sf)

    credfile = "/home/ubuntu/sbmd/dwh.cfg"

    config = configparser.ConfigParser()
    config.read(credfile)

    s3k = config['AWS']['KEY']
    s3ks = config['AWS']['SECRET']

    s3 = boto3.resource('s3',
                        aws_access_key_id=s3k,
                        aws_secret_access_key=s3ks)
    s3.meta.client.upload_file("/home/ubuntu/sbmd/station", "sbmdother",
                               "station")

    logging.info("Stations gathered successfully!")
    # t.toc() prints and returns None; log the elapsed time via tocvalue() instead
    logging.info("elapsed: %.2f s", t.tocvalue())
Example #8
def generate_CNN_features_from_flow_data(input_path, input_file_mask, stack_size_K, cnn_model, output_path, groundtruth_file=""):
    # groundtruth data?
    gt = {}
    have_groundtruth_data = False
    if len(groundtruth_file) > 0:
        try:
            # open and load the groundtruth data
            print('Loading groundtruth data...')
            with open(groundtruth_file, 'r') as gt_file:
                gt_lines = gt_file.readlines()
            for gtl in gt_lines:
                gtf = gtl.rstrip().split(' ')
                if len(gtf) == 3:                   # our groundtruth file has 3 items per line (video ID, frame ID, class label)
                    gt[(gtf[0], int(gtf[1]))] = gtf[2]
            print('ok\n')
            have_groundtruth_data = True
        except Exception:
            pass    # no usable groundtruth; carry on without it

    tt = pytictoc.TicToc()

    # get all the video folders
    video_folders = os.listdir(input_path)
    video_folders.sort(key=common.natural_sort_key)

    for (i, video_i) in enumerate(video_folders):
        print('processing video %d of %d:  %s' % (i+1, len(video_folders), video_i))

        # create the output folder; if it exists, then CNN features have already been produced - skip the video
        video_i_output_folder = os.path.join(output_path, video_i)
        if not os.path.exists(video_i_output_folder):
            tt.tic()
            os.makedirs(video_i_output_folder)

            # get the list of extracted frames for this video
            video_i_images = glob.glob(os.path.join(input_path, video_i, input_file_mask))
            video_i_images.sort(key=common.natural_sort_key)       # ensure images are in the correct order to preserve temporal sequence
            assert(len(video_i_images) > 0), "video %s has no frames!!!" % video_i

            # for each video frame...
            for image_j in video_i_images:
                frame_id = int(os.path.splitext(os.path.basename(image_j))[0])
                skip_frame = False
                try:
                    skip_frame = True if have_groundtruth_data and gt[(video_i, frame_id)] == '?' else False
                except KeyError:    # gt has no entry for this frame
                    pass            # safest option is not to skip the frame

                if skip_frame:
                    print("x", end='', flush=True)
                else:
                    # load the stacked flow data from disk
                    stacked_flow_data = np.load(image_j)
                    if len(stacked_flow_data) < stack_size_K:
                        print("!", end='', flush=True)
                        continue

                    # extract the flow data (encoded as images)
                    X = None
                    for fd in stacked_flow_data:
                        # each element in the array of stacked flow data is encoded as a JPEG-compressed RGB image
                        flow_img = cv2.imdecode(fd, cv2.IMREAD_UNCHANGED)
                        X = np.dstack((X, flow_img)) if X is not None else flow_img

                    X = np.expand_dims(X, axis=0)       # package as a batch of size 1, by adding an extra dimension

                    # generate the CNN features for this batch
                    print(".", end='', flush=True)
                    X_cnn = cnn_model.predict_on_batch(X)

                    # save to disk
                    output_file = os.path.join(video_i_output_folder, os.path.splitext(os.path.basename(image_j))[0] + '.npy')
                    with open(output_file, 'wb') as f_out:  # a handle keeps the .npy name; np.savez would append .npz to a plain filename
                        np.savez(f_out, X=X_cnn)
            tt.toc()
        print('\n\n')

        if msvcrt.kbhit():  # if a key is pressed
            key = msvcrt.getch()
            if key == b'q' or key == b'Q':
                print('User termination')
                return

    print('\n\nReady')
def generate_CNN_features(input_path, input_file_mask, cnn_model, output_path, groundtruth_file=""):
    # groundtruth data?
    gt = {}
    have_groundtruth_data = False
    if len(groundtruth_file) > 0:
        try:
            # open and load the groundtruth data
            print('Loading groundtruth data...')
            with open(groundtruth_file, 'r') as gt_file:
                gt_lines = gt_file.readlines()
            for gtl in gt_lines:
                gtf = gtl.rstrip().split(' ')
                if len(gtf) == 3:                   # our groundtruth file has 3 items per line (video ID, frame ID, class label)
                    gt[(gtf[0], int(gtf[1]))] = gtf[2]
            print('ok\n')
            have_groundtruth_data = True
        except Exception:
            pass    # no usable groundtruth; carry on without it

    tt = pytictoc.TicToc()

    # get all the video folders
    video_folders = os.listdir(input_path)
    video_folders.sort(key=common.natural_sort_key)

    for (i, video_i) in enumerate(video_folders):
        print('processing video %d of %d:  %s' % (i+1, len(video_folders), video_i))

        # create the output folder; if it exists, then CNN features have already been produced - skip the video
        video_i_output_folder = os.path.join(output_path, video_i)
        if not os.path.exists(video_i_output_folder):
            tt.tic()
            os.makedirs(video_i_output_folder)

            # get the list of extracted frames for this video
            video_i_images = glob.glob(os.path.join(input_path, video_i, input_file_mask))
            video_i_images.sort(key=common.natural_sort_key)       # ensure images are in the correct order to preserve temporal sequence 
            assert(len(video_i_images) > 0), "video %s has no frames!!!" % video_i

            # for each video frame...
            for image_j in video_i_images:
                frame_id = int(os.path.splitext(os.path.basename(image_j))[0])
                skip_frame = False
                try:
                    skip_frame = True if have_groundtruth_data and gt[(video_i, frame_id)] == '?' else False
                except KeyError:    # gt has no entry for this frame
                    pass            # safest option is not to skip the frame

                if skip_frame:
                    print("x", end='', flush=True)
                else:
                    # load the image and convert to numpy 3D array
                    img = np.array(preprocessing.image.load_img(image_j))

                    # Note that we don't scale the pixel values because VGG16 was not trained with normalised pixel values!
                    # Instead we use the pre-processing function that comes specifically with the VGG16
                    X = preprocess_input(img)

                    X = np.expand_dims(X, axis=0)       # package as a batch of size 1, by adding an extra dimension

                    # generate the CNN features for this batch
                    print(".", end='', flush=True)
                    X_cnn = cnn_model.predict_on_batch(X)

                    # save to disk
                    output_file = os.path.join(video_i_output_folder, os.path.splitext(os.path.basename(image_j))[0] + '.npy')
                    with open(output_file, 'wb') as f_out:  # a handle keeps the .npy name; np.savez would append .npz to a plain filename
                        np.savez(f_out, X=X_cnn)
            tt.toc()
        print('\n\n')

        if msvcrt.kbhit():  # if a key is pressed 
            key = msvcrt.getch()
            if key == b'q' or key == b'Q':
                print('User termination')
                return

    print('\n\nReady')
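
A hypothetical invocation of generate_CNN_features, for orientation only (paths are placeholders; the in-code comments suggest a VGG16 backbone, and the msvcrt calls make these helpers Windows-specific):

from tensorflow.keras.applications.vgg16 import VGG16

cnn = VGG16(include_top=False, weights='imagenet')  # matches the VGG16 preprocess_input used above
generate_CNN_features(input_path='frames',          # hypothetical folder of per-video frame images
                      input_file_mask='*.jpg',
                      cnn_model=cnn,
                      output_path='cnn_features',
                      groundtruth_file='gt.txt')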