Example #1
    def __getitem__(self, index):
        input_path, output_path = self.tuple_paths[index]

        if self.seq_duration != 0.0:
            if self.random_chunks:
                input_info = load_info(input_path)
                output_info = load_info(output_path)
                duration = min(input_info['duration'], output_info['duration'])
                start = random.uniform(0, duration - self.seq_duration)
            else:
                start = 0
            #print("DATA", start)
            X_audio = load_audio(input_path,
                                 start=start,
                                 dur=self.seq_duration)
            Y_audio = load_audio(output_path,
                                 start=start,
                                 dur=self.seq_duration)
        else:
            input_info = load_info(input_path)
            output_info = load_info(output_path)
            start = 0
            duration = min(input_info['duration'], output_info['duration'])
            X_audio = load_audio(input_path, start=start, dur=duration)
            Y_audio = load_audio(output_path, start=start, dur=duration)
        # return torch tensors
        return X_audio, Y_audio
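Examples #1 through #4 only rely on load_info returning a dict with a 'duration' entry in seconds, and on a matching load_audio(path, start, dur) that reads an excerpt as a tensor. A minimal sketch of both helpers, assuming torchaudio is available (the function names and dict keys mirror the usage above; everything else is illustrative):

import torchaudio

def load_info(path):
    # assumption: only the 'duration' key (in seconds) is required by the
    # examples above; samplerate/samples are included for convenience
    md = torchaudio.info(str(path))
    return {
        'samplerate': md.sample_rate,
        'samples': md.num_frames,
        'duration': md.num_frames / md.sample_rate,
    }

def load_audio(path, start=0.0, dur=None):
    # assumption: returns a (channels, samples) float tensor covering
    # `dur` seconds starting at `start` seconds (whole file if dur is None)
    sr = load_info(path)['samplerate']
    num_frames = -1 if dur is None else int(dur * sr)
    audio, _ = torchaudio.load(str(path),
                               frame_offset=int(start * sr),
                               num_frames=num_frames)
    return audio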
Example #2
    def __getitem__(self, index):
        # for validation, get deterministic behavior
        # by using the index as seed
        if self.split == 'valid':
            random.seed(index)

        # For each source draw a random sound and mix them together
        audio_sources = []
        for source in self.source_folders:
            # select a random track for each source
            source_path = random.choice(self.source_tracks[source])
            if self.random_chunks:
                duration = load_info(source_path)['duration']
                start = random.uniform(0, duration - self.seq_duration)
            else:
                start = 0

            audio = load_audio(source_path, start=start, dur=self.seq_duration)
            audio = self.source_augmentations(audio)
            audio_sources.append(audio)
        stems = torch.stack(audio_sources)
        # apply linear mix by summing over the source dimension (dim=0)
        x = stems.sum(0)
        # target is always the last element in the list
        y = stems[-1]
        return x, y
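Both __getitem__ variants drop straight into a standard PyTorch DataLoader. A hypothetical usage loop, where `dataset` stands for an instance of either dataset class above:

from torch.utils.data import DataLoader

# `dataset` is assumed to be an instance of one of the classes shown above
loader = DataLoader(dataset, batch_size=16, shuffle=True, num_workers=4)
for x, y in loader:
    # x: batched input/mixture audio, y: batched target audio
    print(x.shape, y.shape)
    break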
Example #3
 def _get_paths(self):
     """Loads input and output tracks"""
     p = Path(self.root, self.split)
     for track_path in tqdm.tqdm(p.iterdir()):
         if track_path.is_dir():
             input_path = list(track_path.glob(self.input_file))
             output_path = list(track_path.glob(self.output_file))
             if input_path and output_path:
                 if self.seq_duration is not None:
                     input_info = load_info(input_path[0])
                     output_info = load_info(output_path[0])
                     min_duration = min(input_info['duration'],
                                        output_info['duration'])
                     # only yield tracks long enough for the requested chunk
                     if min_duration > self.seq_duration:
                         yield input_path[0], output_path[0]
                 else:
                     yield input_path[0], output_path[0]
Example #4
 def get_tracks(self):
     """Loads input and output tracks"""
     p = Path(self.root, self.split)
     source_tracks = {}
     for source_folder in tqdm.tqdm(self.source_folders):
         tracks = []
         source_path = (p / source_folder)
         for source_track_path in source_path.glob('*' + self.ext):
             if self.seq_duration is not None:
                 info = load_info(source_track_path)
                 # only keep tracks long enough for the requested chunk
                 if info['duration'] > self.seq_duration:
                     tracks.append(source_track_path)
             else:
                 tracks.append(source_track_path)
         source_tracks[source_folder] = tracks
     return source_tracks
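Example #4 pairs with example #2 (and, analogously, example #3 with example #1): the track listing is built once at construction time and then sampled per item. A hypothetical skeleton of that wiring; the class name is an assumption, the attribute names are taken from the usage above, and pasting example #4's get_tracks and example #2's __getitem__ into it would complete the class:

import torch

class MixDataset(torch.utils.data.Dataset):
    def __init__(self, root, split, source_folders, ext, seq_duration,
                 random_chunks, source_augmentations=lambda x: x):
        self.root = root
        self.split = split
        self.source_folders = source_folders
        self.ext = ext
        self.seq_duration = seq_duration
        self.random_chunks = random_chunks
        self.source_augmentations = source_augmentations
        # build the per-source track dict once (example #4);
        # example #2's __getitem__ then draws random tracks from it
        self.source_tracks = self.get_tracks()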
Example #5
def main():
    x_train, y_train = utils.load_data(data_dir, 'train')
    x_dev, y_dev = utils.load_data(data_dir, 'dev')
    x_test, y_test = utils.load_data(data_dir, 'test')

    feature_size, label_size = utils.load_info(data_dir)

    model = Logistic(feature_size, label_size, opt['default_thres'])

    # no hyper-params to tune, so train and dev could be combined to train the model.
    # x_train += x_dev
    # y_train += y_dev
    model.fit(x_train, y_train)

    # test set prediction
    # materialize predictions as lists so they can be pickled
    y_pred = [int(model.predict(x)[0]) for x in x_test]
    y_test = [y[0] for y in y_test]

    results = {'pred': y_pred, 'gold': y_test}

    filename = os.path.join(opt['save_dir'], opt['save_filename'])
    with open(filename, 'wb') as f:
        pickle.dump(results, f)
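The pickled results dict can later be reloaded to score the run. A minimal follow-up sketch, assuming the same 'pred'/'gold' layout and the same `opt` options dict used in main() above:

import os
import pickle

filename = os.path.join(opt['save_dir'], opt['save_filename'])  # same path as above
with open(filename, 'rb') as f:
    results = pickle.load(f)
correct = sum(p == g for p, g in zip(results['pred'], results['gold']))
print('test accuracy: %.4f' % (correct / len(results['gold'])))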
Example #6
def main():

    # Init
    parser = argparse.ArgumentParser(
        description="Batchly generating fits cubes of the patient samples.")
    # Parameters
    parser.add_argument("foldname", help="Path of the patients")
    parser.add_argument("foldsave", help="Path to save the MEF image")
    parser.add_argument("infopath", help="Path of the info file.")
    args = parser.parse_args()

    foldname = args.foldname
    foldsave = args.foldsave
    infopath = args.infopath

    # load patients info and sort
    print("Loading patients info.")
    namelist = utils.load_info(infopath=infopath)

    # load samples' folder names from foldpath
    samplelist = os.listdir(foldname)
    samplelist.sort()

    # Generate cubes
    # samplelist = samplelist[0:2]
    for i, s in enumerate(samplelist):
        print("Processing patient %s" % s)
        samplepath = os.path.join(foldname, s)
        # load dcmpath
        # dcmlist = utils.sort_filename(samplepath)
        # generate fits cube
        cube_hdu = utils.gen_fits_cube(samplepath=samplepath)
        # save
        savename = str(namelist['id'][i]) + '_cube.fits'
        savepath = os.path.join(foldsave, savename)
        utils.save_fits(hdu=cube_hdu, savepath=savepath)
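Here utils.load_info only needs to return something indexable as namelist['id'][i], sorted consistently with the sorted sample folders. A hypothetical pandas-based version compatible with that access pattern (the column name 'id' comes from the usage above; the CSV format is an assumption):

import pandas as pd

def load_info(infopath):
    # assumption: the info file is a table with (at least) an 'id' column;
    # sorting mirrors the sorted sample folder list used above
    info = pd.read_csv(infopath)
    return info.sort_values('id').reset_index(drop=True)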
Example #7
# x: x position of ball center
# y: y position of ball center
csv_path = args.csv_path
load_csv = False
if os.path.isfile(csv_path) and csv_path.endswith('.csv'):
    load_csv = True
else:
    print("Not a valid csv file! Please modify path in parser.py --csv_path")

# acquire video info
cap = cv2.VideoCapture(video_path)
fps = int(cap.get(cv2.CAP_PROP_FPS))
n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

if load_csv:
    info = load_info(csv_path)
    if len(info) != n_frames:
        print("Number of frames in video and dictionary are not the same!")
        print("Fail to load, create new dictionary instead.")
        info = {
            idx: {
                'Frame': idx,
                'Ball': 0,
                'x': -1,
                'y': -1
            }
            for idx in range(n_frames)
        }
    else:
        print("Load labeled dictionary successfully.")
else:
    # no labeled csv available, start from a fresh annotation dictionary
    # (mirrors the structure used in the mismatch branch above)
    info = {
        idx: {
            'Frame': idx,
            'Ball': 0,
            'x': -1,
            'y': -1
        }
        for idx in range(n_frames)
    }
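The dictionary layout above also pins down what load_info(csv_path) has to return here: one entry per frame with Frame, Ball, x, and y fields. A hypothetical reader consistent with that (the column names are assumed from the structure above):

import csv

def load_info(csv_path):
    # assumed layout: one csv row per frame with Frame, Ball, x, y columns
    info = {}
    with open(csv_path, newline='') as f:
        for row in csv.DictReader(f):
            idx = int(row['Frame'])
            info[idx] = {'Frame': idx, 'Ball': int(row['Ball']),
                         'x': int(row['x']), 'y': int(row['y'])}
    return info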
Example #8
def get_info():
    return jsonify(load_info())
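Example #8 is a Flask-style handler; the snippet omits the app and route registration. A self-contained sketch of the surrounding wiring, where the route path and app name are assumptions and load_info() is assumed to return a JSON-serializable object:

from flask import Flask, jsonify

app = Flask(__name__)

@app.route('/info')
def get_info():
    # expose whatever load_info() returns as a JSON response
    return jsonify(load_info())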