def capture_video(self, sky, millis_end, millis_start=0, quiet=False):
        """
        Captures a sequence of images, with consecutive frames separated by 1. / fps seconds.
        Parameters:
            sky: sky object with stars and cubesats
            millis_end: time at which the last image is taken
            millis_start: time at which the first image is taken
            quiet: if True, no progress bar is displayed
        Returns a list of timestamps and a list of frames
        """
        video = []
        timestamps = []
        millis = millis_start
        base = Image(width=self.camera.cols,
                     height=self.camera.rows)  # Base image with stars only
        progress_bar = ProgressBar('Capturing video', millis_start, millis_end,
                                   quiet)

        sky.draw_stars_on_image(base)
        while millis <= millis_end:
            captured = base.clone()
            sky.draw_cubesats_on_image(captured, millis)
            self._add_noise(captured)
            video.append(captured)
            timestamps.append(millis)
            progress_bar.update(millis)
            millis += 1000. / self.fps

        progress_bar.end()

        return timestamps, video
Example no. 2
 def fit(self, train_df):
     self.ds = pd.Series(train_df.index)
     print("Fitting...")
     progress_bar = ProgressBar(len(train_df.columns))
     
     for item in train_df.columns:
         target = train_df[item].interpolate().bfill()
         if self.use_boxcox:
             idx = target.index
             target, self.lmbda_boxcox[item] = boxcox(target)
             target = pd.Series(target, index=idx)
         target.index.name = "ds"
         target.name = "y"     
         self.models[item] = pm.auto_arima(
             target,
             seasonal=False,
             exogenous=fourier(
                 len(target), 
                 seasonality=self.seasonality, 
                 n_terms=self.n_fourier_terms), 
             method="bfgs",
             suppress_warnings=True)
         progress_bar.update()
     progress_bar.finish()
     return self.models
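The `fourier` helper used as the exogenous regressor above is not shown in this listing; a minimal sketch of what such a helper might look like (hypothetical implementation, matching only the call signature used above):

import numpy as np

def fourier(length, seasonality, n_terms):
    """Hypothetical sketch: n_terms sine/cosine pairs at the given seasonal period."""
    t = np.arange(1, length + 1)
    cols = []
    for k in range(1, n_terms + 1):
        cols.append(np.sin(2.0 * np.pi * k * t / seasonality))
        cols.append(np.cos(2.0 * np.pi * k * t / seasonality))
    return np.column_stack(cols)  # shape: (length, 2 * n_terms)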
    def createDemand(self):
        def determineStartingInterval():
            start_interval = P_constrain(np.random.normal(), -3.0, 3.0)
            start_interval = P_map(start_interval, -3.0, 3.0, 15 * 60, self.MAX_INTERVAL - self.MAX_DURATION - 1)
            start_interval = int(P_constrain(start_interval, 0, self.MAX_INTERVAL - self.MAX_DURATION - 1))
            return start_interval

        def determineSkillAssignments():
            SKILLS = ['LEVEL_1', 'LEVEL_2', 'LEVEL_3']
            skill = random.choice(SKILLS)
            return skill

        def determineDuration():
            return int(P_map(P_constrain(np.random.normal(), -3.0, 3.0), -3.0, 3.0, 5, self.MAX_DURATION))

        print('Creating demand list...')
        toolbar_width = 40
        pb = ProgressBar(toolbar_width=toolbar_width)
        for x_ in range(self.NUM_CALLS):
            pb.update(x_, self.NUM_CALLS)

            start_interval = determineStartingInterval()
            duration = determineDuration()
            skill = determineSkillAssignments()

            self.addDemand({'id': x_ + 10, 'interval': start_interval, 'duration': duration, 'skill': skill})

        pb.update(1, 1)
        pb.clean()
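The `P_constrain` and `P_map` helpers used above are not part of this example; they appear to be Processing-style constrain()/map() utilities. A minimal sketch, assuming that behaviour:

def P_constrain(value, low, high):
    # clamp value into the closed interval [low, high]
    return max(low, min(high, value))

def P_map(value, in_low, in_high, out_low, out_high):
    # linearly rescale value from [in_low, in_high] to [out_low, out_high]
    return out_low + (value - in_low) * (out_high - out_low) / (in_high - in_low)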
Example no. 4
class ProgressBarTestCase(unittest.TestCase):
    """Test the ProgressBar class."""

    DEFAULT_VALUE = 0
    DEFAULT_MAX_VALUE = 100
    DEFAULT_PRINT_WIDTH = 50
    DEFAULT_NEWLINE = "\r"

    def setUp(self):
        """Set up for test."""
        self.bar = ProgressBar(quiet=True)

    def tearDown(self):
        """Clean up after test."""
        self.bar = None

    def test_defaults(self):
        """Test default values are correct."""
        test_data = (
            (self.bar.value, self.DEFAULT_VALUE,
             "default value = {v}".format(v=self.DEFAULT_VALUE)),
            (self.bar.initial_value, self.DEFAULT_VALUE,
             "default initial value = {v}".format(v=self.DEFAULT_VALUE)),
            (self.bar.max_value, self.DEFAULT_MAX_VALUE,
             "default max value = {v}".format(v=self.DEFAULT_MAX_VALUE)),
            (self.bar.print_width, self.DEFAULT_PRINT_WIDTH,
             "default print width = {v}".format(v=self.DEFAULT_PRINT_WIDTH)),
            (self.bar.newline, self.DEFAULT_NEWLINE,
             "default newline = {v}".format(v=self.DEFAULT_NEWLINE)),
        )
        for actual, expected, description in test_data:
            with self.subTest(msg=description):
                self.assertEqual(actual, expected)

    def test_set_and_reset(self):
        """Test setting and resetting the current value."""
        self.bar.set(value=50)
        self.assertEqual(self.bar.value, 50)
        self.bar.reset()
        self.assertEqual(self.bar.value, 0)

    def test_update_and_draw(self):
        """Test updating and drawing the progress bar."""
        self.bar.reset()
        self.bar.quiet = False
        for i in range(self.bar.initial_value, self.bar.max_value + 1):
            percent = int(i * 100 / self.bar.max_value)
            dots = int(i * self.bar.print_width / self.bar.max_value)
            expected_bar = "{nl}[{c}{nc}] {p}% ".format(
                c="".join(["+"] * dots),
                nc="".join(["."] * (self.bar.print_width - dots)),
                p=percent,
                nl=self.bar.newline)
            with captured_output() as (out, _):
                self.bar.update(i)
            with self.subTest(msg="value = {v}".format(v=i)):
                self.assertEqual(self.bar.value, i)
            with self.subTest(msg="output = {v}".format(v=expected_bar)):
                self.assertEqual(out.getvalue(), expected_bar)
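The ProgressBar class under test is not included in this example. A minimal sketch that would satisfy the defaults and output format asserted above (an assumption for illustration, not the actual implementation):

import sys

class ProgressBar:
    """Hypothetical minimal implementation consistent with ProgressBarTestCase."""

    def __init__(self, initial_value=0, max_value=100, print_width=50,
                 newline="\r", quiet=False):
        self.initial_value = initial_value
        self.value = initial_value
        self.max_value = max_value
        self.print_width = print_width
        self.newline = newline
        self.quiet = quiet

    def set(self, value):
        self.value = value

    def reset(self):
        self.value = self.initial_value

    def draw(self):
        if self.quiet:
            return
        percent = int(self.value * 100 / self.max_value)
        dots = int(self.value * self.print_width / self.max_value)
        sys.stdout.write("{nl}[{c}{nc}] {p}% ".format(
            nl=self.newline, c="+" * dots,
            nc="." * (self.print_width - dots), p=percent))
        sys.stdout.flush()

    def update(self, value):
        self.set(value)
        self.draw()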
Example no. 5
    def _generate_AX(self):
        self.log('Creating features and adjacency matrices..')
        pr = ProgressBar(60, len(self.data))

        data = []
        smiles = []
        data_S = []
        data_A = []
        data_X = []
        data_D = []
        data_F = []
        data_Le = []
        data_Lv = []

        max_length = max(mol.GetNumAtoms() for mol in self.data)
        max_length_s = max(len(Chem.MolToSmiles(mol)) for mol in self.data)

        for i, mol in enumerate(self.data):
            A = self._genA(mol, connected=True, max_length=max_length)
            if A is not None:
                D = np.count_nonzero(A, -1)  # node degrees
                data.append(mol)
                smiles.append(Chem.MolToSmiles(mol))
                data_S.append(self._genS(mol, max_length=max_length_s))
                data_A.append(A)
                data_X.append(self._genX(mol, max_length=max_length))
                data_D.append(D)
                data_F.append(self._genF(mol, max_length=max_length))

                L = np.diag(D) - A
                Le, Lv = np.linalg.eigh(L)

                data_Le.append(Le)
                data_Lv.append(Lv)

            pr.update(i + 1)

        self.log(date=False)
        self.log(
            'Created {} features and adjacency matrices out of {} molecules!'.
            format(len(data), len(self.data)))

        self.data = data
        self.smiles = smiles
        self.data_S = data_S
        self.data_A = data_A
        self.data_X = data_X
        self.data_D = data_D
        self.data_F = data_F
        self.data_Le = data_Le
        self.data_Lv = data_Lv
        self.__len = len(self.data)
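For reference, the data_Le / data_Lv entries above are the eigendecomposition of the graph Laplacian L = diag(D) - A; a small self-contained illustration on a toy 3-node path graph:

import numpy as np

A = np.array([[0, 1, 0],
              [1, 0, 1],
              [0, 1, 0]], dtype=float)   # adjacency matrix
D = np.count_nonzero(A, -1)              # node degrees: [1, 2, 1]
L = np.diag(D) - A                       # graph Laplacian
Le, Lv = np.linalg.eigh(L)               # eigenvalues / eigenvectors of the symmetric matrix
print(Le)                                # smallest eigenvalue is 0 for a connected graph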
Example no. 6
def process_frame_segments(args, segments, width, height):
    """Post-process frame segments to set frame images, etc."""
    fn = "process_frame_segments"
    globals.log.info("Processing frames...")
    frame_segments = [s for s in segments if isinstance(s, FrameSegment)]
    n = len(frame_segments)
    globals.log.debug("{fn}(): num frames = {n}".format(fn=fn, n=n))
    progress = ProgressBar(max_value=n,
                           quiet=args.quiet or args.debug or n == 0)
    progress.update(0)
    for i, f in enumerate(frame_segments):
        try:
            globals.log.debug(
                "{fn}(): frame (before) = {b}".format(fn=fn, b=f))
            # Frame segments that use a frame from the previous segment.
            if (f.input_file == "^"):
                if (f.segment_number > 0):
                    prev = segments[f.segment_number - 1]
                    globals.log.debug(
                        "{fn}(): prev = {p}".format(fn=fn, p=prev))
                    prev.generate_temp_file(args.output, width=width,
                                            height=height)
                    f.use_frame(
                        prev.generate_frame(f.frame_number, args.output,
                                            width=width, height=height))
                else:
                    globals.log.error(
                        "frame segment {s} is attempting to use the last "
                        "frame of a non-existent previous "
                        "segment".format(s=f.segment_number))
                    sys.exit(1)
            # Frame segments whose frame comes from a PDF file.
            else:
                suffix = PurePath(f.input_file).suffix
                if (suffix.lower() == ".pdf"):
                    f.use_frame(f.generate_temp_file(args.output, width=width,
                                            height=height))
                else:
                    globals.log.error(
                        'unexpected input file type "{s}" for frame segment '
                        "{f}".format(s=suffix, f=f.segment_number))
                    sys.exit(1)
            progress.update(i)
            globals.log.debug("{fn}(): frame (after) = ""{a}".format(fn=fn, a=f))
        except SegmentError as e:
            progress.finish()
            globals.log.exception(e)
            sys.exit(1)
    progress.finish()
 def runSimulation(self):
     abort = False
     toolbar_width = 40
     pb = ProgressBar(toolbar_width=toolbar_width)
     for k, demand in enumerate(self.demandList):
          if k % max(1, round(self.NUM_CALLS / 40)) == 0:
             # print(round(k * 100 / self.NUM_CALLS))
             pb.update(k, len(self.demandList))
         if demand['skill'] in self.exhaustedSkills:
             continue
         assigned = self.findAvailableResource(demand)
         if assigned == 'FAIL' or assigned == 'FAIL_EXHAUSTED':
             if demand['skill'] not in self.exhaustedSkills and assigned == 'FAIL_EXHAUSTED':
                 self.exhaustedSkills.append(demand['skill'])
                 # print('No employees available beginning at interval: ' + str(demand['interval']) + ' for skill: ' + demand['skill'])
                 # turned off due to max interval code interfering with this logic
             if len(self.exhaustedSkills) == self.NUM_SKILLS:
                 pb.update(1, 1)
                 print(f'\nAll skills exhausted. Simulated {k} out of {self.NUM_CALLS} demand events.')
                 abort = True  # switch
             if abort:
                 pb.update(1, 1)
                 break
     pb.update(k + 1, len(self.demandList))
     pb.clean()
    def _generate_AX(self):
        self.log('Creating features and adjacency matrices..')
        pr = ProgressBar(60, len(self.data))

        data_ax = []
        smiles = []
        data_s = []
        data_a = []
        data_x = []
        data_d = []
        data_f = []
        data_le = []
        data_lv = []

        max_length = max(mol.GetNumAtoms() for mol in self.data)
        max_length_s = max(len(Chem.MolToSmiles(mol)) for mol in self.data)

        for i, mol in enumerate(self.data):
            a = self._genA(mol, connected=True, max_length=max_length)
            if a is not None:
                d = np.count_nonzero(a, -1)  # node degrees
                data_ax.append(mol)
                smiles.append(Chem.MolToSmiles(mol))
                data_s.append(self._genS(mol, max_length=max_length_s))
                data_a.append(a)
                data_x.append(self._genX(mol, max_length=max_length))
                data_d.append(d)
                data_f.append(self._genF(mol, max_length=max_length))

                le, lv = np.linalg.eigh(np.diag(d) - a)  # eigendecomposition of the graph Laplacian

                data_le.append(le)
                data_lv.append(lv)

            pr.update(i + 1)

        self.log(date=False)
        self.log('Created {} features and adjacency matrices out of {} molecules!'.format(
            len(data_ax), len(self.data)))

        self.data = data_ax
        self.smiles = smiles
        self.data_S = data_s
        self.data_A = data_a
        self.data_X = data_x
        self.data_D = data_d
        self.data_F = data_f
        self.data_Le = data_le
        self.data_Lv = data_lv
        self.__len = len(self.data)
Example no. 9
 def predict(self, steps=365, freq="D"):
     print("Forecasting...")
     progress_bar = ProgressBar(len(self.models.items()))
     for item, model in self.models.items():
         future = model.make_future_dataframe(steps, freq=freq)
         pred = model.predict(future).set_index("ds")
         pred = pred[["yhat", "yhat_lower", "yhat_upper"]]
         self.fcst[item] = pred
         if self.use_boxcox:
             self.fcst[item] = inv_boxcox(self.fcst[item],
                                          self.lmbda_boxcox[item])
         progress_bar.update()
     progress_bar.finish()
     fcst_df = pd.concat(self.fcst, axis=1).sort_index(axis=1)
     return fcst_df
 def process_video(self, video, quiet=False):
     """
     Process a video taken from a Receiver
     Parameters:
          video: list of frames to process
         quiet: if True, no progress bar is displayed
     """
     processed = []
     progress_bar = ProgressBar('Processing video', 0,
                                len(video) - 1, quiet)
     for i, image in enumerate(video):
         processed.append(self.process_image(image))
         progress_bar.update(i)
     progress_bar.end()
     return processed
    def createResources(self):

        def determineStartingInterval():
            start_interval = P_constrain(np.random.normal(), -3.0, 3.0)
            start_interval = P_map(start_interval, -3.0, 3.0, 0, 3.999)
            start_interval = int(start_interval * 60 * 60)
            return start_interval

        def determineSchedule(start_interval):
            # Build out the employee's schedule
            # Prefill 0's from the beginning of the day until the start of this employee's shift
            temp_sch = [0 for x in range(start_interval)]
            # Fill in 1's for the duration of this employee's shift
            temp_sch.extend([1 for x in range(self.SHIFT_LENGTH)])
            # Fill in 0's from the end of this employee's shift until the end of the day
            temp_sch.extend([0 for x in range(self.NUM_INTERVALS - self.SHIFT_LENGTH - start_interval)])
            return temp_sch

        def determineSkillAssignments():
            SKILLS = ['LEVEL_1', 'LEVEL_2', 'LEVEL_3']
            skill = [random.choice(SKILLS)]
            if random.randint(0, 100) <= 30:
                skill.append(random.choice([x_ for x_ in list(set(SKILLS) - set(skill))]))
            return skill

        print('Creating employee list...')
        toolbar_width = 40
        pb = ProgressBar(toolbar_width=toolbar_width)

        for x_ in range(self.NUM_RESOURCES):
            pb.update(x_, self.NUM_RESOURCES)

            # Select the start time for this employee's shift
            start_interval = determineStartingInterval()

            # Build out the employee's schedule
            schedule = determineSchedule(start_interval)

            utilization = [0 for x in range(self.NUM_INTERVALS)]

            skillset = determineSkillAssignments()

            self.addResource({'id': x_ + 10, 'schedule': schedule, 'utilization': utilization, 'skills': skillset})
        pb.update(1, 1)
        pb.clean()
Example no. 12
    def fit(self, train_df):
        print("Fitting...")
        progress_bar = ProgressBar(len(train_df.columns))

        for item in train_df.columns:
            target = train_df[item].dropna()
            if self.use_boxcox:
                idx = target.index
                target, self.lmbda_boxcox[item] = boxcox(target)
                target = pd.Series(target, index=idx)
            target.index.name = "ds"
            target.name = "y"
            target = target.reset_index()
            self.models[item] = Prophet(**self.prophet_config)
            self.models[item].fit(target)
            progress_bar.update()
        progress_bar.finish()
        return self.models
Example no. 13
 def predict(self, steps=365):
     print("Forecasting...")
     progress_bar = ProgressBar(len(self.models.items()))
     for item, model in self.models.items():
         pred = model.predict(
             exogenous=fourier(
                 steps, 
                 seasonality=self.seasonality, 
                 n_terms=self.n_fourier_terms),
             n_periods=steps, 
             return_conf_int=True,
             alpha=(1.0 - self.interval_width))
         fcst = pd.DataFrame()
         fcst["yhat_lower"] = pred[1][:,0]
         fcst["yhat"] = pred[0]
         fcst["yhat_upper"] = pred[1][:,1]
         self.fcst[item] = fcst
         if self.use_boxcox:
             self.fcst[item] = inv_boxcox(
                 self.fcst[item], 
                 self.lmbda_boxcox[item])
         progress_bar.update()
     progress_bar.finish()
     return pd.concat(self.fcst, axis=1)
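The Box-Cox handling in the fit/predict snippets above relies on `boxcox` and `inv_boxcox`; assuming these come from SciPy (scipy.stats.boxcox and scipy.special.inv_boxcox, the imports are not shown in the snippets), the round trip looks like:

import numpy as np
from scipy.stats import boxcox          # assumed source of boxcox
from scipy.special import inv_boxcox    # assumed source of inv_boxcox

y = np.array([1.0, 2.0, 4.0, 8.0])      # Box-Cox requires strictly positive data
y_t, lmbda = boxcox(y)                  # transform and estimate lambda
y_back = inv_boxcox(y_t, lmbda)         # invert on the forecast scale
print(np.allclose(y, y_back))           # True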
Example no. 14
    def run_deep_dream_simple(self,
                              img,
                              steps = 100,
                              learning_rate = 1.0,
                              update_frequency = 5):
        updates = []
        progress_bar = ProgressBar(steps)
        progress_bar.start()

        steps_remaining = steps
        steps_done      = 0
        while steps_remaining:
            if steps_remaining>update_frequency:
                run_steps = tf.constant(update_frequency)
            else:
                run_steps = tf.constant(steps_remaining)
            steps_remaining -= run_steps
            steps_done      += run_steps

            img = self(img, run_steps, tf.constant(learning_rate))
            updates.append(img.numpy())
            progress_bar.update(steps_done)

        return img, updates
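run_deep_dream_simple assumes that `self(img, run_steps, step_size)` performs the actual gradient-ascent updates; a hedged sketch of what such a callable might look like (a hypothetical tf.Module, not the example's actual class):

import tensorflow as tf

class DeepDreamStep(tf.Module):
    """Hypothetical gradient-ascent step; the real wrapping class may differ."""

    def __init__(self, model):
        super().__init__()
        self.model = model  # assumed: a feature extractor returning activations to maximise

    @tf.function
    def __call__(self, img, steps, step_size):
        for _ in tf.range(steps):
            with tf.GradientTape() as tape:
                tape.watch(img)
                activations = self.model(tf.expand_dims(img, axis=0))
                loss = tf.reduce_mean(activations)
            gradients = tape.gradient(loss, img)
            gradients /= tf.math.reduce_std(gradients) + 1e-8
            # gradient ascent: move the image towards higher activation
            img = img + gradients * step_size
            img = tf.clip_by_value(img, -1.0, 1.0)
        return img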
Example no. 15
def general_image_folder(opt):
    """Create lmdb for general image folders
    Users should define the keys, such as: '0321_s035' for DIV2K sub-images
    If all the images have the same resolution, it will only store one copy of resolution info.
        Otherwise, it will store the resolution info for each image.
    """
    #### configurations
    read_all_imgs = False  # whether to read all images into memory with multiprocessing
    # Set False to limit memory usage
    BATCH = 5000  # After BATCH images, lmdb commits, if read_all_imgs = False
    n_thread = 40
    ########################################################
    img_folder = opt['img_folder']
    lmdb_save_path = opt['lmdb_save_path']
    meta_info = {'name': opt['name']}
    if not lmdb_save_path.endswith('.lmdb'):
        raise ValueError("lmdb_save_path must end with \'lmdb\'.")
    if osp.exists(lmdb_save_path):
        print('Folder [{:s}] already exists. Exit...'.format(lmdb_save_path))
        sys.exit(1)

    #### read all the image paths to a list
    print('Reading image path list ...')
    all_img_list = sorted(glob.glob(osp.join(img_folder, '*')))
    keys = []
    for img_path in all_img_list:
        keys.append(osp.splitext(osp.basename(img_path))[0])

    if read_all_imgs:
        #### read all images to memory (multiprocessing)
        dataset = {
        }  # store all image data. list cannot keep the order, use dict
        print('Read images with multiprocessing, #thread: {} ...'.format(
            n_thread))
        pbar = ProgressBar(len(all_img_list))

        def mycallback(arg):
            '''get the image data and update pbar'''
            key = arg[0]
            dataset[key] = arg[1]
            pbar.update('Reading {}'.format(key))

        pool = Pool(n_thread)
        for path, key in zip(all_img_list, keys):
            pool.apply_async(read_image_worker,
                             args=(path, key),
                             callback=mycallback)
        pool.close()
        pool.join()
        print('Finish reading {} images.\nWrite lmdb...'.format(
            len(all_img_list)))

    #### create lmdb environment
    data_size_per_img = cv2.imread(all_img_list[0],
                                   cv2.IMREAD_UNCHANGED).nbytes
    print('data size per image is: ', data_size_per_img)
    data_size = data_size_per_img * len(all_img_list)
    env = lmdb.open(lmdb_save_path, map_size=data_size * 10)

    #### write data to lmdb
    pbar = ProgressBar(len(all_img_list))
    txn = env.begin(write=True)
    resolutions = []
    for idx, (path, key) in enumerate(zip(all_img_list, keys)):
        pbar.update('Write {}'.format(key))
        key_byte = key.encode('ascii')
        data = dataset[key] if read_all_imgs else cv2.imread(
            path, cv2.IMREAD_UNCHANGED)
        if data.ndim == 2:
            H, W = data.shape
            C = 1
        else:
            H, W, C = data.shape
        txn.put(key_byte, data)
        resolutions.append('{:d}_{:d}_{:d}'.format(C, H, W))
        if not read_all_imgs and idx % BATCH == 0:
            txn.commit()
            txn = env.begin(write=True)
    txn.commit()
    env.close()
    print('Finish writing lmdb.')

    #### create meta information
    # check whether all the images are the same size
    assert len(keys) == len(resolutions)
    if len(set(resolutions)) <= 1:
        meta_info['resolution'] = [resolutions[0]]
        meta_info['keys'] = keys
        print('All images have the same resolution. Simplify the meta info.')
    else:
        meta_info['resolution'] = resolutions
        meta_info['keys'] = keys
        print(
            'Not all images have the same resolution. Save meta info for each image.'
        )

    pickle.dump(meta_info, open(osp.join(lmdb_save_path, 'meta_info.pkl'),
                                "wb"))
    print('Finish creating lmdb meta info.')
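A hedged sketch (not part of the original script) of reading one entry back from the resulting lmdb, assuming 8-bit images and the simplified single-resolution meta info:

import pickle
import os.path as osp

import lmdb
import numpy as np

lmdb_save_path = 'example.lmdb'  # hypothetical path created by general_image_folder
meta_info = pickle.load(open(osp.join(lmdb_save_path, 'meta_info.pkl'), 'rb'))
key = meta_info['keys'][0]
C, H, W = (int(v) for v in meta_info['resolution'][0].split('_'))

env = lmdb.open(lmdb_save_path, readonly=True, lock=False)
with env.begin(write=False) as txn:
    buf = txn.get(key.encode('ascii'))
img = np.frombuffer(buf, dtype=np.uint8).reshape(H, W, C)  # assumes uint8 image data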
Example no. 16
def REDS(mode):
    """Create lmdb for the REDS dataset, each image with a fixed size
    GT: [3, 720, 1280], key: 000_00000000
    LR: [3, 180, 320], key: 000_00000000
    key: 000_00000000

    flow: downsampled flow: [3, 360, 320], keys: 000_00000005_[p2, p1, n1, n2]
        Each flow is calculated with the GT images by PWCNet and then downsampled by 1/4
        Flow map is quantized by mmcv and saved in png format
    """
    #### configurations
    read_all_imgs = False  # whether to read all images into memory with multiprocessing
    # Set False to limit memory usage
    BATCH = 5000  # After BATCH images, lmdb commits, if read_all_imgs = False
    if mode == 'train_sharp':
        img_folder = '../REDS/train_sharp'
        lmdb_save_path = '../REDS/train_sharp_wval.lmdb'
        H_dst, W_dst = 720, 1280
    elif mode == 'train_sharp_bicubic':
        img_folder = '../REDS/train_sharp_bicubic'
        lmdb_save_path = '../REDS/train_sharp_bicubic_wval.lmdb'
        H_dst, W_dst = 180, 320
    elif mode == 'train_blur_bicubic':
        img_folder = '../REDS/train_blur_bicubic'
        lmdb_save_path = '../REDS/train_blur_bicubic_wval.lmdb'
        H_dst, W_dst = 180, 320
    elif mode == 'train_blur':
        img_folder = '../REDS/train_blur'
        lmdb_save_path = '../REDS/train_blur_wval.lmdb'
        H_dst, W_dst = 720, 1280
    elif mode == 'train_blur_comp':
        img_folder = '../REDS/train_blur_comp'
        lmdb_save_path = '../REDS/train_blur_comp_wval.lmdb'
        H_dst, W_dst = 720, 1280
    elif mode == 'train_sharp_flowx4':
        img_folder = '../REDS/train_sharp_flowx4'
        lmdb_save_path = '../REDS/train_sharp_flowx4.lmdb'
        H_dst, W_dst = 360, 320
    n_thread = 40
    ########################################################
    if not lmdb_save_path.endswith('.lmdb'):
        raise ValueError("lmdb_save_path must end with \'lmdb\'.")
    if osp.exists(lmdb_save_path):
        print('Folder [{:s}] already exists. Exit...'.format(lmdb_save_path))
        sys.exit(1)

    #### read all the image paths to a list
    print('Reading image path list ...')
    all_img_list = data_util._get_paths_from_images(img_folder)
    keys = []
    for img_path in all_img_list:
        split_rlt = img_path.split('/')
        folder = split_rlt[-2]
        img_name = split_rlt[-1].split('.png')[0]
        keys.append(folder + '_' + img_name)

    if read_all_imgs:
        #### read all images to memory (multiprocessing)
        dataset = {
        }  # store all image data. list cannot keep the order, use dict
        print('Read images with multiprocessing, #thread: {} ...'.format(
            n_thread))
        pbar = ProgressBar(len(all_img_list))

        def mycallback(arg):
            '''get the image data and update pbar'''
            key = arg[0]
            dataset[key] = arg[1]
            pbar.update('Reading {}'.format(key))

        pool = Pool(n_thread)
        for path, key in zip(all_img_list, keys):
            pool.apply_async(read_image_worker,
                             args=(path, key),
                             callback=mycallback)
        pool.close()
        pool.join()
        print('Finish reading {} images.\nWrite lmdb...'.format(
            len(all_img_list)))

    #### create lmdb environment
    data_size_per_img = cv2.imread(all_img_list[0],
                                   cv2.IMREAD_UNCHANGED).nbytes
    print('data size per image is: ', data_size_per_img)
    data_size = data_size_per_img * len(all_img_list)
    env = lmdb.open(lmdb_save_path, map_size=data_size * 10)

    #### write data to lmdb
    pbar = ProgressBar(len(all_img_list))
    txn = env.begin(write=True)
    for idx, (path, key) in enumerate(zip(all_img_list, keys)):
        pbar.update('Write {}'.format(key))
        key_byte = key.encode('ascii')
        data = dataset[key] if read_all_imgs else cv2.imread(
            path, cv2.IMREAD_UNCHANGED)
        if 'flow' in mode:
            H, W = data.shape
            assert H == H_dst and W == W_dst, 'different shape.'
        else:
            H, W, C = data.shape
            assert H == H_dst and W == W_dst and C == 3, 'different shape.'
        txn.put(key_byte, data)
        if not read_all_imgs and idx % BATCH == 0:
            txn.commit()
            txn = env.begin(write=True)
    txn.commit()
    env.close()
    print('Finish writing lmdb.')

    #### create meta information
    meta_info = {}
    meta_info['name'] = 'REDS_{}_wval'.format(mode)
    channel = 1 if 'flow' in mode else 3
    meta_info['resolution'] = '{}_{}_{}'.format(channel, H_dst, W_dst)
    meta_info['keys'] = keys
    pickle.dump(meta_info, open(osp.join(lmdb_save_path, 'meta_info.pkl'),
                                "wb"))
    print('Finish creating lmdb meta info.')
Example no. 17
def vimeo90k(mode):
    """Create lmdb for the Vimeo90K dataset, each image with a fixed size
    GT: [3, 256, 448]
        Now only need the 4th frame, e.g., 00001_0001_4
    LR: [3, 64, 112]
        1st - 7th frames, e.g., 00001_0001_1, ..., 00001_0001_7
    key:
        Use the folder and subfolder names, w/o the frame index, e.g., 00001_0001

    flow: downsampled flow: [3, 360, 320], keys: 00001_0001_4_[p3, p2, p1, n1, n2, n3]
        Each flow is calculated with GT images by PWCNet and then downsampled by 1/4
        Flow map is quantized by mmcv and saved in png format
    """
    #### configurations
    read_all_imgs = False  # whether to read all images into memory with multiprocessing
    # Set False to limit memory usage
    BATCH = 5000  # After BATCH images, lmdb commits, if read_all_imgs = False
    if mode == 'GT':
        img_folder = '../vimeo90k/vimeo_septuplet/sequences'
        lmdb_save_path = '../vimeo90k/vimeo90k_train_GT.lmdb'
        txt_file = '../vimeo90k/vimeo_septuplet/sep_trainlist.txt'
        H_dst, W_dst = 256, 448
    elif mode == 'LR':
        img_folder = '../vimeo90k/vimeo_septuplet_matlabLRx4/sequences'
        lmdb_save_path = '../vimeo90k/vimeo90k_train_LR7frames.lmdb'
        txt_file = '../vimeo90k/vimeo_septuplet/sep_trainlist.txt'
        H_dst, W_dst = 64, 112
    elif mode == 'flow':
        img_folder = '../vimeo90k/vimeo_septuplet/sequences_flowx4'
        lmdb_save_path = '../vimeo90k/vimeo90k_train_flowx4.lmdb'
        txt_file = '../vimeo90k/vimeo_septuplet/sep_trainlist.txt'
        H_dst, W_dst = 128, 112
    else:
        raise ValueError('Wrong dataset mode: {}'.format(mode))
    n_thread = 40
    ########################################################
    if not lmdb_save_path.endswith('.lmdb'):
        raise ValueError("lmdb_save_path must end with \'lmdb\'.")
    if osp.exists(lmdb_save_path):
        print('Folder [{:s}] already exists. Exit...'.format(lmdb_save_path))
        sys.exit(1)

    #### read all the image paths to a list
    print('Reading image path list ...')
    with open(txt_file) as f:
        train_l = f.readlines()
        train_l = [v.strip() for v in train_l]
    all_img_list = []
    keys = []
    for line in train_l:
        folder = line.split('/')[0]
        sub_folder = line.split('/')[1]
        all_img_list.extend(
            glob.glob(osp.join(img_folder, folder, sub_folder, '*')))
        if mode == 'flow':
            for j in range(1, 4):
                keys.append('{}_{}_4_n{}'.format(folder, sub_folder, j))
                keys.append('{}_{}_4_p{}'.format(folder, sub_folder, j))
        else:
            for j in range(7):
                keys.append('{}_{}_{}'.format(folder, sub_folder, j + 1))
    all_img_list = sorted(all_img_list)
    keys = sorted(keys)
    if mode == 'GT':  # only read the 4th frame for the GT mode
        print('Only keep the 4th frame.')
        all_img_list = [v for v in all_img_list if v.endswith('im4.png')]
        keys = [v for v in keys if v.endswith('_4')]

    if read_all_imgs:
        #### read all images to memory (multiprocessing)
        dataset = {
        }  # store all image data. list cannot keep the order, use dict
        print('Read images with multiprocessing, #thread: {} ...'.format(
            n_thread))
        pbar = ProgressBar(len(all_img_list))

        def mycallback(arg):
            """get the image data and update pbar"""
            key = arg[0]
            dataset[key] = arg[1]
            pbar.update('Reading {}'.format(key))

        pool = Pool(n_thread)
        for path, key in zip(all_img_list, keys):
            pool.apply_async(read_image_worker,
                             args=(path, key),
                             callback=mycallback)
        pool.close()
        pool.join()
        print('Finish reading {} images.\nWrite lmdb...'.format(
            len(all_img_list)))

    #### write data to lmdb
    data_size_per_img = cv2.imread(all_img_list[0],
                                   cv2.IMREAD_UNCHANGED).nbytes
    print('data size per image is: ', data_size_per_img)
    data_size = data_size_per_img * len(all_img_list)
    env = lmdb.open(lmdb_save_path, map_size=data_size * 10)
    txn = env.begin(write=True)
    pbar = ProgressBar(len(all_img_list))
    for idx, (path, key) in enumerate(zip(all_img_list, keys)):
        pbar.update('Write {}'.format(key))
        key_byte = key.encode('ascii')
        data = dataset[key] if read_all_imgs else cv2.imread(
            path, cv2.IMREAD_UNCHANGED)
        if 'flow' in mode:
            H, W = data.shape
            assert H == H_dst and W == W_dst, 'different shape.'
        else:
            H, W, C = data.shape
            assert H == H_dst and W == W_dst and C == 3, 'different shape.'
        txn.put(key_byte, data)
        if not read_all_imgs and idx % BATCH == 0:
            txn.commit()
            txn = env.begin(write=True)
    txn.commit()
    env.close()
    print('Finish writing lmdb.')

    #### create meta information
    meta_info = {}
    if mode == 'GT':
        meta_info['name'] = 'Vimeo90K_train_GT'
    elif mode == 'LR':
        meta_info['name'] = 'Vimeo90K_train_LR'
    elif mode == 'flow':
        meta_info['name'] = 'Vimeo90K_train_flowx4'
    channel = 1 if 'flow' in mode else 3
    meta_info['resolution'] = '{}_{}_{}'.format(channel, H_dst, W_dst)
    key_set = set()
    for key in keys:
        if mode == 'flow':
            a, b, _, _ = key.split('_')
        else:
            a, b, _ = key.split('_')
        key_set.add('{}_{}'.format(a, b))
    meta_info['keys'] = list(key_set)
    pickle.dump(meta_info, open(osp.join(lmdb_save_path, 'meta_info.pkl'),
                                "wb"))
    print('Finish creating lmdb meta info.')
Example no. 18
def sgbminnerblock(sorting_value, basis, expect_basis, expect_brownian_basis, \
                   riskfree_price, riskfree_delta, adjusted_price, adjusted_delta,\
                   timespan, bundles):
    """Perform the SGBM backward algorithm with given forward simulations

    This algorithm collects all the necessary data from the forward simulation separately and
    then performs the backward step sequentially with respect to the time discretization, starting
    from the last time period. Within each time period, all samples are first sorted according to
    the sorting value and partitioned into bundles containing paths with similar sorting values.
    In each bundle, several regressions are completed and the resulting coefficients are used to
    calculate the target value (option prices) within each bundle. Finally, all the results are
    collected and a new cycle begins, until we reach time zero.

    :param sorting_value: Array of the values of sorting function
    :param basis: Array of the regression basis for each path at every time step
    :param expect_basis: Array of the expectation of the regression basis for each path at
                         every time step
    :param expect_brownian_basis: Array of the expectation of the regression basis multiplied by a
                                  Brownian motion in each dimension for each path at every time step
    :param riskfree_price: Array of the risk-free option price for each path at expiry
    :param riskfree_delta: Array of the risk-free option hedging for each path at expiry
    :param adjusted_price: Array of the risk-adjusted option price for each path at expiry
    :param adjusted_delta: Array of the risk-adjusted option hedging for each path at expiry
    :param timespan: Array of time partition points
    :param bundles: Number of bundles at every time step (except the starting one)
    :type sorting_value: N x (M+1) array
    :type basis: N x Q x M array
    :type expect_basis: N x Q x M array
    :type expect_brownian_basis: N x Q x d x M array
    :type riskfree_price: N x 1 array
    :type riskfree_delta: N x d array
    :type adjusted_price: N x 1 array
    :type adjusted_delta: N x d array
    :type timespan: M+1 array
    :type bundles: int
    :returns: (average riskfree option price, average risk adjusted option price)
    :rtype: (scalar, scalar)

    .. todo:: Allowing variation of bundle size between time steps
    """
    num_of_time_points = np.size(timespan)  # M+1
    num_of_samples = basis.shape[0]  # N
    adaptive_bundles = bundles * np.ones(num_of_time_points, dtype=int)
    adaptive_bundles[0] = 1
    # Calculate the number of bundles for each time step.
    # (preparation for adaptive number of bundles)
    num_of_bundles = np.amax(adaptive_bundles)
    bundle_size = np.zeros((num_of_time_points, num_of_bundles), dtype=int)

    p_bar = ProgressBar(num_of_time_points - 1,
                        prefix='backward process',
                        suffix='',
                        decimals=2,
                        barLength=100)

    # The following is the main backward-in-time algorithm.
    for j in range(num_of_time_points - 1):
        p_bar.update(j)

        # Calculate the number of paths in each bundle.
        c_bundles_num = adaptive_bundles[-j - 2]
        bundle_size[-j - 2,
                    0:c_bundles_num] = int(num_of_samples / c_bundles_num)
        bundle_size[-j - 2, 0:num_of_samples -
                    c_bundles_num * int(num_of_samples / c_bundles_num)] += 1

        # Sort the prices and deltas based on the order of sorting value
        order_p = np.argsort(sorting_value[:, -j - 2])

        # Collect all the values that we have to perform regression on
        regression_coeff = np.zeros(
            (c_bundles_num, NUM_REGRESSION, BASIS_ORDER))
        regression_unknown = regression_variable(basis.shape[0], riskfree_price, \
                                                 riskfree_delta, adjusted_price, adjusted_delta)

        # Collect all the basis related values in this time step and put them in order
        sorted_basis = basis[order_p, :, -j - 1]
        sorted_expect_basis = expect_basis[order_p, :, -j - 1]
        sorted_expect_brownian_basis = expect_brownian_basis[order_p, :, :,
                                                             -j - 1]
        sorted_regression_unknown = regression_unknown[order_p, :]

        try:
            # Distribute the relevant information into the different bundles and run the regression
            bundle_range = np.empty((c_bundles_num, 2), dtype=int)
            for i in range(c_bundles_num):
                bundle_range[i] = \
                np.array([np.sum(bundle_size[-j-2, 0:i]), np.sum(bundle_size[-j-2, 0:i+1])])
            for i in range(c_bundles_num):
                bundle_regression_unknown = \
                sorted_regression_unknown[bundle_range[i, 0]: bundle_range[i, 1]]
                bundle_basis = sorted_basis[bundle_range[i, 0]:bundle_range[i,
                                                                            1]]
                regression_coeff[i] = regressioninbundle(bundle_basis, \
                                   bundle_regression_unknown)
        except ExceedBoundError:
            raise ExceedBoundError

        # Calculate the previous value based on problem specific numerical scheme
        theta_parameters = np.array(
            [0, 1])  # Adjustment parameters for the integration approximation
        delta_t = timespan[-j - 1] - timespan[-j - 2]
        for i in range(c_bundles_num):
            bundle_expect_basis = sorted_expect_basis[bundle_range[
                i, 0]:bundle_range[i, 1]]
            bundle_expect_brownian_basis = \
            sorted_expect_brownian_basis[bundle_range[i, 0]: bundle_range[i, 1]]
            riskfree_delta[order_p[bundle_range[i, 0]: bundle_range[i, 1]], :], \
            riskfree_price[order_p[bundle_range[i, 0]: bundle_range[i, 1]], :], \
            adjusted_delta[order_p[bundle_range[i, 0]: bundle_range[i, 1]], :], \
            adjusted_price[order_p[bundle_range[i, 0]: bundle_range[i, 1]], :] = \
            numerical_scheme(theta_parameters, delta_t, regression_coeff[i], \
                                             bundle_expect_basis, bundle_expect_brownian_basis)

    p_bar.update(num_of_time_points - 1)

    return (np.mean(riskfree_price[:, :]), np.mean(adjusted_price[:, :]))
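To make the bundle-size bookkeeping above concrete, here is the same split computed for a small case (e.g. N = 10 paths into 3 bundles), mirroring the assignment in the backward loop:

import numpy as np

num_of_samples, c_bundles_num = 10, 3
sizes = np.full(c_bundles_num, num_of_samples // c_bundles_num)  # [3, 3, 3]
# distribute the remainder over the first bundles, exactly as in the backward loop
sizes[:num_of_samples - c_bundles_num * (num_of_samples // c_bundles_num)] += 1
print(sizes, sizes.sum())  # [4 3 3] 10 -- every path lands in exactly one bundle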
Example no. 19
    for i in range(out_W):
        idx = int(indices_W[i][0])
        out_2[:, i, 0] = out_1_aug[:, idx:idx + kernel_width,
                                   0].mv(weights_W[i])
        out_2[:, i, 1] = out_1_aug[:, idx:idx + kernel_width,
                                   1].mv(weights_W[i])
        out_2[:, i, 2] = out_1_aug[:, idx:idx + kernel_width,
                                   2].mv(weights_W[i])

    return out_2.numpy()


print('Read images...')
pbar = ProgressBar(len(img_list))
for i, v in enumerate(img_list):
    pbar.update('Read {}'.format(v))
    img = cv2.imread(v, cv2.IMREAD_UNCHANGED)
    if len(img.shape) < 3:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    dataset.append(img)
    down_img = imresize_np(img, scale, True)
    down_dataset.append(down_img)
    # print(img.shape, down_img.shape)
    data_size += img.nbytes
    down_data_size += down_img.nbytes
env = lmdb.open(lmdb_save_path, map_size=data_size * 10)
down_env = lmdb.open(down_lmdb_save_path, map_size=down_data_size * 10)
print('Finish reading {} images.\nWrite lmdb...'.format(len(img_list)))

pbar = ProgressBar(len(img_list))
with env.begin(write=True) as txn:  # txn is a Transaction object
Example no. 20
class FFmpegConcatCommand(FFmpegCommand):
    """An ffmpeg shell command with a complex concat filter."""
    _expect_patterns = [r"time=(\d\d):(\d\d):(\d\d\.\d\d)"]

    def __init__(self,
                 input_options=[],
                 output_options=[],
                 quiet=False,
                 max_progress=100,
                 has_audio=False,
                 has_video=False,
                 process_audio=True,
                 process_video=True,
                 audio_codec="pcm_s16le",
                 video_codec="h264"):
        super().__init__(input_options, output_options)
        self.progress = ProgressBar(max_value=max_progress, quiet=quiet)
        self.has_video = has_video
        self.process_video = process_video
        self.video_codec = video_codec
        if (self.has_video):
            self.prepend_output_options(["-map", "[vconc]"])
            if (self.process_video):
                self.prepend_output_options(["-pix_fmt", "yuv420p"])
            self.prepend_output_options(["-codec:v", self.video_codec])
        self.has_audio = has_audio
        self.process_audio = process_audio
        self.audio_codec = audio_codec
        if (self.has_audio):
            if (self.process_audio):
                self.prepend_output_options(["-ac", "1", "-map", "[anorm]"])
            else:
                self.prepend_output_options(["-map", "[aconc]"])
            self.prepend_output_options(["-codec:a", self.audio_codec])
        self.filters = []

    def append_filter(self, filter):
        """Append a filter to the filters list."""
        if (filter):
            self.filters.append(filter)

    def append_normalisation_filter(self):
        """Append a normalisation audio filter to the complex filter."""
        if (self.has_audio):
            self.append_filter("[aconc] dynaudnorm=r=0.25:f=10:b=1 [anorm]")

    def append_concat_filter(self, frame_type, segments=[]):
        """Append a concat filter to the filters list"""
        # Ignore frame segments.
        if frame_type in ["a", "v"]:
            if (len(segments) > 1):
                self.append_filter(
                    "{inspecs} concat=n={n}:v={v}:a={a} [{t}conc]".format(
                        inspecs=" ".join(
                            [s.output_stream_specifier() for s in segments]),
                        n=len(segments),
                        v=int(frame_type == "v"),
                        a=int(frame_type == "a"),
                        t=frame_type))
            elif (len(segments) == 1):
                self.append_filter("{inspec} {a}null [{t}conc]".format(
                    inspec=segments[0].output_stream_specifier(),
                    a=frame_type if frame_type == "a" else "",
                    t=frame_type))

    def build_complex_filter(self):
        """Build the complete complex filter.
        
        Filters in the filtergraph are separated by ";".
        """
        return "{f}".format(f=";".join(self.filters))

    def argument_string(self, quote=False):
        """Return the list of arguments as a string."""
        args = (
            self._base_options + self.input_options +
            ["-filter_complex", self.build_complex_filter()] +
            self.output_options)
        if quote:
            return " ".join([ShellCommand.shellquote(a) for a in args])
        else:
            return " ".join(args)

    def argument_list(self):
        """Return a combined list of all arguments."""
        return (
            self._base_options + self.input_options +
            ["-filter_complex", self.build_complex_filter()] +
            self.output_options)

    def process_pattern(self, pat):
        """Respond to a pexpect pattern. Return True on EOF."""
        if (pat == 1):
            elapsed = datetime.timedelta(
                hours=int(self.process.match.group(1)),
                minutes=int(self.process.match.group(2)),
                seconds=float(self.process.match.group(3)))
            self.progress.update(elapsed.total_seconds())
        return (pat == 0)
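As an illustration of what the concat filtergraph ends up looking like, a small hedged usage sketch (it assumes the FFmpegCommand base class is importable and uses a hypothetical stub in place of real segment objects):

class _StubSegment:
    """Hypothetical stand-in for a segment object exposing an output stream specifier."""

    def __init__(self, spec):
        self._spec = spec

    def output_stream_specifier(self):
        return self._spec

cmd = FFmpegConcatCommand(has_audio=True, has_video=True)
cmd.append_concat_filter("v", [_StubSegment("[0:v]"), _StubSegment("[1:v]")])
cmd.append_concat_filter("a", [_StubSegment("[0:a]"), _StubSegment("[1:a]")])
cmd.append_normalisation_filter()
print(cmd.build_complex_filter())
# [0:v] [1:v] concat=n=2:v=1:a=0 [vconc];[0:a] [1:a] concat=n=2:v=0:a=1 [aconc];[aconc] dynaudnorm=r=0.25:f=10:b=1 [anorm]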
    def printStatistics(self):
        SERVICE_LEVEL = 20
        total_calls = len(self.demandList)
        missed_calls = 0
        delay = 0
        total_delay = 0
        max_delay = 0
        out_svl = 0
        unanswered_calls = 0
        print('Calculating demand statistics...')
        for call in self.demandList:
            if 'answered_interval' not in call:
                unanswered_calls += 1
                missed_calls += 1
                out_svl += 1
            else:
                if call['interval'] != call['answered_interval']:
                    missed_calls += 1
                    delay = call['answered_interval'] - call['interval']
                    total_delay += delay
                    if delay > max_delay:
                        max_delay = delay
                    if delay > SERVICE_LEVEL:
                        out_svl += 1

        print('Calculating resource statistics...')
        utilization = 0
        available = 0
        for res in self.resourceList:
            available += self.SHIFT_LENGTH
            for moment in res['utilization']:
                if moment > 1:
                    utilization += 1
        res_utilization = round(100 * utilization / available, 2)

        if missed_calls > 0:
            avg_delay = round(total_delay / missed_calls, 2)
            asa = round(total_delay / total_calls, 0)
        else:
            avg_delay = 0
            asa = 0

        unanswered_perc = round(100 * unanswered_calls / total_calls, 2)
        calc_svl = round(100 - (100 * out_svl / total_calls), 2)
        delayed_perc = round(100 * missed_calls / total_calls, 2)

        print(f'Total calls: {total_calls}')
        print(f'Number of resources: {self.NUM_RESOURCES}')
        print(f'Service level ({SERVICE_LEVEL}): {calc_svl}%')
        print(f'Delayed calls: {missed_calls} ({delayed_perc}%)')
        print(f'Unanswered calls: {unanswered_calls} ({unanswered_perc}%)')
        print(f'Average seconds to answer: {asa:.0f}')
        print(f'Average delay: {avg_delay}')
        print(f'Max delay: {max_delay}')
        print(f'Resource Utilization: {res_utilization}%')

        if self.WRITE_TO_FILE:
            import sqlite3
            import csv
            conn = sqlite3.connect('perfmodel.db')
            crsr = conn.cursor()
            crsr.execute("""DROP TABLE IF EXISTS tblDemand;""")
            conn.commit()
            crsr.execute("""CREATE TABLE tblDemand (ID BIGINT PRIMARY KEY, INTERVAL BIGINT, DURATION BIGINT, SKILL STRING);""")
            conn.commit()

            print('Writing to file...')
            with open('demand_stats.csv', 'w') as csv_file:
                writer = csv.DictWriter(csv_file, fieldnames=self.demandList[0].keys(), lineterminator="\n")
                writer.writeheader()
                for w_ in self.demandList:
                    writer.writerow(w_)
                    # print(f"""INSERT INTO tblDemand VALUES ({w_['id']}, {w_['interval']}, {w_['duration']},'{w_['skill']}');""")
                    # crsr.execute(f"""INSERT INTO tblDemand VALUES ({w_['id']}, {w_['interval']}, {w_['duration']},'{w_['skill']}');""")
            conn.commit()

            crsr.execute("""DROP TABLE IF EXISTS tblResources;""")
            conn.commit()
            crsr.execute("""CREATE TABLE tblResources (ID BIGINT, SCHEDULE TINYINT, UTILIZATION BIGINT, SKILLS STRING);""")
            conn.commit()

            toolbar_width = 40
            pb = ProgressBar(toolbar_width=toolbar_width)
            with open('resource_stats.csv', 'w') as csv_file:
                writer = csv.DictWriter(csv_file, fieldnames=['id', 'interval', 'schedule', 'utilization'], lineterminator="\n")
                writer.writeheader()
                for k, w_ in enumerate(self.resourceList):
                    # print(f"""INSERT INTO tblResources VALUES ({w_['id']}, '{w_['skills'][0]}');""")
                    # crsr.execute(f"""INSERT INTO tblResources VALUES ({w_['id']}, '{w_['skills'][0]}');""")
                    pb.update(k, len(self.resourceList))
                    for i_ in range(self.NUM_INTERVALS):
                        row = {'id': w_['id'], 'interval': str(i_), 'schedule': w_['schedule'][i_], 'utilization': w_['utilization'][i_]}
                        # crsr.execute(f"""INSERT INTO tblResources VALUES ({w_['id']}, {w_['schedule'][i_]}, {w_['utilization'][i_]}, '{w_['skills'][0]}');""")
                        writer.writerow(row)
            pb.update(1, 1)
            pb.clean()
            conn.commit()
            conn.close()