Example #1
    def __init__(self,
                 root: str = folder,
                 train_subset: bool = True,
                 suffix: str = '.png',
                 min_num_cls: int = 5,
                 max_num_cls: int = 20,
                 k_shot: int = 20,
                 expand_dim: bool = False,
                 load_images: bool = True) -> None:
        """Initialize a data loader for Omniglot data set or a two-level dataset
            with structure similar to Omniglot: alphabet -> character -> image

        Args:
            root (str): path to the folder of Omniglot data set
            train_subset (bool): if True, this will load data from
                the ``images_background`` folder (or, training set). If False,
                it loads data from ``images_evaluation`` (or, validation set)
            suffix (str): the suffix of images
            min_num_cls (int): minimum number of classes within a generated episode
            max_num_cls (int): maximum number of classes within a generated episode
            k_shot (int): number of samples per class within a generated episode
            expand_dim (bool): if True, repeat the channel dimension from 1 to 3
            load_images (bool): if True, load all image data (PIL) into RAM.
                This suits small data sets, since it speeds up data loading.
                If False, images are loaded from disk on demand, which suits
                large data sets.

        Returns: an OmniglotLoader instance
        """
        self.root = os.path.join(
            root, 'images_background' if train_subset else 'images_evaluation')
        self.suffix = suffix
        self.min_num_cls = min_num_cls
        self.max_num_cls = max_num_cls
        self.k_shot = k_shot
        self.expand_dim = expand_dim
        self.load_images = load_images

        # create a nested dictionary to store data
        self.data = dict.fromkeys(list_dir(root=self.root))
        for alphabet in self.data:
            self.data[alphabet] = dict.fromkeys(
                list_dir(root=os.path.join(self.root, alphabet)))

            # loop through each character in the alphabet
            for character in self.data[alphabet]:
                self.data[alphabet][character] = []

                # loop through all images in an alphabet character
                for img_name in list_files(root=os.path.join(
                        self.root, alphabet, character),
                                           suffix=suffix):
                    if self.load_images:
                        # load images
                        img = _load_image(img_url=os.path.join(
                            self.root, alphabet, character, img_name),
                                          expand_dim=self.expand_dim)
                    else:
                        img = img_name

                    self.data[alphabet][character].append(img)
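A minimal usage sketch for the loader above (hedged: it assumes the surrounding class is named OmniglotLoader, as the docstring suggests, and that an Omniglot-style folder hierarchy exists under ./omniglot):

loader = OmniglotLoader(root='./omniglot', train_subset=True, k_shot=5)
for alphabet, characters in loader.data.items():
    print(alphabet, len(characters))  # number of characters per alphabet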
Example #2
def wait_for_rar(folder, sab_nzo_id, some_rar):
    isCanceled = False
    is_rar_found = False
    # If some_rar exists we skip the dialogs
    for file, bytes in utils.sorted_rar_file_list(utils.list_dir(folder)):
        if file == some_rar:
            is_rar_found = True
            break
    if not is_rar_found:
        seconds = 0
        progressDialog = xbmcgui.DialogProgress()
        progressDialog.create('NZBS', 'Request to SABnzbd succeeded, waiting for ', some_rar)
        while not is_rar_found:
            seconds += 1
            time.sleep(1)
            dirList = utils.sorted_rar_file_list(utils.list_dir(folder))
            for file, bytes in dirList:
                if file == some_rar:
                    path = os.path.join(folder,file)
                    # Wait until the file is written to disk before proceeding
                    size_now = int(bytes)
                    size_later = 0
                    while (size_now != size_later) or (size_now == 0) or (size_later == 0):
                        size_now = os.stat(path).st_size
                        if size_now != size_later:
                            time.sleep(0.5)
                            size_later = os.stat(path).st_size
                    is_rar_found = True
                    break
            label = str(seconds) + " seconds"
            # TODO
            # Shorten some_rar if too long for the dialog window
            progressDialog.update(0, 'Request to SABnzbd succeeded, waiting for', some_rar, label)
            if progressDialog.iscanceled():
                progressDialog.close()
                dialog = xbmcgui.Dialog()
                ret = dialog.select('What do you want to do?', ['Delete job', 'Just download'])
                if ret == 0:
                    pause = SABNZBD.pause('',sab_nzo_id)
                    time.sleep(3)
                    delete_ = SABNZBD.delete_queue('',sab_nzo_id)
                    if "ok" not in delete_:
                        xbmc.log(delete_)
                        xbmc.executebuiltin('Notification("NZBS","Deleting failed")')
                    else:
                        xbmc.executebuiltin('Notification("NZBS","Deleting succeeded")')
                    iscanceled = True
                    return iscanceled 
                if ret == 1:
                    iscanceled = True
                    xbmc.executebuiltin('Notification("NZBS","Downloading")')
                    return iscanceled
        progressDialog.close()
    return isCanceled
Example #3
    def import_call(self, e):
        if setting_fftool.has_query:
            utils.showinfo("有任务正在执行,请稍后")
            return

        tup = tuple([])
        ft = self.file_types
        ft_tup = self.file_types_tup
        if e.widget == self.import_btn:
            tup = filedialog.askopenfilenames(
                filetypes=ft,
                title='导入文件',
                initialdir=setting_fftool.last_folder)

        elif e.widget == self.import_list_btn:
            if os.path.exists(setting_fftool.list_file):
                arr = utils.read_txt(setting_fftool.list_file)
                new_arr = []
                for f in arr:
                    if os.path.exists(f):
                        new_arr.append(f)
                if not len(new_arr):
                    utils.showinfo('txt中的地址都不正确' + setting_fftool.list_file)
                    return
                tup = tuple(new_arr)

        elif e.widget == self.import_dir_btn:
            folder = filedialog.askdirectory(
                title='选择目录', initialdir=setting_fftool.last_folder)
            if folder:
                folder = utils.pathlib_path(folder)
                setting_fftool.last_folder = folder
                arr = []
                new_arr = []
                # Get all files under the directory
                utils.list_dir(folder, arr)
                # Keep only files whose suffix matches the allowed formats
                for f in arr:
                    suffix = str(Path(f).suffix)
                    for f_type in ft_tup:
                        if suffix == f_type:
                            new_arr.append(f)
                            break
                tup = tuple(new_arr)

        if len(tup):
            tup = utils.pathlib_path_tup(tup, True)
            self.tree.set_list(list(tup))
            # self.start.set_state(True)
            # self.clear_query()

            setting_fftool.last_folder = utils.pathlib_parent(tup[0])
Example #4
0
 def loadNorlmalRace(self, detect_dir):
     # .../detect => .../summary
     summary_dir = detect_dir.replace('detect', 'summary')
     for f in list_dir(summary_dir, ext='NORMAL.tot-race'):
         with open(f, 'r') as fd:
             for line in fd:
                 self.normal_race_.add(InstRace.parse(line))
Example #5
    def upload(self, container, path, verbose=True):
        if path[-1] == '/':
            path = path[:-1]

        files = utils.list_dir(path)

        for filename in files:

            (fh, content_type, content_length) = utils.get_file_infos(filename)

            url = "{}/{}/{}".format(self.get_storage_url(), container,
                                    filename)

            data = fh.read()

            headers = {
                'Content-Type': content_type,
                'X-Storage-Token': self.get_token(),
                'Content-Length': content_length
            }

            request = urllib2.Request(url, data, headers)
            request.get_method = lambda: 'PUT'

            response = urllib2.urlopen(request)

            if response.code == 201:
                msg = '{} - OK'.format(filename)
            else:
                msg = '{} - FAIL (error {})'.format(filename, response.code)

            if verbose:
                print(msg)
Example #6
    def upload(self, container, path, verbose=True):
        if path[-1] == '/':
            path = path[:-1]

        files = utils.list_dir(path)

        for filename in files:

            (fh, content_type, content_length) = utils.get_file_infos(filename)

            url = "{}/{}/{}".format(self.get_storage_url(),
                                    container,
                                    filename)

            data = fh.read()

            headers = {
                'Content-Type': content_type,
                'X-Storage-Token': self.get_token(),
                'Content-Length': content_length
            }

            request = urllib2.Request(url, data, headers)
            request.get_method = lambda: 'PUT'

            response = urllib2.urlopen(request)

            if response.code == 201:
                msg = '{} - OK'.format(filename)
            else:
                msg = '{} - FAIL (error {})'.format(filename, response.code)

            if verbose:
                print(msg)
Example #7
def main(conf):
    """
    Read js-test run log (data/log/), check whether error occurs,
    fill a result dict (ret_dict: {0: [paths], 1:[paths]});
    Pick those without errors (ret_dict[0]), write to seed files (data/seed).
    Remove lines containing 'load("' or "load('" -- they reference other files (how to deal with them??)

    :param conf: Config object
    :return:
    """
    func = get_func(conf.eng_name)

    ret_dict = {}
    for log_path in list_dir(conf.log_dir):
        ret = func(log_path)
        if ret not in ret_dict: ret_dict[ret] = []
        ret_dict[ret] += [log_path]

    new_seed_dir = os.path.join(conf.data_dir, 'seed')
    make_dir(new_seed_dir)

    for log_path in ret_dict[0]:
        file_name = os.path.basename(log_path) + '.js'
        js_path = os.path.join(conf.seed_dir, file_name)
        new_js_path = os.path.join(new_seed_dir, file_name)

        with open(js_path, 'r') as fr, \
                open(new_js_path, 'w') as fw:
            for line in fr:
                if ('load("' not in line and 'load(\'' not in line):
                    fw.write(line)
Example #8
    def loadTotalRaceFile(self, detect_dir, sched):
        #  if sched != 'NORMAL':
        #  self.loadNorlmalRace(detect_dir)

        for f in list_dir(detect_dir, ext='.race', fkey=self.fnameTimestamp):
            if sched is not None and sched not in f:
                continue
            self.loadRaceFile(f)
Example #9
 def detectTotalRace(self, detect_dir, sched):
     for trace_file in list_dir(detect_dir, ext='.trace'):
         if sched is not None and sched not in trace_file:
             continue
         race_file = trace_file.replace('.trace', '.race')
         if not os.path.exists(race_file):
             print 'TO detect:', trace_file
             bin = os.path.join(PROJ_DIR, 'bin/detect')
             shell_exec('%s %s' % (bin, trace_file))
Example #10
    def parse(self, seed_dir, ast_dir):
        js_list = list_dir(seed_dir)
        num_js = len(js_list)
        msg = 'Start parsing %d JS files' % (num_js)
        print_msg(msg, 'INFO')

        cmd = ['node', 'utils/parse.js']
        cmd += [seed_dir, ast_dir]
        parser = Popen(cmd, cwd='./', stdin=PIPE, stdout=PIPE, stderr=PIPE)
        parser.wait()
Example #11
 def __init__(self, root='omniglot', *args, **kwargs):
     '''
     :param root: folder containing alphabets for background and evaluation set
     '''
     self.root = root
     self.alphabets = list_dir(root)
     self._characters = {}
     for alphabet in self.alphabets:
         for character in list_dir(os.path.join(root, alphabet)):
             full_character = os.path.join(root, alphabet, character)
             character_idx = len(self._characters)
             self._characters[full_character] = []
             for filename in list_files(full_character, '.png'):
                 self._characters[full_character].append({
                     'path':
                     os.path.join(root, alphabet, character, filename),
                     'character_idx':
                     character_idx
                 })
     characters_list = np.asarray(self._characters.items())
     AbstractMetaOmniglot.__init__(self, characters_list, *args, **kwargs)
Example #12
def import_zupc(import_='True'):
    require.files.directory('/tmp/zupc')
    with cd('/tmp/zupc/'):
        for f in list_dir():
            if f == '*' or f.endswith('zip'):
                continue
            run('rm -f {}'.format(f))
        if import_=='True':
            import_contours()

    base_dir = ''
    with cd(env.deploy_dir):
        for f in list_dir():
            if files.is_dir(f) and 'deployment' in f and f > base_dir:
                base_dir = f
    api_dir = env.deploy_dir+'/' + base_dir
    with cd('/tmp/zupc'):
        wget(env.zupc_fichier)
    with python.virtualenv(base_dir + '/venvAPITaxi'), cd(base_dir+'/APITaxi-master'):
        with shell_env(APITAXI_CONFIG_FILE='prod_settings.py'):
            run('python manage.py load_zupc /tmp/zupc/zupc.geojson')
Example #13
 def removeEmptyFile(self, detect_dir):
     for f in list_dir(detect_dir):
         # Files may be deleted while we iterate, so skip any that no longer exist
         if not os.path.exists(f):
             continue
         if os.path.getsize(f) == 0:
             print 'rm', f
             rm_file(f)
             # To remove corresponding trace file
             if f.endswith('.race'):
                 print 'rm', f.replace('.race', '.trace')
                 rm_file(f.replace('.race', '.trace'))
Example #14
    def __init__(self, root, background=True,
                 transform=None, target_transform=None,
                 download=False):
        self.root = join(os.path.expanduser(root), self.folder)
        self.background = background
        self.transform = transform
        self.target_transform = target_transform

        if download:
            self.download()

        if not self._check_integrity():
            raise RuntimeError('Dataset not found or corrupted.' +
                               ' You can use download=True to download it')

        self.target_folder = join(self.root, self._get_target_folder())
        self._alphabets = list_dir(self.target_folder)
        self._characters = sum([[join(a, c) for c in list_dir(join(self.target_folder, a))]
                                for a in self._alphabets], [])
        self._character_images = [[(image, idx) for image in list_files(join(self.target_folder, character), '.png')]
                                  for idx, character in enumerate(self._characters)]
        self._flat_character_images = sum(self._character_images, [])
Example #15
def build_dict(ast_dir):
    def_dict = {}
    ast_list = list_dir(ast_dir)
    num_ast = len(ast_list)
    for idx, ast_name in enumerate(ast_list):
        msg = '[%d/%d] %s' % (idx + 1, num_ast, ast_name)
        print_msg(msg, 'INFO')
        js_name, ast = load_ast(ast_name)
        js_name = trim_seed_name(js_name)
        if js_name not in def_dict:
            def_dict[js_name] = set()
        build_def_dict(ast, def_dict[js_name])
    return def_dict
Example #16
 def run(self):  # type: () -> None
     while not self._join:
         loginfo('ros_rest : ' + self.name + ' : looking for new records')
         try:
             # Check for new sorted records to send; if there are any, send them to the server using the shared session
             for bag in list_dir(self.__last):
                 _bags.put(bag)
                 self.__last = bag.bag.split('.')[0].split('_')[-1]
         except BaseException as err:
             logwarn('ros_rest : ' + self.name + ' : ' + str(err))
             _event.wait(_err_sleep)
         _event.wait(self.sleep)
     _bags.join()
     self.join()
Example #17
def main(pool, conf):
    make_dir(conf.log_dir)

    js_list = []
    for js in list_dir(conf.seed_dir):
        if (js.endswith('.js')
                and os.path.getsize(js) < 30 * 1024):  # Excludes JS over 30 KB
            js_list += [js]

    num_js = len(js_list)
    msg = 'Start executing %d JS files' % (num_js)
    print_msg(msg, 'INFO')

    pool_map(pool, exec_eng, js_list, conf=conf)
Example #18
 def archives(self):
     files = list(list_dir(config.events_dir))
     files.sort()
     events = defaultdict(dict)
     for f in files:
         for (tid, event) in self.load_events(f).items():
             events[tid].update(event)
     tids = self.get_tids()
     history = []
     for tid, e in events.items():
         info = self.get_torrent_info(e['filename'])
         info and e.update(info.__dict__)
         if tid not in tids:
             history.append(e)
     return sorted(history, key = lambda e: self.event_date(e), reverse = True)
Example #19
    def parse(self, seed_dir, ast_dir):
        js_list = list_dir(seed_dir)
        num_js = len(js_list)
        msg = 'Start parsing %d JS files' % (num_js)
        print_msg(msg, 'INFO')

        cmd = ['node', 'utils/parse.js']
        cmd += [seed_dir, ast_dir]
        print(cmd)
        parser = Popen(cmd, cwd='./', stdin=PIPE, stdout=PIPE, stderr=PIPE)
        ## Errors would be silenced by default. Uncomment when having problems.
        #for line in parser.stderr.readlines():
        #    print(line)
        #    sys.stderr.flush()
        parser.wait()
Example #20
 def __init__(self, root, loader, extensions, transform=None):
     self.root = root
     self.loader = loader
     self.extensions = extensions
     dirs = list_dir(self.root)
     if dirs:
         self.classes, self.classes_to_labels = self._find_classes(self.root)
         self.classes_size = len(self.classes_to_labels)
         self.output_names = ['img', 'label']
     else:
         self.classes_to_labels = None
         self.classes_size = 0
         self.output_names = ['img']
     # Build the dataset first so that class counts can be computed from it
     self.data = make_img_dataset(self.root, self.extensions, self.classes_to_labels)
     if dirs:
         self.classes_counts = make_classes_counts(self.data['label'], self.classes_size)
     self.transform = transform
Example #21
def get_cls_img(root: str, suffix: str) -> dict:
    """Get folders from root, and images in each folder

    Args:
        root (str): the desired directory
        suffix (str): the suffix of file or image within each folder

    Returns: a dictionary whose keys are the folder names
        and whose values are the lists of files within the corresponding folder
    """
    cls_img = dict.fromkeys(list_dir(root=root))
    for dir_ in cls_img:
        cls_img[dir_] = list_files(root=os.path.join(root, dir_),
                                   suffix=suffix)

    return cls_img
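A minimal usage sketch for get_cls_img (the root path below is hypothetical; any folder whose sub-folders contain files with the given suffix works):

cls_img = get_cls_img(root='./images_background/Futurama', suffix='.png')
for character, image_names in cls_img.items():
    print(character, len(image_names))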
Example #22
    def build_dict(self):
        js_list = list_dir(self._seed_dir)

        for js_path in js_list:
            with open(js_path, 'r') as f:
                js_name = os.path.basename(js_path)
                for line in f:
                    line = line.strip()
                    if not self.is_load(line):
                        continue

                    harness = self.get_harness(line)
                    if not harness.endswith('.js'):
                        continue

                    if js_name not in self._harness_dict:
                        self._harness_dict[js_name] = []
                    self._harness_dict[js_name] += [harness]
Example #23
 def today(self):
     torrents = dict(self.get_torrents())
     files = list(list_dir(config.events_dir))
     files.sort()
     events = defaultdict(dict)
     for f in files:
         for (tid, event) in self.load_events(f).items():
             events[tid].update(event)
     tids = torrents.keys()
     history = []
     for tid, e in events.items():
         info = self.get_torrent_info(e['filename'])
         info and e.update(info.__dict__)
         if tid in tids:
             torrents[tid].__dict__.update(e)
         elif e['last'].date() == date.today():
             history.append(e)
     return (torrents.values(), sorted(history, key = lambda e: self.event_date(e), reverse = True))
Example #24
def build_dict(ast_dir):
    '''
    Gather all AST .json files, load them (one dict per .json), and
    produce a map for each file.

    :param ast_dir:
    :return:
    '''
    def_dict = {}
    ast_list = list_dir(ast_dir)
    num_ast = len(ast_list)
    for idx, ast_name in enumerate(ast_list):
        msg = '[%d/%d] %s' % (idx + 1, num_ast, ast_name)
        print_msg(msg, 'INFO')
        js_name, ast = load_ast(ast_name)
        js_name = trim_seed_name(js_name)
        if js_name not in def_dict:
            def_dict[js_name] = set()
        build_def_dict(ast, def_dict[js_name])
    return def_dict
Example #25
def main(pool, conf):
    """
    Read from js-test-suite, process with the engine, and write stdout/stderr to data/log/*

    :param pool:
    :param conf:
    :return:
    """
    make_dir(conf.log_dir)

    js_list = []
    for js in list_dir(conf.seed_dir):
        if (js.endswith('.js')
                and os.path.getsize(js) < 30 * 1024):  # Excludes JS over 30 KB
            js_list += [js]

    num_js = len(js_list)
    msg = 'Start executing %d JS files' % (num_js)
    print_msg(msg, 'INFO')

    pool_map(pool, exec_eng, js_list, conf=conf)
Example #26
def main(conf):
    func = get_func(conf.eng_name)

    ret_dict = {}
    for log_path in list_dir(conf.log_dir):
        ret = func(log_path)
        if ret not in ret_dict: ret_dict[ret] = []
        ret_dict[ret] += [log_path]

    new_seed_dir = os.path.join(conf.data_dir, 'seed')
    make_dir(new_seed_dir)

    for log_path in ret_dict[0]:
        file_name = os.path.basename(log_path) + '.js'
        js_path = os.path.join(conf.seed_dir, file_name)
        new_js_path = os.path.join(new_seed_dir, file_name)

        with open(js_path, 'r') as fr, \
             open(new_js_path, 'w') as fw:
            for line in fr:
                if ('load("' not in line and 'load(\'' not in line):
                    fw.write(line)
Example #27
def main(pool, conf):
    ast_list = list_dir(conf.ast_dir)
    pool_map(pool, normalize, ast_list)
Example #28
def pre_play(nzbname, mode=None):
    iscanceled = False
    folder = INCOMPLETE_FOLDER + nzbname
    sab_nzo_id = SABNZBD.nzo_id(nzbname)
    file_list = utils.list_dir(folder)
    sab_file_list = []
    multi_arch_list = []
    if sab_nzo_id is None:
        sab_nzo_id_history = SABNZBD.nzo_id_history(nzbname)
    else:
        sab_file_list = SABNZBD.file_list(sab_nzo_id)
        file_list.extend(sab_file_list)
        sab_nzo_id_history = None
    file_list = utils.sorted_rar_file_list(file_list)
    multi_arch_list = utils.sorted_multi_arch_list(file_list)
    # Loop through all multi-part archives and add their files to the play list
    play_list = []
    for arch_rar, byte in multi_arch_list:
        if sab_nzo_id is not None:
            t = Thread(target=to_bottom,
                       args=(
                           sab_nzo_id,
                           sab_file_list,
                           file_list,
                       ))
            t.start()
            iscanceled = get_rar(folder, sab_nzo_id, arch_rar)
        if iscanceled:
            break
        else:
            if sab_nzo_id:
                set_streaming(sab_nzo_id)
            # TODO is this needed?
            time.sleep(1)
            # RAR ANALYSIS #
            in_rar_file_list = utils.rar_filenames(folder, arch_rar)
            movie_list = utils.sort_filename(in_rar_file_list)
            # Make sure we have a movie
            if not (len(movie_list) >= 1):
                xbmc.executebuiltin('Notification("NZBS","Not a movie!")')
                break
            # Who needs sample?
            movie_no_sample_list = utils.no_sample_list(movie_list)
            # If auto play is enabled we skip samples in the play_list
            if AUTO_PLAY and mode is not MODE_INCOMPLETE_LIST:
                for movie_file in movie_no_sample_list:
                    play_list.append(arch_rar)
                    play_list.append(movie_file)
            else:
                for movie_file in movie_list:
                    play_list.append(arch_rar)
                    play_list.append(movie_file)
            # If the movie is a .mkv we need the last rar
            if utils.is_movie_mkv(movie_list) and sab_nzo_id:
                # If we have a sample or other file, the second rar is also needed..
                if len(in_rar_file_list) > 1:
                    second_rar = utils.find_rar(file_list, 0)
                    iscanceled = get_rar(folder, sab_nzo_id, second_rar)
                last_rar = utils.find_rar(file_list, -1)
                iscanceled = get_rar(folder, sab_nzo_id, last_rar)
                if iscanceled:
                    break
    if iscanceled:
        return
    else:
        rar_file_list = [x[0] for x in file_list]
        if (len(rar_file_list) >= 1):
            if AUTO_PLAY and (mode is None or mode is MODE_JSONRPC):
                video_params = dict()
                if not mode:
                    video_params['mode'] = MODE_AUTO_PLAY
                else:
                    video_params['mode'] = MODE_JSONRPC
                video_params['play_list'] = urllib.quote_plus(
                    ';'.join(play_list))
                video_params['file_list'] = urllib.quote_plus(
                    ';'.join(rar_file_list))
                video_params['folder'] = urllib.quote_plus(folder)
                return play_video(video_params)
            else:
                return playlist_item(play_list, rar_file_list, folder,
                                     sab_nzo_id, sab_nzo_id_history)
        else:
            xbmc.executebuiltin(
                'Notification("NZBS","No rar\'s in the NZB!!")')
            return
Example #29
def wait_for_rar(folder, sab_nzo_id, some_rar):
    isCanceled = False
    is_rar_found = False
    # If some_rar exists we skip the dialogs
    for file, bytes in utils.sorted_rar_file_list(utils.list_dir(folder)):
        if file == some_rar:
            is_rar_found = True
            break
    if not is_rar_found:
        seconds = 0
        progressDialog = xbmcgui.DialogProgress()
        progressDialog.create('NZBS',
                              'Request to SABnzbd succeeded, waiting for ',
                              utils.short_string(some_rar))
        while not is_rar_found:
            seconds += 1
            time.sleep(1)
            dirList = utils.sorted_rar_file_list(utils.list_dir(folder))
            for file, bytes in dirList:
                if file == some_rar:
                    path = os.path.join(folder, file)
                    # Wait until the file is written to disk before proceeding
                    size_now = int(bytes)
                    size_later = 0
                    while (size_now != size_later) or (size_now == 0) or (size_later == 0):
                        size_now = os.stat(path).st_size
                        if size_now != size_later:
                            time.sleep(0.5)
                            size_later = os.stat(path).st_size
                    is_rar_found = True
                    break
            label = str(seconds) + " seconds"
            progressDialog.update(0,
                                  'Request to SABnzbd succeeded, waiting for',
                                  utils.short_string(some_rar), label)
            if progressDialog.iscanceled():
                progressDialog.close()
                dialog = xbmcgui.Dialog()
                ret = dialog.select('What do you want to do?',
                                    ['Delete job', 'Just download'])
                if ret == 0:
                    pause = SABNZBD.pause('', sab_nzo_id)
                    time.sleep(3)
                    delete_ = SABNZBD.delete_queue('', sab_nzo_id)
                    if "ok" not in delete_:
                        xbmc.log(delete_)
                        xbmc.executebuiltin(
                            'Notification("NZBS","Deleting failed")')
                    else:
                        xbmc.executebuiltin(
                            'Notification("NZBS","Deleting succeeded")')
                    iscanceled = True
                    return iscanceled
                if ret == 1:
                    iscanceled = True
                    xbmc.executebuiltin('Notification("NZBS","Downloading")')
                    return iscanceled
        progressDialog.close()
    return isCanceled
Example #30
def tile_to_csv(inputpath, coordspath, coordsfilename, patches_per_tile, patch_size, classes):
    """ save location of patches in csv-file.
    
    Locations of the top-left corner pixel of each patch are saved. These pixels
    are chosen at random, but the per-class proportions are respected.
    
    parameters
    ----------
        inputpath: string
            path to folders with tiles. Each tile should be in separate folder  
        coordspath: string
            path to outputfolder to save file with coordinates
        coordsfilename: string
            output filename, extension should be '.csv'
        patches_per_tile: int
            number of patches to extract per tile. Final number can be lower if the 
            classes cover very few pixels
        patch_size: int
            size of patch to extract. Final extracted patches will include padding 
            to be able to predict full image.
        classes: list
            list with classes to be predicted
    
    calls
    -----
        sample_patches_of_class()
    
    output
    -------
        outputfile: csv
            each row contains tile-name and row + column of top-left pixel of patch: 
            tile,row,column
            saved at outputpath.
    """
    
    # init
    dirs = list_dir(inputpath)   
    patch_size = patch_size // 5  # because of downsampling from 20cm x 20cm to 1m x 1m
    patch_size_padded = int(patch_size * 3) 
    
    if not os.path.isdir(coordspath):
        os.makedirs(coordspath)  
    
    for i_lap, d in enumerate(dirs):
    
        # ground truth
        path_SHP = inputpath + d + '/tare.tif'
        gt = gdal.Open(path_SHP,gdal.GA_ReadOnly)
        # resample to 1m resolution
        gt = gdal.Warp('', [gt], format='MEM', width=gt.RasterXSize//5, height=gt.RasterYSize//5, resampleAlg=0) 
        band = gt.GetRasterBand(1)
        gt = np.int16(band.ReadAsArray())
        del band        
        
        # take care of classes
        tara0_mask = gt==classes[0]
        tara20_mask = gt==classes[1]
        tara50_mask = gt==classes[2]
        woods_mask = np.logical_or(gt==classes[3],gt==656)
        no_coltivable_mask = np.logical_or(gt==classes[4],gt==780)
        gt[woods_mask]=classes[3]
        gt[no_coltivable_mask]=classes[4]
        classes_mask = np.logical_or(tara50_mask,np.logical_or(tara0_mask,tara20_mask))
        classes_mask = np.logical_or(no_coltivable_mask,np.logical_or(classes_mask,woods_mask))
        gt[np.logical_not(classes_mask)]=0
        rc_tara0 = np.argwhere(tara0_mask[0:-patch_size_padded, 0:-patch_size_padded])
        rc_tara20 = np.argwhere(tara20_mask[0:-patch_size_padded, 0:-patch_size_padded])
        rc_tara50 = np.argwhere(tara50_mask[0:-patch_size_padded, 0:-patch_size_padded])
        rc_woods = np.argwhere(woods_mask[0:-patch_size_padded, 0:-patch_size_padded])
        rc_no_coltivable = np.argwhere(no_coltivable_mask[0:-patch_size_padded, 0:-patch_size_padded])
        rc_UPAS = np.argwhere(gt[0:-patch_size_padded, 0:-patch_size_padded]!=0)
        
        if np.sum(tara0_mask)==0 and np.sum(tara20_mask)==0 and np.sum(tara50_mask)==0 and np.sum(woods_mask)==0 and np.sum(no_coltivable_mask)==0 :
            continue
    
        # sample patches and write coordinate of origin to output csv-file
        sample_patches_of_class(rc_tara0, rc_UPAS, patches_per_tile, classes[0], gt, patch_size_padded, coordspath+coordsfilename,d)
        sample_patches_of_class(rc_tara20, rc_UPAS, patches_per_tile, classes[1], gt, patch_size_padded, coordspath+coordsfilename,d)
        sample_patches_of_class(rc_tara50, rc_UPAS, patches_per_tile, classes[2], gt, patch_size_padded, coordspath+coordsfilename,d)
        sample_patches_of_class(rc_woods, rc_UPAS, patches_per_tile, classes[3], gt, patch_size_padded, coordspath+coordsfilename,d)
        sample_patches_of_class(rc_no_coltivable, rc_UPAS, patches_per_tile, classes[4], gt, patch_size_padded, coordspath+coordsfilename,d)
    
        del gt
        gc.collect()
        if (i_lap + 1) % 10 == 0:
            print('\r {}/{}'.format(i_lap+1, len(dirs)),end='')
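A hedged invocation sketch for tile_to_csv (all paths are hypothetical; the class codes mirror the defaults used by tile_to_csv_grid below):

tile_to_csv(inputpath='./tiles/',
            coordspath='./coords/',
            coordsfilename='train_patches.csv',
            patches_per_tile=100,
            patch_size=480,
            classes=[638, 659, 654, 650, 770])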
Example #31
def tile_to_csv_grid(inputpath,
                     coordspath,
                     coordsfilename,
                     patch_size_padded,
                     classes=[638, 659, 654, 650, 770],
                     tile=None,
                     fraction=1,
                     final=False):
    """ save location of patches in csv-file.
    
    Locations of top-left corner-pixel of patches are saved in csv. These corner
    pixels are based on a grid overlaying the original tiles.
    
    parameters
    ----------
        inputpath: string
            path to folders with tiles. Each tile should be in separate folder  
        coordspath: string
            path to outputfolder to save file with coordinates
        coordsfilename: string
            output filename, extension should be '.csv'
        patch_size_padded: int
            size of patch to extract including padding to be able to predict 
            full image.
        classes: list
            classes to be considered in the ground truth
        tile: string (optional)
            name of folder where tile is stored
        fraction: float (optional)
            float between 0 and 1, fraction of pixels that must be in 'classes'
    
    calls
    -----
        find_patches_options()
        plot_patch_options()
    
    output
    -------
        outputfile: csv
            each row contains row + column of top-left pixel of patch,tilename and number of rows/cols in original tile: 
            row,column,tile,tile-rows, tile-cols
            saved at coordspath with name coordsfilename
    """

    # init
    if tile is not None:
        dirs = [tile]
    else:
        dirs = list_dir(inputpath)

    if not os.path.isdir(coordspath):
        os.makedirs(coordspath)

    for i_lap, d in enumerate(dirs):

        # get tile
        path_shp = inputpath + d + '/tare.tif'
        ds = gdal.Open(path_shp, gdal.GA_ReadOnly)
        gt = ds.GetRasterBand(1).ReadAsArray()
        gt = np.uint16(gt)
        ds = None

        # take care of classes
        gt[np.where(gt == 656)] = 650
        gt[np.where(gt == 780)] = 770
        gt[np.isin(gt, classes) == False] = 0

        # get options
        if i_lap == 0:
            options = find_patches_options(gt, patch_size_padded, d, fraction,
                                           final)
        else:
            options = options.append(
                find_patches_options(gt, patch_size_padded, d, fraction,
                                     final))

        if i_lap % 50 == 0:
            print('\r {}/{}'.format(i_lap, len(dirs)), end='')

    # save in csv
    options['row'] = options['row'].multiply(patch_size_padded)
    options['col'] = options['col'].multiply(patch_size_padded)
    options.to_csv(coordspath + coordsfilename, index=False)
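A hedged invocation sketch for tile_to_csv_grid (paths are hypothetical; classes falls back to the function's default):

tile_to_csv_grid(inputpath='./tiles/',
                 coordspath='./coords/',
                 coordsfilename='grid_patches.csv',
                 patch_size_padded=288,
                 fraction=0.9)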
Example #32
def csv_to_patch(inputpath,
                 dtmpath,
                 patchespath,
                 coordsfile,
                 patch_size,
                 classes,
                 resolution,
                 tile=None):
    """ extract the patches from the original images, normalize and save.
        Ground truth is converted to one-hot labels. 
    
    Parameters
    ----------
        inputpath: string
            path to folders with tiles. Each tile should be in separate folder
        dtmpath: string 
            path to folder containing a dtm (will be resampled to same resolution)
        patchespath: string 
            path to outputfolder to save the patches
        coordsfile: csv-file
            path to file where the coordinates are saved
        patch_size: int
            size of patch to extract. Final extracted patches will include padding 
            to be able to predict full image.
        classes: list
            list with classes to be predicted
        resolution: int
            either 20 for 20cm or 1 for 1m
        tile: string
            name of folder where tile is stored
    Calls
    -----
        read_patch() 
        read_patch_1m()
        to_categorical_classes()
    
    Output
    ------
        patches saved at patchespath in two folders: 
            images and labels.
    """
    imagespath = patchespath + 'images/'
    labelspath = patchespath + 'labels/'

    if not os.path.isdir(patchespath):
        os.makedirs(patchespath)
    if not os.path.isdir(imagespath):
        os.makedirs(imagespath)
    if not os.path.isdir(labelspath):
        os.makedirs(labelspath)

    if tile is not None:
        dirs = [tile]
    else:
        dirs = list_dir(inputpath)
    coords = pd.read_csv(coordsfile, sep=',')
    patch_size_padded = int(patch_size * 3)

    if resolution == 20:
        # resample dtm to 20cm x 20cm
        for d in dirs:
            if not os.path.isdir(dtmpath + d + '/'):
                os.makedirs(dtmpath + d + '/')
            input_file = inputpath + d + '/dtm135.tif'
            shadow_file = inputpath + d + '/' + d + '_NIR.tif'
            dtm_file = dtmpath + d + '/dtm135_20cm.tif'

            ds = gdal.Open(shadow_file)
            width = ds.RasterXSize
            height = ds.RasterYSize
            ds = gdal.Warp(dtm_file,
                           input_file,
                           format='GTiff',
                           width=width,
                           height=height,
                           resampleAlg=1)
            ds = None

        # extract patches
        for idx in range(len(coords)):
            im, gt = read_patch(inputpath, dtmpath, coords, patch_size_padded,
                                idx, classes)
            np.save(imagespath + str(idx) + '.npy', im)
            np.save(labelspath + str(idx) + '.npy', gt)
            if idx % 500 == 0:
                print('\r {}/{}'.format(idx, len(coords)), end='')

    elif resolution == 1:
        warpedtile = None
        for idx, d in enumerate(coords['tiles']):
            # resample rgb + nir to 1m (keep in memory (no space on disk))
            if not d == warpedtile:
                dtm_file = inputpath + d + '/dtm135.tif'
                nir_file = inputpath + d + '/' + d + '_NIR.tif'
                rgb_file = inputpath + d + '/' + d + '_RGB.tif'
                gt_file = inputpath + d + '/tare.tif'

                ds = gdal.Open(dtm_file)
                width = ds.RasterXSize
                height = ds.RasterYSize
                nir_1m = gdal.Warp("",
                                   nir_file,
                                   format='mem',
                                   width=width,
                                   height=height,
                                   resampleAlg=1)
                rgb_1m = gdal.Warp("",
                                   rgb_file,
                                   format='mem',
                                   width=width,
                                   height=height,
                                   resampleAlg=1)
                gt_1m = gdal.Warp("",
                                  gt_file,
                                  format='mem',
                                  width=width,
                                  height=height,
                                  resampleAlg=0)
                warpedtile = d
                ds = None

            # extract patches
            im, gt = read_patch_1m(rgb_1m, nir_1m, dtm_file, gt_1m, coords,
                                   patch_size_padded, idx, classes)
            np.save(imagespath + str(idx) + '.npy', im)
            np.save(labelspath + str(idx) + '.npy', gt)
            if idx % 500 == 0:
                print('\r {}/{}'.format(idx, len(coords)), end='')
    else:
        print("Only resolutions of 20cm x 20cm (20) and 1m x 1m (1) are supported.")
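A hedged invocation sketch for csv_to_patch, matching the 20 cm branch above (paths are hypothetical; the coordinates csv is the kind of file written by tile_to_csv or tile_to_csv_grid):

csv_to_patch(inputpath='./tiles/',
             dtmpath='./dtm/',
             patchespath='./patches/',
             coordsfile='./coords/train_patches.csv',
             patch_size=480,
             classes=[638, 659, 654, 650, 770],
             resolution=20)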
Example #33
 def get_queue(self):
     for f in sorted(list_dir(config.torrent_queue_dir), key = lambda f: os.stat(f).st_ctime):
         yield self.get_torrent_info(os.path.basename(f))
Example #34
def return_data(args):
    # train_dset_dir = args.train_dset_dir
    # test_dset_dir = args.test_dset_dir

    set_seed(args.seed)

    train_batch_size = args.train_batch_size
    test_batch_size = args.test_batch_size
    num_workers = args.num_workers
    image_size = args.image_size
    time_window = args.time_window
    darker_threshold = args.darker_threshold
    trivial_augmentation = bool(args.trivial_augmentation)
    sliding_augmentation = bool(args.sliding_augmentation)

    transform_list = [
        transforms.Resize((image_size, image_size)),
        RetouchDarker(darker_threshold)
    ]

    if args.channel == 1:
        transform_list.append(transforms.Grayscale(num_output_channels=1))

    if trivial_augmentation:
        trivial_transform_list = [
            transforms.ColorJitter(brightness=0.4,
                                   contrast=0.4,
                                   saturation=0.4,
                                   hue=0.1),
            transforms.RandomResizedCrop(image_size,
                                         scale=(0.8, 1.0),
                                         ratio=(1, 1)),
            RandomNoise(mean=0, std=10),
        ]
        transform_list.append(transforms.RandomChoice(trivial_transform_list))

    if sliding_augmentation:
        transform_list.append(RandomTimeWindow(time_window=time_window))
    else:
        transform_list.append(TimeWindow(time_window=time_window))

    transform_list.append(transforms.ToTensor())

    if args.channel == 1:
        transform_list.append(transforms.Normalize([0.5], [0.5]))
    else:
        transform_list.append(
            transforms.Normalize([0.5] * args.channel, [0.5] * args.channel))
    print(transform_list)
    transform = transforms.Compose(transform_list)

    # if args.channel == 1:
    #     transform = transforms.Compose([
    #         transforms.Resize((image_size, image_size)),
    #         transforms.Grayscale(num_output_channels=1),
    #         TimeWindow(time_window=time_window),
    #         transforms.ToTensor(),
    #         transforms.Normalize([0.5], [0.5]),
    #     ])
    # else:
    #     transform = transforms.Compose([
    #         transforms.Resize((image_size, image_size)),
    #         TimeWindow(time_window=time_window),
    #         transforms.ToTensor(),
    #         transforms.Normalize([0.5] * args.channel, [0.5] * args.channel),
    #     ])
    """
    train_root = Path(train_dset_dir)
    test_root = Path(test_dset_dir)
    train_kwargs = {'root': train_root, 'transform': transform}
    test_kwargs = {'root': test_root, 'transform': transform}
    dset = ImageFolder

    train_data = dset(**train_kwargs)
    test_data = dset(**test_kwargs)
    train_loader = DataLoader(train_data,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=num_workers,
                              pin_memory=True,
                              drop_last=True)
    test_loader = DataLoader(test_data,
                             batch_size=test_batch_size,
                             shuffle=True,
                             num_workers=num_workers,
                             pin_memory=True,
                             drop_last=True)

    data_loader = dict()
    data_loader['train'] = train_loader
    data_loader['test'] = test_loader
    """
    def _init_fn(worker_id):
        np.random.seed(int(args.seed))

    data_loader = {}
    if args.incremental:
        root = './data/per_acitivity/'
    else:
        root = './data/per_subject/'

    num_tasks = len(list_dir(root))

    train_imagefolders = []
    test_imagefolders = []
    for i in range(num_tasks):

        data_loader['task{}'.format(i)] = {}

        target_subject = join(root, 'Subject{}'.format(i + 1))

        train_data = ImageFolder(root=target_subject + '/train',
                                 transform=transform)
        test_data = ImageFolder(root=target_subject + '/test',
                                transform=transform)

        train_imagefolders.append(train_data)
        test_imagefolders.append(test_data)

    if args.continual:
        for i in range(num_tasks):
            # The data loaders are already created before the CNN model is trained, so replay cannot be done here.
            if args.task_upper_bound:
                train_dataset = ConcatDataset(train_imagefolders[:i + 1])
                test_dataset = ConcatDataset(test_imagefolders[:i + 1])
            else:
                train_dataset = train_imagefolders[i]
                test_dataset = test_imagefolders[i]
            train_loader = DataLoader(train_dataset,
                                      batch_size=train_batch_size,
                                      shuffle=True,
                                      num_workers=num_workers,
                                      pin_memory=True,
                                      drop_last=True,
                                      worker_init_fn=_init_fn)
            test_loader = DataLoader(test_dataset,
                                     batch_size=test_batch_size,
                                     shuffle=True,
                                     num_workers=num_workers,
                                     pin_memory=True,
                                     drop_last=True,
                                     worker_init_fn=_init_fn)

            data_loader['task{}'.format(i)]['train'] = train_loader
            data_loader['task{}'.format(i)]['test'] = test_loader
    else:
        num_tasks = 1
        train_data_concat = ConcatDataset(train_imagefolders)
        test_data_concat = ConcatDataset(test_imagefolders)

        train_loader = DataLoader(train_data_concat,
                                  batch_size=train_batch_size,
                                  shuffle=True,
                                  num_workers=num_workers,
                                  pin_memory=True,
                                  drop_last=True,
                                  worker_init_fn=_init_fn)
        test_loader = DataLoader(test_data_concat,
                                 batch_size=test_batch_size,
                                 shuffle=True,
                                 num_workers=num_workers,
                                 pin_memory=True,
                                 drop_last=True,
                                 worker_init_fn=_init_fn)

        data_loader['train'] = train_loader
        data_loader['test'] = test_loader

    return data_loader, num_tasks, transform
Example #35
def main(pool, conf):
    ast_list = list_dir(conf.ast_dir)
    ast_data = pool_map(pool, fragmentize, ast_list)
    return ast_data
Example #36
def pre_play(nzbname, mode = None):
    iscanceled = False
    folder = INCOMPLETE_FOLDER + nzbname
    sab_nzo_id = SABNZBD.nzo_id(nzbname)
    file_list = utils.list_dir(folder)
    sab_file_list = []
    multi_arch_list = []
    if sab_nzo_id is None:
        sab_nzo_id_history = SABNZBD.nzo_id_history(nzbname)
    else:
        sab_file_list = SABNZBD.file_list(sab_nzo_id)
        file_list.extend(sab_file_list)
        sab_nzo_id_history = None
    file_list = utils.sorted_rar_file_list(file_list)
    multi_arch_list = utils.sorted_multi_arch_list(file_list)
    # Loop through all multi-part archives and add their files to the play list
    play_list = []
    for arch_rar, byte in multi_arch_list:
        if sab_nzo_id is not None:
            t = Thread(target=to_bottom, args=(sab_nzo_id, sab_file_list, file_list,))
            t.start()
            iscanceled = get_rar(folder, sab_nzo_id, arch_rar)
        if iscanceled:
            break
        else:
            if sab_nzo_id:
                set_streaming(sab_nzo_id)
            # TODO is this needed?
            time.sleep(1)
            # RAR ANALYSIS #
            in_rar_file_list = utils.rar_filenames(folder, arch_rar)
            movie_list = utils.sort_filename(in_rar_file_list)
            # Make sure we have a movie
            if not (len(movie_list) >= 1):
                xbmc.executebuiltin('Notification("NZBS","Not a movie!")')
                break
            # Who needs sample?
            movie_no_sample_list = utils.no_sample_list(movie_list)
            # If auto play is enabled we skip samples in the play_list
            if AUTO_PLAY and mode is not MODE_INCOMPLETE_LIST:
                for movie_file in movie_no_sample_list:
                    play_list.append(arch_rar)
                    play_list.append(movie_file)
            else:
                for movie_file in movie_list:
                    play_list.append(arch_rar)
                    play_list.append(movie_file)
            # If the movie is a .mkv we need the last rar
            if utils.is_movie_mkv(movie_list) and sab_nzo_id:
                # If we have a sample or other file, the second rar is also needed..
                if len(in_rar_file_list) > 1:
                    second_rar = utils.find_rar(file_list, 0)
                    iscanceled = get_rar(folder, sab_nzo_id, second_rar)
                last_rar = utils.find_rar(file_list, -1)
                iscanceled =  get_rar(folder, sab_nzo_id, last_rar)
                if iscanceled: 
                    break 
    if iscanceled:
        return
    else:
        rar_file_list = [x[0] for x in file_list]
        if (len(rar_file_list) >= 1):
            if AUTO_PLAY and ( mode is None or mode is MODE_JSONRPC):
                video_params = dict()
                if not mode:
                    video_params['mode'] = MODE_AUTO_PLAY
                else:
                    video_params['mode'] = MODE_JSONRPC
                video_params['play_list'] = urllib.quote_plus(';'.join(play_list))
                video_params['file_list'] = urllib.quote_plus(';'.join(rar_file_list))
                video_params['folder'] = urllib.quote_plus(folder)
                return play_video(video_params)   
            else:
                return playlist_item(play_list, rar_file_list, folder, sab_nzo_id, sab_nzo_id_history)
        else:
            xbmc.executebuiltin('Notification("NZBS","No rar\'s in the NZB!!")')
            return
Example #37
def main(pool, conf):
    ast_list = list_dir(conf.ast_dir)
    ast_list = pool_map(pool, strip, ast_list, conf=conf)