Example #1
    def initDialog(self):
        print("init taskdata dialog")
        # init widgets
        self.ui.comboUser.setModel(self._domainModel.dicts[const.DICT_USER])
        self.ui.comboProject.setModel(self._domainModel.dicts[const.DICT_PROJECT])
        self.ui.comboInit.setModel(self._domainModel.dicts[const.DICT_INITIATOR])
        self.ui.comboPriority.setModel(self._domainModel.dicts[const.DICT_PRIORITY])

        walk(lambda model: model.makeUncheckable(), self._domainModel.dicts.values())

        # for k, v in self._domainModel.dicts.items():
        #     v.itemsCheckable = False
        # self.ui.tableProduct.setModel(self._productModel)
        # self._productModel.initModel(self._productList)

        # setup signals
        self.ui.btnOk.clicked.connect(self.onBtnOkClicked)
        self.ui.btnUserAdd.clicked.connect(self.onBtnUserAddClicked)
        self.ui.btnUserEdit.clicked.connect(self.onBtnUserEditClicked)
        self.ui.btnProjectAdd.clicked.connect(self.onBtnProjectAddClicked)
        self.ui.btnProjectEdit.clicked.connect(self.onBtnProjectEditClicked)
        self.ui.btnInitAdd.clicked.connect(self.onBtnInitAddClicked)
        self.ui.btnInitEdit.clicked.connect(self.onBtnInitEditClicked)

        # set widget data
        if self._currentItem is None:
            self.resetWidgets()
        else:
            self.updateWidgets()
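
All of these examples rely on the same behaviour of funcy.walk: it maps a function over a collection and returns a collection of the same type, so when the result is discarded (as with the dict models above) it is being used purely for its side effects. A minimal illustrative sketch with made-up values:

from funcy import walk

walk(str.upper, ["a", "b"])              # -> ['A', 'B']   (list in, list out)
walk(abs, (-1, 2, -3))                   # -> (1, 2, 3)    (tuple in, tuple out)
walk(lambda kv: (kv[0], kv[1] * 2),
     {"x": 1, "y": 2})                   # -> {'x': 2, 'y': 4} (dicts are walked as (key, value) pairs)
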
Example #2
def register_apps(app):
  for pkg in w_utils.find_modules('apps', True):
    pkg_views = '%s.views' % pkg
    objs = [get_module_obj(pkg_views, obj) for obj in ['bpa', 'bp']]
    funcy.walk(funcy.silent(app.register_blueprint), objs)
    app_init = get_module_obj(pkg, 'app_init')
    if app_init:
      app_init(app)
Example #3
    def preload(self):
        """只需运行一次,先将fromdate前的数据都load到preload_bar_list"""
        """若没有fromdate,则不用load"""
        coll = self.set_collection()

        if self.fromdate:
            buff_date = arrow.get(self.fromdate).replace(days=-self.buffer_days)
            buff_date = buff_date.format('YYYY-MM-DD HH:mm:ss')
            self.set_iteral_buffer(coll.find({'date': {'$gt': buff_date, '$lt': self.fromdate}}))
        else:
            self.set_iteral_buffer([])

        self.preload_bar_list = [i for i in self.iteral_buffer]
        fy.walk(lambda x: x.pop('_id'), self.preload_bar_list)
        self.preload_bar_list.reverse()
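
Here fy.walk is again used only for its side effect: the lambda mutates each bar dict in place, and the list of popped '_id' values that walk returns is discarded. An equivalent explicit loop (a sketch, not from the original source):

for bar in self.preload_bar_list:
    bar.pop('_id')
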
Example #4
    def __getitem__(self, item):
        (input_fpath, targets_fpaths), augmentation = self.data[item]

        input_img = self._load_input_image(input_fpath)
        width, height = input_img.size
        input_img = self.resize(input_img)

        if self.input_preprocess is not None:
            input_img = self.input_preprocess(input_img)

        input_img = augmentation(input_img)
        input_img = self.to_tensor(input_img)
        input_img = self.normalize(input_img)

        target_imgs = None
        if self.with_targets:
            target_imgs = self.selection_method(targets_fpaths)
            target_imgs = funcy.walk(self.to_tensor, target_imgs)

        fname = os.path.basename(input_fpath).split(".")[0]

        if self.selection_method == self._random_selection:
            target_imgs = target_imgs[0]

        return input_img, target_imgs, fname, (width, height)
Example #5
    def _cache_key(self):
        """
        Compute a cache key for this queryset
        """
        md = md5()
        md.update('%s.%s' % (self.__class__.__module__, self.__class__.__name__))
        # Vary cache key for proxy models
        md.update('%s.%s' % (self.model.__module__, self.model.__name__))
        # Protect from field list changes in model
        md.update(stamp_fields(self.model))
        # Use query SQL as part of a key
        try:
            sql, params = self.query.get_compiler(self._db or DEFAULT_DB_ALIAS).as_sql()
            try:
                sql_str = sql % params
            except UnicodeDecodeError:
                sql_str = sql % walk(force_text, params)
            md.update(smart_str(sql_str))
        except EmptyResultSet:
            pass
        # If query results differ depending on database
        if self._cacheprofile and not self._cacheprofile['db_agnostic']:
            md.update(self.db)
        # _iterable_class only appeared in Django 1.9
        it_class = getattr(self, '_iterable_class', None)
        if it_class:
            md.update('%s.%s' % (it_class.__module__, it_class.__name__))
        # 'flat' attribute changes results formatting for values_list() in Django 1.8 and earlier
        if hasattr(self, 'flat'):
            md.update(str(self.flat))

        return 'q:%s' % md.hexdigest()
Example #6
    def __init__(self, fpath: str, augmentations: List = None, input_preprocess: Callable = None,
                 target_preprocess: Callable = None, with_targets: bool = True, shape: Tuple = (256, 256)):
        if not os.path.isfile(fpath):
            raise FileNotFoundError("Could not find dataset file: '{}'".format(fpath))
        self.with_targets = with_targets
        self.size = shape

        if augmentations:
            augmentations = [lambda x: x] + augmentations
        else:
            augmentations = [lambda x: x]

        self.resize = Resize(size=self.size)
        # self.normalize = Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        self.normalize = Normalize([0.485, 0.456, 0.406], [1.0, 1.0, 1.0])
        self.to_tensor = ToTensor()
        self.input_preprocess = input_preprocess
        self.target_preprocess = target_preprocess

        with open(fpath, "r") as f:
            lines = filter(lambda l: bool(l), f.read().split("\n"))
            if self.with_targets:
                data = [(input.strip(), target.strip())
                        for input, target in funcy.walk(lambda l: l.split(" "), lines)]
            else:
                data = [(input.strip(), None) for input in lines]

        self.data = [(d, augmentation) for augmentation in augmentations for d in data]
Example #7
    def _cache_key(self, prefix=True):
        """
        Compute a cache key for this queryset
        """
        md = md5()
        md.update('%s.%s' %
                  (self.__class__.__module__, self.__class__.__name__))
        # Vary cache key for proxy models
        md.update('%s.%s' % (self.model.__module__, self.model.__name__))
        # Protect from field list changes in model
        md.update(stamp_fields(self.model))
        # Use query SQL as part of a key
        try:
            sql, params = self.query.get_compiler(self.db).as_sql()
            try:
                sql_str = sql % params
            except UnicodeDecodeError:
                sql_str = sql % walk(force_text, params)
            md.update(smart_str(sql_str))
        except EmptyResultSet:
            pass
        # If query results differ depending on database
        if self._cacheprofile and not self._cacheprofile['db_agnostic']:
            md.update(self.db)
        # Iterable class packs results differently
        it_class = self._iterable_class
        md.update('%s.%s' % (it_class.__module__, it_class.__name__))

        cache_key = 'q:%s' % md.hexdigest()
        return self._prefix + cache_key if prefix else cache_key
Example #8
 def _drop_duplicates(self):
     """删除重复数据"""
     coll = self.__set_collection()
     c = coll.aggregate([{
         "$group": {
             "_id": {
                 'date': '$date'
             },
             "count": {
                 '$sum': 1
             },
             "dups": {
                 '$addToSet': '$_id'
             }
         }
     }, {
         '$match': {
             'count': {
                 "$gt": 1
             }
         }
     }])
     data = [i for i in c]
     duplicates = fy.walk(self.__get_dups_id, data)
     dups_id_list = fy.cat(duplicates)
     for i in dups_id_list:
         coll.delete_one({'_id': i})
     print("OK, duplicates droped! Done!")
Example #9
def save_2mgdb(client, param, db):
    collection = db[param['api_param']['granularity']]

    for r in InstrumentsCandlesFactory(instrument=param['instrument'],
                                       params=param['api_param']):
        client.request(r)

        # data type
        # r.response:
        # {'instrument': 'EUR_USD',
        #  'granularity': 'S5',
        #  'candles': [{'complete': True, 'volume': 1, 'time': '2018-01-01T22:00:00.000000000Z', 'mid': {'o': '1.20052', 'h': '1.20052', 'l': '1.20052', 'c': '1.20052'}}]
        # }
        candles = r.response.get('candles')  # candles is a list
        if (candles == []):
            print(
                '\t * skip to write next: find empty data (with candles == [])'
            )
            continue
        else:
            print('\t - download progress: {}'.format(candles[0].get('time')))

        # write to mongodb
        bar_list = fy.walk(normalize_raw_candles, candles)

        start_time = time.time()
        for bar in bar_list:
            collection.insert_one(bar)
        end_time = time.time()
        print('\t - it took {} second to write to mongodb '.format(end_time -
                                                                   start_time))

    length = drop_duplicates_func(collection)
    print(f'\t <<collection:{collection}>> has been drop {length} duplicates!')
Example #10
    def _cache_key(self, prefix=True):
        """
        Compute a cache key for this queryset
        """
        md = md5()
        md.update('%s.%s' %
                  (self.__class__.__module__, self.__class__.__name__))
        # Vary cache key for proxy models
        md.update('%s.%s' % (self.model.__module__, self.model.__name__))
        # Protect from field list changes in model
        md.update(stamp_fields(self.model))
        # Use query SQL as part of a key
        try:
            sql, params = self.query.get_compiler(self.db).as_sql()
            try:
                sql_str = sql % params
            except UnicodeDecodeError:
                sql_str = sql % walk(force_text, params)
            md.update(smart_str(sql_str))
        except EmptyResultSet:
            pass
        # If query results differ depending on database
        if self._cacheprofile and not self._cacheprofile['db_agnostic']:
            md.update(self.db)
        # _iterable_class only appeared in Django 1.9
        it_class = getattr(self, '_iterable_class', None)
        if it_class:
            md.update('%s.%s' % (it_class.__module__, it_class.__name__))
        # 'flat' attribute changes results formatting for values_list() in Django 1.8 and earlier
        if hasattr(self, 'flat'):
            md.update(str(self.flat))

        cache_key = 'q:%s' % md.hexdigest()
        return self._prefix + cache_key if prefix else cache_key
Example #11
def cookiecutter(*args, **kwargs) -> str:
    """Call cookiecutter.main.cookiecutter after stringifying paths

    Return:
        project directory path
    """
    args = fy.walk(_stringify_path, args)
    kwargs = fy.walk_values(_stringify_path, kwargs)
    return _cookiecutter(*args, **kwargs)
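
The _stringify_path helper is not shown here. A minimal sketch, assuming it only needs to turn pathlib.Path objects into strings and pass everything else through; fy.walk keeps args a tuple, while fy.walk_values converts only the values of the keyword dict:

from pathlib import Path

def _stringify_path(value):
    # hypothetical helper: convert Path objects to str, leave other values untouched
    return str(value) if isinstance(value, Path) else value
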
Example #12
def load_prefix_lists():
    all_words = load_all_words()
    prefix_lists = {}
    for i in range(1, 8):  # only need prefixes up to 7, right?
        # ignore words that are the length of the prefix or less
        long_enough_words = f.select(lambda d: len(d) > i, all_words)
        # grab the first i letters
        prefix_lists[i] = set(f.walk(lambda d: d[0:i], long_enough_words))
        # this line is probably extraneous at this point, right? 
        # prefix_lists[i]= f.select(lambda d: len(d)==i, prefix_lists[i])
    return prefix_lists
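
A typical way to use these prefix sets is to prune a word search early, as the comments in Example #25 below suggest. A small sketch assuming the load_prefix_lists defined above:

prefixes = load_prefix_lists()

def could_start_a_word(fragment):
    # hypothetical helper: True if some stored word begins with `fragment`
    # (only prefixes of length 1..7 are kept above)
    n = len(fragment)
    return n in prefixes and fragment in prefixes[n]
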
Example #13
def saveOnus_f(ip):
    mark, rslt = Zte.get_onus(ip)[:-1]
    if mark == 'success' and rslt:
        _ff = lambda x: walk(partial(merge, (ip, x[0])), x[1])
        rslt1 = lmapcat(_ff, rslt)
        with open(result_file, 'a') as frslt:
            for record in rslt1:
                ip, port, onuid, loid = record
                frslt.write("{ip},{port},{onuid},{loid}\n"
                            .format(ip=ip, port=port, onuid=onuid, loid=loid))
    with open(log_file, 'a') as flog:
        flog.write("{ip}:{mark}\n".format(ip=ip, mark=mark))
Example #14
    def rolling(self, start, end):
        if start != 0:
            delta = end - start if end != float('inf') else end
            return self.rolling(0, delta) << start

        def apply_window(time_val):
            t, _ = time_val
            values = self[start + t:end + t].values()
            # Note: {} forces application of tuple.
            values = fn.merge_with(tuple, {}, *values)
            return (t, values)

        return self.evolve(data=fn.walk(apply_window, self.data),
                           end=self.end - end if end < self.end else self.end)
Example #15
def saveOnus_f(ip):
    mark, rslt = Zte.get_onus(ip)[:-1]
    if mark == 'success' and rslt:
        _ff = lambda x: walk(partial(merge, (ip, x[0])), x[1])
        rslt1 = lmapcat(_ff, rslt)
        with open(result_file, 'a') as frslt:
            for record in rslt1:
                ip, port, onuid, loid = record
                frslt.write("{ip},{port},{onuid},{loid}\n".format(ip=ip,
                                                                  port=port,
                                                                  onuid=onuid,
                                                                  loid=loid))
    with open(log_file, 'a') as flog:
        flog.write("{ip}:{mark}\n".format(ip=ip, mark=mark))
Example #16
    def _combine_and_insert(self, data):
        data = data['candles']
        candle_list = fy.walk(self._normalize, data)

        lenth = len(candle_list)  
        coll = self._set_collection()

        i = 0

        for bar in candle_list:
            i += 1
            coll.insert_one(bar)

            if self.print_log:
                date = bar['date']
Example #17
def correct_text_msg(from_text, to_text, insert_between=''):
    differences = unified_diff(from_text.split(), to_text.split())
    differences = drop(3, differences)
    differences = group_differences(differences)
    res = []
    for diff in differences:
        type_, text = diff[0], diff[1:]
        if type_ == '-':
            res += [walk(strike, text)]
            res += insert_between
        elif type_ == '+':
            res += [embolden(text)]
        else:
            res += [text]
    return ' '.join(res)
Example #18
def deepxrtype(obj):
    """
    Recursive variant of `xrtype()`.

    >>> deepxrtype(1)
    (<class 'int'>,)
    >>> deepxrtype('x')
    (<class 'str'>,)
    >>> deepxrtype((1, 'x'))
    (<class 'int'>, <class 'str'>)
    >>> deepxrtype((1, 'x', ('y', 'z')))
    (<class 'int'>, <class 'str'>, (<class 'str'>, <class 'str'>))
    """

    return fy.walk(xrtype, box(obj))
Example #19
    def _drop_duplicates(self):
        """Remove duplicate documents."""
        coll = self._set_collection()
        c = coll.aggregate([{"$group":
                                 {"_id": {'id': '$id'},      # this grouping is what finds the duplicates
                                  "count": {'$sum': 1},            # $addToSet appends values but filters out duplicates
                                  "dups": {'$addToSet': '$_id'}}},  # ($push would append without filtering duplicates)
                            {'$match': {'count': {"$gt": 1}}}
                            ]
                           )
        data = [i for i in c]
        duplicates = fy.walk(self.__get_dups_id, data)
        dups_id_list = fy.cat(duplicates)

        for i in dups_id_list:
            coll.delete_one({'_id': i})
Example #20
    def __init__(self,
                 fpath: str,
                 augmentations: List = None,
                 input_preprocess: Callable = None,
                 target_preprocess: Callable = None,
                 with_targets: bool = True,
                 select="random",
                 shape: Tuple = (256, 256)):
        if not os.path.isfile(fpath):
            raise FileNotFoundError(
                "Could not find dataset file: '{}'".format(fpath))

        self.with_targets = with_targets
        self.size = shape

        if augmentations:
            augmentations = [lambda x: x] + augmentations
        else:
            augmentations = [lambda x: x]

        self.resize = Resize(size=self.size)
        # self.normalize = Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        self.normalize = Normalize([0.485, 0.456, 0.406], [1.0, 1.0, 1.0])
        self.to_tensor = ToTensor()
        self.input_preprocess = input_preprocess
        self.target_preprocess = target_preprocess

        with open(fpath, "r") as f:
            lines = filter(lambda l: bool(l), f.read().split("\n"))
            if self.with_targets:
                data = []
                for line in lines:
                    fpaths = line.split(" ")
                    input_ = fpaths[0].strip()
                    targets = funcy.walk(lambda f: f.strip(), fpaths[1:])

                    data.append((input_, targets))
            else:
                data = [(input.strip(), None) for input in lines]

        if select == "all":
            self.selection_method = self._select_all
        else:
            self.selection_method = self._random_selection

        self.data = [(d, augmentation) for augmentation in augmentations
                     for d in data]
Example #21
    def __init__(self, fpath, augmentation=None, with_targets=True):
        if not os.path.isfile(fpath):
            raise FileNotFoundError(
                "Could not find dataset file: '{}'".format(fpath))

        if not augmentation:
            augmentation = []
        n_augmentation = math.factorial(
            len(augmentation)) if len(augmentation) > 0 else 0
        augmentation_combinations = list(
            itertools.product([0, 1], repeat=n_augmentation))

        self.with_targets = with_targets
        self.size = (180, 135)

        self.input_resize = Scale(self.size, Image.BILINEAR)
        self.target_resize = Scale(self.size, Image.NEAREST)
        self.input_transform = Compose([
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        self.target_transform = Compose([
            ToLabel(),
            ReLabel(255, 1),
        ])

        self.augmentation = augmentation

        with open(fpath, "r") as f:
            lines = filter(lambda l: bool(l), f.read().split("\n"))
            if self.with_targets:
                data = [(input.strip(), target.strip())
                        for input, target in funcy.walk(
                            lambda l: l.split(" "), lines)]
            else:
                data = [(input.strip(), None) for input in lines]

        self.data = [(d, transform_list)
                     for transform_list in augmentation_combinations
                     for d in data]
Example #22
    def __init__(self,
                 fpath,
                 augmentation=None,
                 input_preprocess=None,
                 target_preprocess=None,
                 with_targets=True):
        self.with_targets = with_targets

        if not augmentation:
            augmentation = []
        if not input_preprocess:
            input_preprocess = []
        if not target_preprocess:
            target_preprocess = input_preprocess

        assert os.path.exists(fpath), "File path doesn't exist"
        with open(fpath, "r") as f:
            lines = filter(lambda l: bool(l), f.read().split("\n"))

            if self.with_targets:
                data = [(input.strip(), target.strip())
                        for input, target in funcy.walk(
                            lambda l: l.split(" "), lines)]
            else:
                data = [(input.strip(), None) for input in lines]

        n_augmentation = math.factorial(len(augmentation))
        augmentation_combinations = list(
            itertools.product([0, 1], repeat=n_augmentation))

        self.augmentation = augmentation
        self.input_preprocess = input_preprocess
        self.target_preprocess = target_preprocess
        self.data = [(d, transform_list)
                     for transform_list in augmentation_combinations
                     for d in data]
Example #23
def transform(ast):
    """Transform AST into grouped (by node type) dictionary of metadata dicts.

    """
    return walk(process, categorize(ast))
Example #24
def get_options(default, path):
    return funcy.join([default] + funcy.walk(loadexp, path))
Example #25
# but what?
# some sort of dictionary of word beginnings
# ['apple','ant','cat','corn','car']
# what are we trying to do? know if any words start with a string.
# that way if we don't check for 3 letter words that start with xqu, then 4 letter words, etc.
# { 'a': {'p':{'p':{'l':{'e':'$'}}} 'n':{'t':'$'}} ...} something like that?


def test_perf(num=100):
    n = num
    l7 = load_word_set_by_length(7, as_set=False)
    s7 = load_word_set_by_length(7)

    def check_list():
        return "wishful" in l7
    
    def check_set():
        return "wishful" in s7
    l_time = float(timeit.timeit(check_list, number=n))
    s_time = float(timeit.timeit(check_set, number=n))
    ratio = l_time/s_time

    print "list takes %f times more than the set, using %d repetitions" % (ratio, n)
    print "list: %f " % l_time
    print "set: %f" % s_time


if __name__ == "__main__":
    # test_perf(num=100)
    print f.walk(len, load_prefix_lists().values())
Example #26
def transform(ast):
    """Transform AST into grouped (by node type) dictionary of metadata dicts.

    """
    return walk(process, categorize(ast))
Example #27
 def template_name(self, obj):
     try:
         return walk(reversed, settings.KONOHA_TEMPLATES)[obj.template]
     except KeyError:
         return obj.template
Example #28
def run_epoch(phase, epoch, model, dataloader, optimizer, criterion, scheduler, writer):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    progress_bar = tqdm(dataloader, desc="Epoch {} - {}".format(epoch, phase))
    training = phase == "train"

    if training:
        model.train()
    else:
        model.eval()

    losses = []
    jaccards = []
    jaccards_threshold = []
    dices = []
    for i, (inputs, targets, fname, (_, _)) in enumerate(progress_bar):
        inputs = Variable(inputs, requires_grad=True).to(device)

        if isinstance(targets, list):
            targets = funcy.walk(lambda target: Variable(target, requires_grad=True).to(device), targets)
        else:
            targets = Variable(targets, requires_grad=True).to(device)

        optimizer.zero_grad()
        with torch.set_grad_enabled(training):
            outputs = model(inputs)

            if isinstance(targets, list):
                loss = min(funcy.walk(lambda target: criterion(outputs, target), targets))
                jaccard = max(funcy.walk(lambda target: evaluate_jaccard(outputs, target), targets))
            else:
                loss = criterion(outputs, targets)
                jaccard = evaluate_jaccard(outputs, targets)
            jaccard_threshold = jaccard.item() if jaccard.item() > 0.65 else 0.0
            dice = evaluate_dice(jaccard.item())

            if training:
                loss.backward()
                optimizer.step()
                scheduler.batch_step()

            losses.append(loss.item())
            jaccards.append(jaccard.item())
            jaccards_threshold.append(jaccard_threshold)
            dices.append(dice)
            progress_bar.set_postfix(OrderedDict({"{} loss".format(phase): np.mean(losses),
                                                  "{} jaccard".format(phase): np.mean(jaccards),
                                                  "{} jaccard_threshold".format(phase): np.mean(jaccards_threshold),
                                                  "{} dice".format(phase): np.mean(dices)}))

    mean_loss = np.mean(losses)
    mean_jacc = np.mean(jaccards)
    mean_jacc_threshold = np.mean(jaccards_threshold)
    mean_dice = np.mean(dices)

    loss_tag = "{}.loss".format(phase)
    jacc_tag = "{}.jaccard".format(phase)
    jacc_threshold_tag = "{}.jaccard_threshold".format(phase)
    dice_tag = "{}.dice".format(phase)

    writer.add_scalar(loss_tag, mean_loss, epoch)
    writer.add_scalar(jacc_tag, mean_jacc, epoch)
    writer.add_scalar(jacc_threshold_tag, mean_jacc_threshold, epoch)
    writer.add_scalar(dice_tag, mean_dice, epoch)

    info = {"loss": mean_loss,
            "jaccard": mean_jacc,
            "jaccard_threshold": mean_jacc_threshold,
            "dice": mean_dice}

    return info
Example #29
 def template_name(self, obj):
     try:
         return walk(reversed, settings.KONOHA_TEMPLATES)[obj.template]
     except KeyError:
         return obj.template
Example #30
def walk(f,*seq):
    return F.walk(f,*seq) if seq \
    else lambda *xs: F.walk(f,*xs)
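
This wrapper adds simple partial application on top of F.walk: called with a sequence it behaves like F.walk, called with only the function it returns a mapper to apply later. Illustrative usage:

walk(str, [1, 2, 3])       # -> ['1', '2', '3'], same as F.walk(str, [1, 2, 3])
to_strings = walk(str)     # no sequence given, so a mapper is returned
to_strings([1, 2, 3])      # -> ['1', '2', '3']
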
Example #31
 def done(self, code):
     walk(lambda model: model.makeCheckable(), self._domainModel.dicts.values())
     super(DlgTaskData, self).done(code)
Example #32
def transform(transform_fn, pil_image, mult=1, **kwargs):
    img = np.array(pil_image)
    img = mult * transform_fn(img, **kwargs).astype(np.uint8)
    img = Image.fromarray(img)

    return img.point(lambda p: p > 255 // 2 and 255)


########################################################################################################################
#
# Get the file IDs for each dataset
#
########################################################################################################################

full_isic_archive_ids = funcy.walk(lambda fname: fname.split(".")[0],
                                   os.listdir(ISIC_ARCHIVE_INPUTS_PATH))

isic_archive_ids = list(
    sorted(
        set(
            funcy.walk(
                lambda fname: re.match(
                    r"(ISIC_[0-9]+)_segmentation_[0-9]+.png", fname).group(1),
                filter(lambda fname: fname.endswith(".png"),
                       os.listdir(ISIC_ARCHIVE_TARGETS_PATH))))))

isic_2017_ids = list(
    sorted(
        set(
            funcy.walk(
                lambda fname: re.match(r"(ISIC_[0-9]+)_segmentation.png", fname
Example #33
    def _post_clean(self):
        if any(walk(self.errors.__contains__, self.post_clean_free_fields)):
            return

        super()._post_clean()