Example #1
    def __init__(self, db_dir:Optional[str]=None, working_dir:Optional[str]=None, verbose:int=2, **kwargs:Any) -> NoReturn:
        """ finished, checked,

        Parameters
        ----------
        db_dir: str, optional,
            storage path of the database
            if not specified, data will be fetched from Physionet
        working_dir: str, optional,
            working directory, to store intermediate files and log file
        verbose: int, default 2,
            log verbosity
        kwargs: auxiliary keyword arguments
        """
        super().__init__(db_name="afdb", db_dir=db_dir, working_dir=working_dir, verbose=verbose, **kwargs)
        self.fs = 250
        self.data_ext = "dat"
        self.ann_ext = "atr"
        self.auto_beat_ann_ext = "qrs"
        self.manual_beat_ann_ext = "qrsc"

        self.all_leads = ["ECG1", "ECG2",]

        self._ls_rec()
        self.special_records = ["00735", "03665"]
        self.qrsc_records = get_record_list_recursive(self.db_dir, self.manual_beat_ann_ext)

        self.class_map = ED(
            AFIB=1, AFL=2, J=3, N=0  # an extra isoelectric
        )
        self.palette = kwargs.get("palette", None)
        if self.palette is None:
            self.palette = ED(
                AFIB="blue", AFL="red", J="yellow",
                # N="green",
                qrs="green",
            )
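
A minimal usage sketch of the reader above (hedged: the class name `AFDB` and the local path are assumptions, not shown in the snippet):

reader = AFDB(db_dir="/data/afdb", verbose=1)  # hypothetical class name and path
print(reader.all_leads)        # ["ECG1", "ECG2"]
print(reader.class_map.AFIB)   # 1; `ED` allows attribute access
print(reader.qrsc_records)     # records that have manual beat annotations ("qrsc")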
Example #2
    def __init__(self, in_channels: int, **config) -> NoReturn:
        """ NOT finished, NOT checked,
        
        Parameters
        ----------
        in_channels: int,
            number of channels in the input
        config: dict,
            other hyper-parameters of the Module, including
            number of convolutional layers, number of filters for each layer, etc.
        """
        super().__init__()
        self.__in_channels = in_channels
        self.config = ED(deepcopy(config))

        if self.__DEBUG__:
            print(
                f"configuration of ResNetStanford is as follows\n{dict_to_str(self.config)}"
            )

        self.add_module(
            "cba_1",
            Conv_Bn_Activation(
                in_channels=self.__in_channels,
                out_channels=self.config.num_filters_start,
                kernel_size=self.config.filter_lengths,
                stride=1,
                batch_norm=True,
                activation=self.config.block.activation,
                kw_activation=self.config.block.kw_activation,
                kernel_initializer=self.config.block.kernel_initializer,
                kw_initializer=self.config.block.kw_initializer,
            ))

        module_in_channels = self.config.num_filters_start
        for idx, subsample_length in enumerate(self.config.subsample_lengths):
            num_filters = self.get_num_filters_at_index(
                idx, self.config.num_filters_start)
            self.add_module(
                f"resnet_block_{idx}",
                ResNetStanfordBlock(
                    block_index=idx,
                    in_channels=module_in_channels,
                    num_filters=num_filters,
                    filter_length=self.config.filter_lengths,
                    subsample_length=subsample_length,
                    **(self.config.block),
                ))
            module_in_channels = num_filters
Example #3
    def get(self, request):
        E = ED()
        E.u, E.k = -1, 1
        E.au = 2
        if not request.session.get('is_login', False):
            return E.au
        u = User.get_via_encoded_id(request.session['uid'])
        if u is None:
            return E.au
        # todo: more permission checks
        kwargs = request.GET
        if kwargs.keys() != set():
            return E.k

        return 0, u.root.encoded_id
Example #4
    def __init__(self, in_channels:int, **config) -> NoReturn:
        """ finished, checked,

        Parameters
        ----------
        in_channels: int,
            number of channels in the input
        config: dict,
            other hyper-parameters of the Module, ref. corresponding config file
            keyword arguments that have to be set in 3 sub-dicts,
            namely in "entry_flow", "middle_flow", and "exit_flow",
            ref. corresponding docstring of each class
        """
        super().__init__()
        self.__in_channels = in_channels
        self.config = ED(deepcopy(config))
        if self.__DEBUG__:
            print(f"configuration of {self.__name__} is as follows\n{dict_to_str(self.config)}")

        entry_flow_in_channels = self.__in_channels
        entry_flow = XceptionEntryFlow(
            in_channels=entry_flow_in_channels,
            **(self.config.entry_flow)
        )
        self.add_module(
            "entry_flow",
            entry_flow
        )

        _, middle_flow_in_channels, _ = entry_flow.compute_output_shape()
        middle_flow = XceptionMiddleFlow(
            in_channels=middle_flow_in_channels,
            **(self.config.middle_flow)
        )
        self.add_module(
            "middle_flow",
            middle_flow
        )

        _, exit_flow_in_channels, _ = middle_flow.compute_output_shape()
        exit_flow = XceptionExitFlow(
            in_channels=exit_flow_in_channels,
            **(self.config.exit_flow)
        )
        self.add_module(
            "exit_flow",
            exit_flow,
        )
Example #5
def get_args(**kwargs):
    """
    """
    cfg = deepcopy(kwargs)
    parser = argparse.ArgumentParser(
        description="Train the Model on LUDB",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # parser.add_argument(
    #     "-l", "--learning-rate",
    #     metavar="LR", type=float, nargs="?", default=0.001,
    #     help="Learning rate",
    #     dest="learning_rate")
    parser.add_argument("-b",
                        "--batch-size",
                        type=int,
                        default=128,
                        help="the batch size for training",
                        dest="batch_size")
    parser.add_argument(
        "-m",
        "--model-name",
        type=str,
        default="unet",
        help="name of the model to train, `unet` or `subtract_unet`",
        dest="model_name")
    parser.add_argument(
        "--keep-checkpoint-max",
        type=int,
        default=50,
        help="maximum number of checkpoints to keep. If set to 0, all checkpoints will be kept",
        dest="keep_checkpoint_max")
    parser.add_argument("--optimizer",
                        type=str,
                        default="adam",
                        help="training optimizer",
                        dest="train_optimizer")
    parser.add_argument("--debug",
                        type=str2bool,
                        default=False,
                        help="train with more debugging information",
                        dest="debug")

    args = vars(parser.parse_args())

    cfg.update(args)

    return ED(cfg)
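
A usage sketch for `get_args` (hedged: the script name is hypothetical; keyword arguments act as extra defaults, and parsed CLI flags override them):

# shell (hypothetical script name):
#   python train_ludb.py -b 64 -m unet --optimizer adam
cfg = get_args(fs=500)    # inject an extra default via kwargs
print(cfg.batch_size)     # 64 when run as above, otherwise the default 128
print(cfg.fs)             # 500, passed through from kwargs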
Example #6
    def __init__(self,
                 in_channels: int,
                 scopes: Sequence[Sequence[int]],
                 num_filters: Union[Sequence[int], Sequence[Sequence[int]]],
                 filter_lengths: Union[Sequence[int], Sequence[Sequence[int]]],
                 subsample_lengths: Union[int, Sequence[int]],
                 groups: int = 1,
                 **config) -> NoReturn:
        """

        Parameters:
        -----------
        in_channels
        """
        super().__init__()
        self.__in_channels = in_channels
        self.__scopes = scopes
        self.__num_blocks = len(self.__scopes)
        self.__num_filters = num_filters
        assert len(self.__num_filters) == self.__num_blocks, \
            f"`scopes` indicates {self.__num_blocks} `MultiScopicBasicBlock`s, while `num_filters` indicates {len(self.__num_filters)}"
        self.__filter_lengths = filter_lengths
        assert len(self.__filter_lengths) == self.__num_blocks, \
            f"`scopes` indicates {self.__num_blocks} `MultiScopicBasicBlock`s, while `filter_lengths` indicates {llen(self.__filter_lengths)}"
        if isinstance(subsample_lengths, int):
            self.__subsample_lengths = list(
                repeat(subsample_lengths, self.__num_blocks))
        else:
            self.__subsample_lengths = list(subsample_lengths)
            assert len(self.__subsample_lengths) == self.__num_blocks, \
                f"`scopes` indicates {self.__num_blocks} `MultiScopicBasicBlock`s, while `subsample_lengths` indicates {len(self.__subsample_lengths)}"
        self.__groups = groups
        self.config = ED(deepcopy(config))

        block_in_channels = self.__in_channels
        for idx in range(self.__num_blocks):
            self.add_module(
                f"block_{idx}",
                MultiScopicBasicBlock(
                    in_channels=block_in_channels,
                    scopes=self.__scopes[idx],
                    num_filters=self.__num_filters[idx],
                    filter_lengths=self.__filter_lengths[idx],
                    subsample_length=self.__subsample_lengths[idx],
                    groups=self.__groups,
                    dropout=self.config.dropouts[idx],
                    **(self.config.block)))
            block_in_channels = self.__num_filters[idx]
Example #7
    def _preprocess_one_record(self,
                               rec: Union[int, str],
                               config: dict,
                               force_recompute: bool = False,
                               verbose: int = 0) -> NoReturn:
        """ finished, checked,

        preprocesses the ecg data in advance for further use,
        offline for `self.persistence`

        Parameters:
        -----------
        rec: int or str,
            number of the record, NOTE that rec_no starts from 1,
            or the record name
        config: dict,
            configurations of preprocessing
        force_recompute: bool, default False,
            if True, recompute regardless of possible existing files
        verbose: int, default 0,
            print verbosity
        """
        # format save path
        save_fp = ED()
        rec_name = self.reader._get_rec_name(rec)
        suffix = self._get_rec_suffix(config.preproc)
        save_fp.data = os.path.join(
            self.preprocess_dir, f"{rec_name}-{suffix}{self.reader.rec_ext}")
        save_fp.rpeaks = os.path.join(
            self.rpeaks_dir, f"{rec_name}-{suffix}{self.reader.rec_ext}")
        if (not force_recompute) and os.path.isfile(
                save_fp.data) and os.path.isfile(save_fp.rpeaks):
            return
        # perform pre-process
        pps = SP.parallel_preprocess_signal(
            self.reader.load_data(rec, keep_dim=False),
            fs=self.reader.fs,
            config=config,
            verbose=verbose,
        )
        # `rpeaks_skip_dist` useless for `seq_lab_detect`, as already set internally
        # pps['rpeaks'] = pps['rpeaks'][np.where( (pps['rpeaks']>=config.rpeaks_skip_dist) & (pps['rpeaks']<len(pps['filtered_ecg'])-config.rpeaks_skip_dist) )[0]]
        # save mat, keep in accordance with original mat files
        savemat(save_fp.data, {'ecg': np.atleast_2d(pps['filtered_ecg']).T},
                format='5')
        savemat(save_fp.rpeaks, {'rpeaks': np.atleast_2d(pps['rpeaks']).T},
                format='5')
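
A hedged sketch of reading back the artifacts written above (the record name, directories, and suffix are assumptions; files follow the `{rec_name}-{suffix}` scheme):

from scipy.io import loadmat

ecg = loadmat("preprocess/A01-bandpass.mat")["ecg"]        # shape (n_samples, 1)
rpeaks = loadmat("rpeaks/A01-bandpass.mat")["rpeaks"]      # shape (n_rpeaks, 1)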
Example #8
    def load_masks(self,
                   rec: str,
                   leads: Optional[Sequence[str]] = None,
                   mask_format: str = "channel_first",
                   class_map: Optional[Dict[str, int]] = None) -> np.ndarray:
        """ finished, checked,

        load the wave delineation in the form of masks

        Parameters
        ----------
        rec: str,
            name of the record
        leads: str or list of str, optional,
            the leads to load
        mask_format: str, default "channel_first",
            format of the mask,
            "channel_last" (alias "lead_last"), or
            "channel_first" (alias "lead_first")
        class_map: dict, optional,
            custom class map,
            if not set, `self.class_map` will be used

        Returns
        -------
        masks: ndarray,
            the masks corresponding to the wave delineation annotations of `rec`
        """
        _class_map = ED(class_map) if class_map is not None else self.class_map
        _leads = self._normalize_leads(leads,
                                       standard_ordering=True,
                                       lower_cases=True)
        data = self.load_data(rec, leads=_leads, data_format="channel_first")
        masks = np.full_like(data, fill_value=_class_map.i, dtype=int)
        waves = self.load_ann(rec, leads=_leads, metadata=False)["waves"]
        for idx, (l, l_w) in enumerate(waves.items()):
            for w in l_w:
                masks[idx, w.onset:w.offset] = _class_map[
                    self._wavename_to_symbol[w.name]]
        if mask_format.lower() not in [
                "channel_first",
                "lead_first",
        ]:
            masks = masks.T
        return masks
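
A usage sketch (hedged: the reader instance and record name are assumptions):

# hypothetical LUDB-style reader instance and record name
masks = reader.load_masks("ludb_1", leads=["I", "II"], mask_format="channel_first")
print(masks.shape)   # (2, n_samples): one integer class index per lead and sample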
Example #9
File: vgg.py  Project: busyyang/torch_ecg
    def __init__(self, in_channels: int, **config) -> NoReturn:
        """ finished, checked,
        
        Parameters:
        -----------
        in_channels: int,
            number of channels in the input
        config: dict,
            other hyper-parameters of the Module, including
            number of convolutional layers, number of filters for each layer,
            and more for `VGGBlock`.
            key word arguments that have to be set:
            num_convs: sequence of int,
                number of convolutional layers for each `VGGBlock`
            num_filters: sequence of int,
                number of filters for each `VGGBlock`
            groups: int,
                connection pattern (of channels) of the inputs and outputs
            block: dict,
                other parameters that can be set for `VGGBlock`
            for a full list of configurable parameters, ref. corr. config file
        """
        super().__init__()
        self.__in_channels = in_channels
        # self.config = deepcopy(ECG_CRNN_CONFIG.cnn.vgg16)
        self.config = ED(deepcopy(config))
        if self.__DEBUG__:
            print(
                f"configuration of {self.__name__} is as follows\n{dict_to_str(self.config)}"
            )

        module_in_channels = in_channels
        for idx, (nc, nf) in enumerate(
                zip(self.config.num_convs, self.config.num_filters)):
            module_name = f"vgg_block_{idx+1}"
            self.add_module(name=module_name,
                            module=VGGBlock(
                                num_convs=nc,
                                in_channels=module_in_channels,
                                out_channels=nf,
                                groups=self.config.groups,
                                **(self.config.block),
                            ))
            module_in_channels = nf
Example #10
    def get(self, request):
        E = ED()
        E.u, E.k = -1, 1
        E.au, E.no_ent = 2, 3
        if not request.session.get('is_login', False):
            return E.au
        u = User.get_via_encoded_id(request.session['uid'])
        if u is None:
            return E.au
        kwargs: dict = request.GET
        if kwargs.keys() != {'did'}:
            return E.k

        did = kwargs.get('did')

        e = Entity.get_via_encoded_id(did)
        if e is None:
            return E.no_ent
        return 0, e.is_locked
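
The `get`/`post` methods in these view snippets return either a bare error code or a tuple starting with 0 on success; a hedged sketch of a dispatcher consuming that convention (the wrapper is hypothetical, not part of the project):

def as_response(ret):
    # success: (0, *payload); failure: a bare error code
    if isinstance(ret, tuple):
        return {"status": ret[0], "data": list(ret[1:])}
    return {"status": ret}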
Example #11
    def __init__(self, in_channels: int, **config) -> NoReturn:
        """ finished, checked,

        Parameters
        ----------
        in_channels: int,
            number of channels in the input
        config: dict,
            other hyper-parameters of the Module, including
            `num_filters`, `filter_lengths`, `subsample_lengths`,
            and `dropouts` for each `CPSCBlock`
        """
        super().__init__()
        self.__in_channels = in_channels
        self.config = ED(deepcopy(config))
        if self.__DEBUG__:
            print(
                f"configuration of {self.__name__} is as follows\n{dict_to_str(self.config)}"
            )

        num_filters = self.config.num_filters
        filter_lengths = self.config.filter_lengths
        subsample_lengths = self.config.subsample_lengths
        dropouts = self.config.dropouts
        blk_in = self.__in_channels
        for blk_idx, (blk_nf, blk_fl, blk_sl, blk_dp) \
            in enumerate(zip(num_filters, filter_lengths, subsample_lengths, dropouts)):
            self.add_module(
                f"cpsc_block_{blk_idx+1}",
                CPSCBlock(
                    in_channels=blk_in,
                    num_filters=blk_nf,
                    filter_lengths=blk_fl,
                    subsample_lengths=blk_sl,
                    dropout=blk_dp,
                ))
            blk_in = blk_nf[-1]
Example #12
    def _log(self, logger, ltype: str, **args):
        args = ED(args)

        if ltype == 'scalar':
            step = args.step if 'step' in args else 0
            logger.log_scalar(name=args.name, value=args.value, step=step)

        elif ltype == 'hp':
            logger.log_hparam(name=args.name, value=args.value)

        elif ltype == 'scalardict':
            step = args.step if 'step' in args else 0
            for k, v in args.value.items():
                logger.log_scalar(name=k, value=v, step=step)

        elif ltype == 'hpdict':
            for k, v in args.value.items():
                logger.log_hparam(name=k, value=v)

        elif ltype == 'img':
            step = args.step if 'step' in args else 0
            logger.log_image(name=args.name, img_matrix=args.img, step=step)

        elif ltype == '1d':
            histogram = args.histogram if 'histogram' in args else False
            logger.log_1d(name=args.name,
                          value=args.value,
                          histogram=histogram)
        elif ltype in ('2d', '3d'):
            scatter = args.scatter if 'scatter' in args else False
            if ltype == '2d':
                logger.log_2d(name=args.name,
                              value=args.value,
                              scatter=scatter)
            else:
                logger.log_3d(name=args.name,
                              value=args.value,
                              scatter=scatter)

        else:
            print(f'log: unknown ltype = {ltype}')
            raise NotImplementedError
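
A usage sketch of the dispatcher above (hedged: `trainer` and `logger` are assumed instances):

trainer._log(logger, 'scalar', name='loss', value=0.42, step=100)
trainer._log(logger, 'scalardict', value={'acc': 0.91, 'f1': 0.88}, step=100)
trainer._log(logger, 'hp', name='lr', value=1e-3)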
Example #13
    def __init__(self, in_channels: int, **config) -> NoReturn:
        """ finished, NOT checked,

        Parameters:
        -----------
        in_channels: int,
        config: dict,
        """
        super().__init__()
        self.__in_channels = in_channels
        self.config = ED(deepcopy(config))

        self.add_module(
            "init_conv",
            Conv_Bn_Activation(
                in_channels=self.__in_channels,
                out_channels=self.config.init_num_filters,
                kernel_size=self.config.init_filter_length,
                stride=self.config.init_subsample_length,
                groups=self.config.groups,
                batch_norm=self.config.batch_norm,
                activation=self.config.activation,
                bias=self.config.bias,
            ))

        _, entry_flow_in_channels, _ = self.init_conv.compute_output_shape()
        entry_flow = MultiConv(in_channels=entry_flow_in_channels,
                               **(self.config.entry_flow))
        self.add_module("entry_flow", entry_flow)

        _, middle_flow_in_channels, _ = entry_flow.compute_output_shape()
        middle_flow = MultiConv(in_channels=middle_flow_in_channels,
                                **(self.config.middle_flow))
        self.add_module("middle_flow", middle_flow)

        _, exit_flow_in_channels, _ = middle_flow.compute_output_shape()
        exit_flow = MultiConv(in_channels=exit_flow_in_channels,
                              **(self.config.exit_flow))
        self.add_module(
            "exit_flow",
            exit_flow,
        )
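
A hedged sketch of a config carrying the keys this module reads (all values are illustrative; each flow sub-dict must hold valid `MultiConv` keyword arguments, cf. the `final_convs` call in Example #29):

config = dict(
    init_num_filters=32, init_filter_length=25, init_subsample_length=2,
    groups=1, batch_norm=True, activation="relu", bias=False,
    entry_flow=dict(out_channels=[64, 64], filter_lengths=15),
    middle_flow=dict(out_channels=[128, 128], filter_lengths=13),
    exit_flow=dict(out_channels=[256, 256], filter_lengths=11),
)
# `ED(deepcopy(config))` in the constructor converts the nested dicts recursively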
Example #14
    def get(self, request):
        E = ED()
        E.u, E.k = -1, 1
        E.au, E.no, E.no_father = 2, 3, 4
        if not request.session.get('is_login', False):
            return E.au, ''
        u = User.get_via_encoded_id(request.session['uid'])
        if u is None:
            return E.au, ''
        kwargs = request.GET
        if kwargs.keys() != {'id', 'type'}:
            return E.k, ''

        e = Entity.get_via_encoded_id(kwargs.get('id'))
        if e is None:
            return E.no, ''
        if e.father is None:
            return E.no_father, ''

        return 0, e.father.encoded_id
Example #15
    def _load_header(self, rec: str) -> dict:
        """ finished, checked,

        load header data into a dict

        Parameters
        ----------
        rec: str,
            name of the record

        Returns
        -------
        header_dict: dict,
        """
        header_dict = ED({})
        rec_fp = os.path.join(self.db_dir, rec)
        header_reader = wfdb.rdheader(rec_fp)
        header_dict["units"] = header_reader.units
        header_dict["baseline"] = header_reader.baseline
        header_dict["adc_gain"] = header_reader.adc_gain
        header_dict["record_fmt"] = header_reader.fmt
        try:
            header_dict["age"] = int([
                l for l in header_reader.comments if "<age>" in l
            ][0].split(": ")[-1])
        except Exception:
            header_dict["age"] = np.nan
        try:
            header_dict["sex"] = [
                l for l in header_reader.comments if "<sex>" in l
            ][0].split(": ")[-1]
        except Exception:
            header_dict["sex"] = ""
        d_start = [
            idx for idx, l in enumerate(header_reader.comments)
            if "<diagnoses>" in l
        ][0] + 1
        header_dict["diagnoses"] = header_reader.comments[d_start:]
        return header_dict
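
A usage sketch (hedged: the reader instance and record name are assumptions):

header = reader._load_header("I0001")      # hypothetical record name
print(header.age, header.sex)              # ED allows attribute access
print(header.diagnoses)                    # comments after the "<diagnoses>" marker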
Example #16
    def get(self, request):
        E = ED()
        E.u, E.k = -1, 1
        E.au = 2
        if not request.session.get('is_login', False):
            return E.au
        u = User.get_via_encoded_id(request.session['uid'])
        if u is None:
            return E.au
        # todo: more permission checks
        # print(request.GET)
        kwargs = request.GET
        if kwargs.keys() != {'tid'}:
            return E.k

        t: Team

        t = Team.get_via_encoded_id(kwargs.get('tid'))
        if t is None or not t.contains_user(u):
            return E.u

        return 0, t.root.encoded_id
Example #17
    def train_test_split_rec(self,
                             test_rec_num: int = 2) -> Dict[str, List[str]]:
        """ finished, checked,

        split the records into train set and test set

        Parameters:
        -----------
        test_rec_num: int,
            number of records for the test set

        Returns:
        --------
        split_res: dict,
            with items `train`, `test`, both being list of record names
        """
        if test_rec_num == 1:
            test_records = random.sample(self.subgroups.VS, 1)
        elif test_rec_num == 2:
            test_records = random.sample(self.subgroups.VS, 1) + random.sample(
                self.subgroups.N, 1)
        elif test_rec_num == 3:
            test_records = random.sample(self.subgroups.VS, 1) + random.sample(
                self.subgroups.N, 2)
        elif test_rec_num == 4:
            test_records = []
            for k in self.subgroups.keys():
                test_records += random.sample(self.subgroups[k], 1)
        else:
            raise ValueError("`test_rec_num` should be one of 1, 2, 3, 4")
        train_records = [r for r in self.all_records if r not in test_records]

        split_res = ED({
            "train": train_records,
            "test": test_records,
        })

        return split_res
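
A usage sketch (hedged: `reader` is assumed to be the CPSC2020 reader of Example #22, whose `subgroups` this method samples from):

split = reader.train_test_split_rec(test_rec_num=2)
print(split.test)    # e.g. ["A04", "A03"]: one record from "VS", one from "N"
print(split.train)   # the remaining 8 of the 10 records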
Example #18
    def get(self, request):
        E = ED()
        E.u, E.k = -1, 1
        E.au, E.no = 2, 3
        if not request.session.get('is_login', False):
            return E.au
        u = User.get_via_encoded_id(request.session['uid'])
        if u is None:
            return E.au
        # todo: more permission checks
        kwargs = request.GET
        if kwargs.keys() != {'did'}:
            return E.k

        e = Entity.get_via_encoded_id(kwargs.get('did'))
        if e is None or e.father is None:
            return E.no

        return (0, e.name, len(e.plain_content), e.creator.encoded_id,
                e.creator.name, e.is_locked, [{
                    'fid': f.encoded_id,
                    'name': f.name
                } for f in e.path])
Example #19
    def post(self, request):
        E = ED()
        E.u, E.k = -1, 1
        E.au, E.uni, E.no_id = 2, 3, 4
        if not request.session.get('is_login', False):
            return E.au
        u = User.get_via_encoded_id(request.session['uid'])
        if u is None:
            return E.au
        # todo: more permission checks
        kwargs: dict = json.loads(request.body)
        if kwargs.keys() != {'id', 'type'}:
            return E.k

        e = Entity.get_via_encoded_id(kwargs['id'])
        if e is None:
            return E.u
        if u.links.filter(ent__name=e.name):
            return E.uni

        Links.objects.create(user=u, ent=e)

        return 0
Example #20
    def post(self, request):
        E = ED()
        E.u, E.k = -1, 1
        E.au, E.no_id = 2, 3
        if not request.session.get('is_login', False):
            return E.au
        u = User.get_via_encoded_id(request.session['uid'])
        if u is None:
            return E.au
        # todo: more permission checks
        kwargs: dict = json.loads(request.body)
        if kwargs.keys() != {'id', 'type'}:
            return E.k

        e = Entity.get_via_encoded_id(kwargs['id'])
        if e is None:
            return E.no_id

        e.is_deleted = True
        e.delete_dt = datetime.now()
        e.save()

        return 0
Example #21
    def post(self, request):
        E = ED()
        E.u, E.k = -1, 1
        E.au, E.not_found = 2, 3
        if not request.session.get('is_login', False):
            return E.au
        u = User.get_via_encoded_id(request.session['uid'])
        if u is None:
            return E.au
        # todo: more permission checks
        kwargs: dict = json.loads(request.body)
        if kwargs.keys() != {}.keys():
            return E.k

        all_f = []
        for rec in u.create_records.filter(ent__is_deleted=True):
            e: Entity = rec.ent
            all_f.extend(e.subtree)

        fids = [f.id for f in all_f]
        all_f = [Entity.objects.get(id=fid) for fid in list(set(fids))]
        for f in all_f:
            f.delete()
        return 0
Example #22
    def __init__(self, db_dir:str, working_dir:Optional[str]=None, verbose:int=2, **kwargs:Any) -> NoReturn:
        """ finished, to be improved,

        Parameters
        ----------
        db_dir: str,
            storage path of the database
        working_dir: str, optional,
            working directory, to store intermediate files and log file
        verbose: int, default 2,
            log verbosity
        kwargs: auxiliary keyword arguments
        """
        super().__init__(db_name="CPSC2020", db_dir=db_dir, working_dir=working_dir, verbose=verbose, **kwargs)

        self.fs = 400
        self.spacing = 1000/self.fs
        self.rec_ext = "mat"
        self.ann_ext = "mat"

        self.nb_records = 10
        self._all_records = [f"A{i:02d}" for i in range(1,1+self.nb_records)]
        self._all_annotations = [f"R{i:02d}" for i in range(1,1+self.nb_records)]
        # self.all_references = self.all_annotations
        self.rec_dir = os.path.join(self.db_dir, "data")
        self.ann_dir = os.path.join(self.db_dir, "ref")
        self.data_dir = self.rec_dir
        self.ref_dir = self.ann_dir

        self.subgroups = ED({
            "N":  ["A01", "A03", "A05", "A06",],
            "V":  ["A02", "A08"],
            "S":  ["A09", "A10"],
            "VS": ["A04", "A07"],
        })

        self.palette = {"spb": "yellow", "pvc": "red",}
Example #23
    def post(self, request):
        E = ED()
        E.u, E.k = -1, 1
        E.au, E.no_ent = 2, 3
        if not request.session.get('is_login', False):
            return E.au
        u = User.get_via_encoded_id(request.session['uid'])
        if u is None:
            return E.au
        kwargs: dict = json.loads(request.body)
        if kwargs.keys() != {'did', 'is_locked'}:
            return E.k

        did, is_locked = kwargs['did'], kwargs['is_locked']

        e = Entity.get_via_encoded_id(did)
        if e is None:
            return E.no_ent
        e.is_locked = is_locked
        try:
            e.save()
        except:
            return E.u
        return 0
Example #24
    def post(self, request):
        E = ED()
        E.u, E.k = -1, 1
        E.au, E.not_found = 2, 3
        if not request.session.get('is_login', False):
            return E.au
        u = User.get_via_encoded_id(request.session['uid'])
        if u is None:
            return E.au
        # todo: more permission checks
        kwargs: dict = json.loads(request.body)
        if kwargs.keys() != {'id', 'type'}:
            return E.k

        e = Entity.objects.filter(id=int(decode(kwargs['id'])))
        if not e.exists():
            return E.not_found
        ent: Entity = e.get()
        if not ent.is_deleted:
            return E.not_found

        for so in ent.subtree:
            so.delete()

        return 0
Example #25
    def __init__(self,
                 in_channels: int,
                 num_filters: int,
                 filter_length: int,
                 subsample_length: int,
                 groups: int = 1,
                 dilation: int = 1,
                 dropouts: Union[float, Sequence[float]] = 0,
                 **config) -> NoReturn:
        """ finished, NOT checked,

        Parameters
        ----------
        in_channels: int,
            number of features (channels) of the input
        num_filters: int,
            number of filters for the convolutional layers
        filter_length: int,
            length (size) of the filter kernels
        subsample_length: int,
            subsample length,
            including pool size for short cut, and stride for the top convolutional layer
        groups: int, default 1,
            pattern of connections between inputs and outputs,
            for more details, ref. `nn.Conv1d`
        dilation: int, default 1,
            dilation of the convolutional layers
        dropouts: float, or sequence of float, default 0.0,
            dropout ratio after each convolution (and batch normalization, and activation, etc.)
        config: dict,
            other hyper-parameters, including
            filter length (kernel size), activation choices, weight initializer,
            and short cut patterns, etc.
        """
        super().__init__()
        self.__num_convs = 2
        self.__in_channels = in_channels
        self.__out_channels = num_filters
        self.__kernel_size = filter_length
        self.__down_scale = subsample_length
        self.__stride = subsample_length
        self.__groups = groups
        self.__dilation = dilation
        if isinstance(dropouts, float):
            self.__dropouts = list(repeat(dropouts, self.__num_convs))
        else:
            self.__dropouts = list(dropouts)
        assert len(self.__dropouts) == self.__num_convs
        self.config = ED(deepcopy(config))
        if self.__DEBUG__:
            print(
                f"configuration of {self.__name__} is as follows\n{dict_to_str(self.config)}"
            )

        self.__increase_channels = (self.__out_channels > self.__in_channels)
        self.shortcut = self._make_shortcut_layer()

        self.main_stream = nn.Sequential()
        conv_in_channels = self.__in_channels
        for i in range(self.__num_convs):
            conv_activation = (self.config.activation
                               if i < self.__num_convs - 1 else None)
            self.main_stream.add_module(
                f"cba_{i}",
                Conv_Bn_Activation(
                    in_channels=conv_in_channels,
                    out_channels=self.__out_channels,
                    kernel_size=self.__kernel_size,
                    stride=(self.__stride if i == 0 else 1),
                    dilation=self.__dilation,
                    groups=self.__groups,
                    batch_norm=True,
                    activation=conv_activation,
                    kw_activation=self.config.kw_activation,
                    kernel_initializer=self.config.kernel_initializer,
                    kw_initializer=self.config.kw_initializer,
                    bias=self.config.bias,
                ))
            conv_in_channels = self.__out_channels
            if i == 0 and self.__dropouts[i] > 0:
                self.main_stream.add_module(f"dropout_{i}",
                                            nn.Dropout(self.__dropouts[i]))
            if i == 1:
                self.main_stream.add_module(
                    f"gcb",
                    GlobalContextBlock(
                        in_channels=self.__out_channels,
                        ratio=self.config.gcb.ratio,
                        reduction=self.config.gcb.reduction,
                        pooling_type=self.config.gcb.pooling_type,
                        fusion_types=self.config.gcb.fusion_types,
                    ))

        if isinstance(self.config.activation, str):
            self.out_activation = \
                Activations[self.config.activation.lower()](**self.config.kw_activation)
        else:
            self.out_activation = \
                self.config.activation(**self.config.kw_activation)

        if self.__dropouts[1] > 0:
            self.out_dropout = nn.Dropout(self.__dropouts[1])
        else:
            self.out_dropout = None
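
A hedged sketch of a `config` carrying the keys this block reads (all values are illustrative, not taken from the project's config files; the `gcb` sub-dict mirrors the attributes accessed above):

block_config = dict(
    activation="relu", kw_activation={"inplace": True},
    kernel_initializer="he_normal", kw_initializer={}, bias=False,
    gcb=dict(ratio=8, reduction=True, pooling_type="attn", fusion_types=["mul"]),
)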
Example #26
    def __init__(self, stage: int, out_branches: int, in_channels: int,
                 **config) -> NoReturn:
        """ NOT finished, NOT checked,
        """
        super().__init__()
        self.stage = stage
        self.out_branches = out_branches
        self.in_channels = in_channels
        self.config = ED(config)

        self.branches = nn.ModuleList()
        for i in range(self.stage):
            w = in_channels * (2**i)
            branch = nn.Sequential(
                ResNetGCBlock(in_channels=w,
                              num_filters=w,
                              **(self.config.resnet_gc)),
                ResNetGCBlock(in_channels=w,
                              num_filters=w,
                              **(self.config.resnet_gc)),
                ResNetGCBlock(in_channels=w,
                              num_filters=w,
                              **(self.config.resnet_gc)),
            )
            self.branches.append(branch)

        self.fuse_layers = nn.ModuleList()
        for i in range(self.out_branches):
            fl = nn.ModuleList()
            for j in range(self.stage):
                if i == j:
                    fl.append(nn.Sequential())
                elif i < j:
                    if i == 0:
                        fl.append(
                            nn.Sequential(
                                nn.Conv1d(in_channels * (2**j),
                                          in_channels * (2**i),
                                          kernel_size=1,
                                          stride=1),
                                nn.BatchNorm1d(in_channels * (2**i)),
                                nn.Upsample(size=625),
                            ))
                    elif i == 1:
                        fl.append(
                            nn.Sequential(
                                nn.Conv1d(in_channels * (2**j),
                                          in_channels * (2**i),
                                          kernel_size=1,
                                          stride=1),
                                nn.BatchNorm1d(in_channels * (2**i)),
                                nn.Upsample(size=313)))
                    elif i == 2:
                        fl.append(
                            nn.Sequential(
                                nn.Conv1d(in_channels * (2**j),
                                          in_channels * (2**i),
                                          kernel_size=1,
                                          stride=1),
                                nn.BatchNorm1d(in_channels * (2**i)),
                                nn.Upsample(size=157)))

                elif i > j:
                    opts = []
                    if i == j + 1:
                        opts.append(
                            Conv_Bn_Activation(
                                in_channels=in_channels * (2**j),
                                out_channels=in_channels * (2**i),
                                kernel_size=7,
                                stride=2,
                                batch_norm=True,
                                activation=None,
                            ))
                    elif i == j + 2:
                        opts.append(
                            MultiConv(
                                in_channels=in_channels * (2**j),
                                out_channels=[
                                    in_channels * (2**(j + 1)),
                                    in_channels * (2**(j + 2))
                                ],
                                filter_lengths=7,
                                subsample_lengths=2,
                                out_activation=False,
                            ))
                    elif i == j + 3:
                        opts.append(
                            MultiConv(
                                in_channels=in_channels * (2**j),
                                out_channels=[
                                    in_channels * (2**(j + 1)),
                                    in_channels * (2**(j + 2)),
                                    in_channels * (2**(j + 3))
                                ],
                                filter_lengths=7,
                                subsample_lengths=2,
                                out_activation=False,
                            ))
                    fl.append(nn.Sequential(*opts))
            self.fuse_layers.append(fl)
        self.fuse_activation = nn.ReLU(inplace=True)
Example #27
from .cnn import (
    multi_scopic_block,
    multi_scopic,
    multi_scopic_leadwise,
)
from .attn import (
    non_local,
    squeeze_excitation,
    global_context,
)

__all__ = [
    "ECG_SEQ_LAB_NET_CONFIG",
]

# vanilla config, for delineation using single-lead ECG in corresponding papers
ECG_SEQ_LAB_NET_CONFIG = ED()

ECG_SEQ_LAB_NET_CONFIG.cnn = ED()
ECG_SEQ_LAB_NET_CONFIG.cnn.name = "multi_scopic"
ECG_SEQ_LAB_NET_CONFIG.cnn.multi_scopic = deepcopy(multi_scopic)
_base_num_filters = 4
ECG_SEQ_LAB_NET_CONFIG.cnn.multi_scopic.num_filters = [
    [
        _base_num_filters * 4,
        _base_num_filters * 8,
        _base_num_filters * 16,
    ],
    [
        _base_num_filters * 4,
        _base_num_filters * 8,
        _base_num_filters * 16,
Example #28
    def __init__(self,
                 in_channels:int,
                 num_filters:Sequence[int],
                 filter_lengths:Union[Sequence[int],int],
                 subsample_length:int=1,
                 subsample_kernel:Optional[int]=None,
                 dilations:Union[Sequence[int],int]=1,
                 groups:int=1,
                 dropouts:Union[Sequence[float],float]=0.0,
                 **config) -> NoReturn:
        """ finished, checked,

        Parameters
        ----------
        in_channels: int,
            number of channels in the input
        num_filters: sequence of int,
            number of channels produced by the main stream convolutions,
            the length of `num_filters` also indicates the number of convolutions
        filter_lengths: int or sequence of int,
            length(s) of the filters (kernel size)
        subsample_length: int,
            stride of the main stream subsample layer
        subsample_kernel: int, optional,
            kernel size of the main stream subsample layer,
            if not set, defaults to `subsample_length`,
        dilations: int or sequence of int, default 1,
            dilation(s) of the convolutions
        groups: int, default 1,
            connection pattern (of channels) of the inputs and outputs
        dropouts: float or sequence of float, default 0.0,
            dropout ratio after each `Conv_Bn_Activation`
        config: dict,
            other parameters, including
            activation choices, weight initializer, batch normalization choices, etc.,
            for the convolutional layers,
            and subsampling modes for subsampling layers, etc.
        """
        super().__init__()
        self.__in_channels = in_channels
        self.__num_filters = list(num_filters)
        self.__num_convs = len(self.__num_filters)
        if isinstance(filter_lengths, int):
            self.__filter_lengths = list(repeat(filter_lengths, self.__num_convs))
        else:
            self.__filter_lengths = list(filter_lengths)
        assert self.__num_convs == len(self.__filter_lengths), \
            f"the main stream has {self.__num_convs} convolutions, while `filter_lengths` indicates {len(self.__filter_lengths)}"
        self.__subsample_length = subsample_length
        self.__subsample_kernel = subsample_kernel or subsample_length
        self.__groups = groups
        if isinstance(dilations, int):
            self.__dilations = list(repeat(dilations, self.__num_convs))
        else:
            self.__dilations = list(dilations)
        assert self.__num_convs == len(self.__dilations), \
            f"the main stream has {self.__num_convs} convolutions, while `dilations` indicates {len(self.__dilations)}"
        if isinstance(dropouts, Real):
            self.__dropouts = list(repeat(dropouts, self.__num_convs))
        else:
            self.__dropouts = list(dropouts)
        assert self.__num_convs == len(self.__dropouts), \
            f"the main stream has {self.__num_convs} convolutions, while `dropouts` indicates {len(self.__dropouts)}"
        self.config = ED(deepcopy(_DEFAULT_CONV_CONFIGS))
        self.config.update(deepcopy(config))

        self.main_stream_conv = MultiConv(
            in_channels=self.__in_channels,
            out_channels=self.__num_filters,
            filter_lengths=self.__filter_lengths,
            subsample_lengths=1,
            dilations=self.__dilations,
            groups=self.__groups,
            dropouts=self.__dropouts,
            **self.config
        )
        if self.__subsample_length > 1:
            self.subsample = DownSample(
                down_scale=self.__subsample_length,
                in_channels=self.__num_filters[-1],
                kernel_size=self.__subsample_kernel,
                groups=self.__groups,
                padding=(self.__subsample_kernel-1)//2,
                mode=self.config.subsample_mode,
            )
            self.shortcut = DownSample(
                down_scale=self.__subsample_length,
                in_channels=self.__in_channels,
                out_channels=self.__num_filters[-1],
                groups=self.__groups,
                kernel_size=1,
                batch_norm=self.config.batch_norm,
                mode="conv",
            )
        else:
            self.subsample = None
            self.shortcut = None
Example #29
    def __init__(self,
                 in_channels:int,
                 final_num_filters:Sequence[int],
                 final_filter_lengths:Union[int,Sequence[int]],
                 num_filters:Union[Sequence[int],Sequence[Sequence[int]]],
                 filter_lengths:Union[int,Sequence[int],Sequence[Sequence[int]]],
                 subsample_lengths:Union[int,Sequence[int]],
                 subsample_kernels:Optional[Union[int,Sequence[int]]]=None,
                 dilations:Union[int,Sequence[int],Sequence[Sequence[int]]]=1,
                 groups:int=1,
                 dropouts:Union[float,Sequence[float],Sequence[Sequence[float]]]=0.0,
                 block_dropouts:Union[float,Sequence[float]]=0.0,
                 **config) -> NoReturn:
        """ finished, checked,

        Parameters
        ----------
        in_channels: int,
            number of channels in the input
        final_num_filters: sequence of int,
            number of filters (output channels) of the final convolutions
        final_filter_lengths: int or sequence of int,
            filter length(s) of the final convolutions
        num_filters: sequence of int or sequence of sequences of int,
            number of filters of the convolutions of Xception blocks
        filter_lengths: int or sequence of int or sequence of sequences of int,
            filter length(s) of the convolutions of Xception blocks
        subsample_lengths: int or sequence of int,
            subsampling length(s) of the Xception blocks
        subsample_kernels: int or sequence of int, optional,
            subsampling kernel size(s) of the Xception blocks
        dilations: int or sequence of int or sequence of sequences of int, default 1,
            dilation(s) of the convolutions of Xception blocks
        groups: int, default 1,
            connection pattern (of channels) of the inputs and outputs
        dropouts: float or sequence of float or sequence of sequences of float, default 0.0,
            dropout(s) after each `Conv_Bn_Activation` blocks in the Xception blocks
        block_dropouts: float or sequence of float, default 0.0,
            dropout(s) after each of the Xception blocks and each of the final convolutions
        config: dict,
            other parameters for Xception blocks and final convolutions, including
            activation choices, weight initializer, batch normalization choices, etc.
            for the convolutional layers,
            and subsampling modes for subsampling layers, etc.
        """
        super().__init__()
        self.__in_channels = in_channels
        self.__num_filters = list(num_filters)
        self.__num_blocks = len(self.__num_filters)
        if isinstance(filter_lengths, int):
            self.__filter_lengths = list(repeat(filter_lengths, self.__num_blocks))
        else:
            self.__filter_lengths = list(filter_lengths)
        assert self.__num_blocks == len(self.__filter_lengths), \
            f"the exit flow has {self.__num_blocks} blocks, while `filter_lengths` indicates {len(self.__filter_lengths)}"
        if isinstance(subsample_lengths, int):
            self.__subsample_lengths = list(repeat(subsample_lengths, self.__num_blocks))
        else:
            self.__subsample_lengths = list(subsample_lengths)
        assert self.__num_blocks == len(self.__subsample_lengths), \
            f"the exit flow has {self.__num_blocks} blocks, while `subsample_lengths` indicates {len(self.__subsample_lengths)}"
        if subsample_kernels is None:
            self.__subsample_kernels = deepcopy(self.__subsample_lengths)
        elif isinstance(subsample_kernels, int):
            self.__subsample_kernels = list(repeat(subsample_kernels, self.__num_blocks))
        else:
            self.__subsample_kernels = list(subsample_kernels)
        assert self.__num_blocks == len(self.__subsample_kernels), \
            f"the exit flow has {self.__num_blocks} blocks, while `subsample_kernels` indicates {len(self.__subsample_kernels)}"
        if isinstance(dilations, int):
            self.__dilations = list(repeat(dilations, self.__num_blocks))
        else:
            self.__dilations = list(dilations)
        assert self.__num_blocks == len(self.__dilations), \
            f"the exit flow has {self.__num_blocks} blocks, while `dilations` indicates {len(self.__dilations)}"
        if isinstance(dropouts, Real):
            self.__dropouts = list(repeat(dropouts, self.__num_blocks))
        else:
            self.__dropouts = list(dropouts)
        assert self.__num_blocks == len(self.__dropouts), \
            f"the exit flow has {self.__num_blocks} blocks, while `dropouts` indicates {len(self.__dropouts)}"
        if isinstance(block_dropouts, Real):
            self.__block_dropouts = list(repeat(block_dropouts, self.__num_blocks + len(final_num_filters)))
        else:
            self.__block_dropouts = list(block_dropouts)
        assert self.__num_blocks + len(final_num_filters) == len(self.__block_dropouts), \
            f"the exit flow has {self.__num_blocks + len(final_num_filters)} blocks, including the final convolutions, while `block_dropouts` indicates {len(self.__block_dropouts)}"
        self.__groups = groups
        self.config = ED(deepcopy(_DEFAULT_CONV_CONFIGS))
        self.config.update(deepcopy(config))

        block_in_channels = self.__in_channels
        for idx, nf in enumerate(self.__num_filters):
            # the number of main stream convolutions defaults to 2
            if isinstance(nf, int):
                block_out_channels = list(repeat(nf, 2))
            else:
                block_out_channels = list(nf)
            self.add_module(
                f"exit_flow_conv_block_{idx}",
                XceptionMultiConv(
                    in_channels=block_in_channels,
                    num_filters=block_out_channels,
                    filter_lengths=self.__filter_lengths[idx],
                    subsample_length=self.__subsample_lengths[idx],
                    subsample_kernel=self.__subsample_kernels[idx],
                    dilations=self.__dilations[idx],
                    groups=self.__groups,
                    dropouts=self.__dropouts[idx],
                    **self.config
                )
            )
            block_in_channels = block_out_channels[-1]
            if self.__block_dropouts[idx] > 0:
                self.add_module(
                    f"exit_flow_dropout_{idx}",
                    nn.Dropout(self.__block_dropouts[idx])
                )

        self.add_module(
            "final_convs",
            MultiConv(
                in_channels=block_in_channels,
                out_channels=final_num_filters,
                filter_lengths=final_filter_lengths,
                groups=groups,
                conv_type="separable",
                activation=self.config.activation,
            )
        )
Example #30
if Cfg.torch_dtype.lower() == "double":
    torch.set_default_tensor_type(torch.DoubleTensor)


__all__ = [
    "Xception",
    "XceptionEntryFlow", "XceptionMiddleFlow", "XceptionExitFlow",
    "XceptionMultiConv",
]


_DEFAULT_CONV_CONFIGS = ED(
    ordering="acb",
    conv_type="separable",
    batch_norm=True,
    subsample_mode="max",
    activation="relu",
    kw_activation={"inplace": True},
    kernel_initializer="he_normal",
    kw_initializer={},
)
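
Throughout these snippets `ED` is `easydict.EasyDict` (an assumption consistent with torch_ecg), which layers recursive attribute-style access over a plain `dict`; a minimal sketch of the behavior the configs rely on:

from copy import deepcopy
from easydict import EasyDict as ED

cfg = ED(deepcopy(_DEFAULT_CONV_CONFIGS))
assert cfg.conv_type == cfg["conv_type"] == "separable"
cfg.update(activation="mish")   # plain dict methods still work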


class XceptionMultiConv(nn.Module):
    """

    -> n(2 or 3) x (activation -> norm -> sep_conv) (-> optional sub-sample) ->
    |-------------------------------- shortcut ------------------------------|
    """
    __DEBUG__ = True
    __name__ = "XceptionMultiConv"