Example #1
    def __init__(self, args, num_chan):
        super(RiskFactorPool, self).__init__(args, num_chan)
        self.args = args
        self.internal_pool = get_pool(args.pool_name)(args, num_chan)
        assert not self.internal_pool.replaces_fc()
        self.dropout = nn.Dropout(args.dropout)
        self.length_risk_factor_vector = RiskFactorVectorizer(args).vector_length
        self.fc = nn.Linear(self.length_risk_factor_vector + num_chan,
                            args.num_classes)

        self.args.hidden_dim = self.length_risk_factor_vector + num_chan
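
The input width of self.fc (risk factor vector length plus num_chan) implies that the forward pass concatenates the pooled image features with the risk factor vector before classifying. A minimal self-contained sketch of that pattern; the class name, dimensions, and the (logit, hidden) return convention are assumptions, not part of the example:

import torch
import torch.nn as nn

class ConcatPoolSketch(nn.Module):
    """Illustrative stand-in for the classifier head above."""

    def __init__(self, num_chan=512, rf_len=100, num_classes=2, dropout=0.1):
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        # Mirrors self.fc above: input is image features + risk factors.
        self.fc = nn.Linear(rf_len + num_chan, num_classes)

    def forward(self, img_features, risk_factors):
        # Concatenate along the feature dimension, then classify.
        hidden = torch.cat([img_features, risk_factors], dim=1)
        return self.fc(self.dropout(hidden)), hidden

logit, hidden = ConcatPoolSketch()(torch.randn(8, 512), torch.randn(8, 100))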
Example #2
    def __init__(self, args, num_chan):
        super(RiskFactorPool, self).__init__(args, num_chan)
        self.args = args
        self.internal_pool = get_pool(args.pool_name)(args, num_chan)
        assert not self.internal_pool.replaces_fc()
        self.length_risk_factor_vector = RiskFactorVectorizer(args).vector_length

        input_dim = self.length_risk_factor_vector + num_chan

        self.fc1 = nn.Linear(input_dim, num_chan)
        self.relu = nn.ReLU(inplace=True)
        self.bn = nn.BatchNorm1d(num_chan)
        self.dropout = nn.Dropout(args.dropout)
        self.fc2 = nn.Linear(num_chan, args.num_classes)
        self.args.hidden_dim = num_chan
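
This variant swaps the single linear classifier for a small MLP head, and hidden_dim is set to num_chan because fc1 compresses the concatenated vector back down to that width. A sketch of the implied fc1 -> ReLU -> BatchNorm -> Dropout -> fc2 ordering; the ordering is inferred from the layer definitions, and every dimension here is a stand-in:

import torch
import torch.nn as nn

# Stand-in dimensions; the real values come from args in the example above.
num_chan, rf_len, num_classes = 512, 100, 2
head = nn.Sequential(
    nn.Linear(rf_len + num_chan, num_chan),  # fc1: back down to num_chan
    nn.ReLU(inplace=True),
    nn.BatchNorm1d(num_chan),
    nn.Dropout(0.1),
    nn.Linear(num_chan, num_classes))        # fc2: final classifier

combined = torch.cat([torch.randn(8, num_chan), torch.randn(8, rf_len)], dim=1)
logits = head(combined)  # shape (8, num_classes)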
Example #3
    def __init__(self, args, num_chan):
        super(RiskFactorPool, self).__init__(args, num_chan)
        self.args = args
        self.internal_pool = get_pool(args.pool_name)(args, num_chan)
        assert not self.internal_pool.replaces_fc()
        self.dropout = nn.Dropout(args.dropout)
        self.length_risk_factor_vector = RiskFactorVectorizer(args).vector_length
        if args.pred_risk_factors:
            for key in args.risk_factor_keys:
                num_key_features = args.risk_factor_key_to_num_class[key]
                key_fc = nn.Linear(self.args.hidden_dim, num_key_features)
                self.add_module('{}_fc'.format(key), key_fc)

        self.args.img_only_dim = self.args.hidden_dim
        self.args.rf_dim = self.length_risk_factor_vector
        self.args.hidden_dim = self.args.rf_dim + self.args.img_only_dim
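
The per-key heads registered above via add_module can later be fetched back with the same '{}_fc' naming convention. A sketch of that lookup pattern, with hypothetical risk factor keys and class counts standing in for args.risk_factor_keys and args.risk_factor_key_to_num_class:

import torch
import torch.nn as nn

heads = nn.Module()
hidden_dim = 512
# Hypothetical keys and class counts; not taken from the example above.
key_to_num_class = {'density': 4, 'menarche_age': 5}
for key, n in key_to_num_class.items():
    heads.add_module('{}_fc'.format(key), nn.Linear(hidden_dim, n))

hidden = torch.randn(8, hidden_dim)
# Retrieve each head by name and predict per-key logits.
preds = {key: getattr(heads, '{}_fc'.format(key))(hidden)
         for key in key_to_num_class}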
Example #4
    def __init__(self, layers, args):
        """Initializes a generalized resnet. Supports arbitrary block configurations per layer.

        Arguments:
            layers(list): A length-4 list with the list of block
                classes in each of the 4 layers. Blocks can be
                basic blocks, bottlenecks, non-locals, etc.
            args(Args): Configuration of the experiment. Used to determine
                the number of GPUs, CUDA mode, the number of output
                classes (args.num_classes), and so on.
        """

        super(ResNet, self).__init__()

        self.args = args
        self.args.wrap_model = False
        self.args.hidden_dim = 512 * args.block_widening_factor
        self.inplanes = max(64 * args.block_widening_factor, self.args.input_dim)

        self.all_blocks = []
        downsampler = Downsampler(self.inplanes, self.args.input_dim)
        self.add_module('downsampler', downsampler)
        self.all_blocks.append('downsampler')

        layer_modules = [(self._make_layer(self.inplanes, layers[0]), 'layer1_{}')]
        current_dim = self.inplanes
        indx = 1
        for layer_i in layers[1:]:
            indx += 1
            current_dim = min(current_dim * 2, 1024)
            layer_modules.append(
                (self._make_layer(current_dim, layer_i, stride=2),
                 'layer{}_'.format(indx) + '{}'))

        args.hidden_dim = current_dim

        # For all layers, register all constituent blocks to the module,
        # and record block names for later access in self.all_blocks.
        for layer, layer_name in layer_modules:
            for indx, block in enumerate(layer):
                block_name = layer_name.format(indx)
                self.add_module(block_name, block)
                self.all_blocks.append(block_name)

        last_block = layers[-1][-1]

        pool_name = args.pool_name
        if args.use_risk_factors:
            pool_name = 'DeepRiskFactorPool' if self.args.deep_risk_factor_pool else 'RiskFactorPool'
        self.pool = get_pool(pool_name)(args, args.hidden_dim)

        if not self.pool.replaces_fc():
            # Cannot be placed on self.all_blocks since it requires an intermediate op
            self.dropout = nn.Dropout(p=args.dropout)
            self.fc = nn.Linear(args.hidden_dim, args.num_classes)


        self.gpu_to_layer_assignments = self.get_gpu_to_layer()
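
Because every block is registered with add_module and its name is appended to self.all_blocks, the forward pass can presumably chain the blocks by getattr in recorded order. A self-contained sketch of that registration-and-chaining pattern; the placeholder convolutions stand in for the real blocks:

import torch
import torch.nn as nn

class BlockChainSketch(nn.Module):
    def __init__(self):
        super().__init__()
        self.all_blocks = []
        # Register placeholder blocks by name, as the ResNet above does.
        for i, block in enumerate([nn.Conv2d(3, 8, 3, padding=1),
                                   nn.Conv2d(8, 8, 3, padding=1)]):
            name = 'layer1_{}'.format(i)
            self.add_module(name, block)
            self.all_blocks.append(name)

    def forward(self, x):
        # Chain the registered blocks in recorded order.
        for name in self.all_blocks:
            x = getattr(self, name)(x)
        return x

out = BlockChainSketch()(torch.randn(2, 3, 32, 32))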
Example #5
    def __init__(self, layers, args):
        """Initializes a generalized resnet. Supports arbitrary block configurations per layer.

        Arguments:
            layers(list): A length-4 list with the list of block
                classes in each of the 4 layers. Blocks can be
                basic blocks, bottlenecks, non-locals, etc.
            args(Args): Configuration of the experiment. Used to determine
                the number of GPUs, CUDA mode, the number of output
                classes (args.num_classes), and so on.
        """

        super(ResNet, self).__init__()

        self.args = args
        self.args.wrap_model = False

        if getattr(args, 'use_spatial_transformer', False):
            self.stn = get_spatial_transformer(
                args.spatial_transformer_name)(args)

        self.args.hidden_dim = 512 * args.block_widening_factor
        input_dim = self.args.input_dim if self.args.use_precomputed_hiddens else self.args.num_chan
        self.inplanes = max(64 * args.block_widening_factor, input_dim)

        self.all_blocks = []
        if not self.args.use_precomputed_hiddens:
            downsampler = Downsampler(self.inplanes, input_dim)
            self.add_module('downsampler', downsampler)
            self.all_blocks.append('downsampler')

        layer_modules = [(self._make_layer(self.inplanes,
                                           layers[0]), 'layer1_{}')]
        current_dim = self.inplanes
        indx = 1
        for layer_i in layers[1:]:
            indx += 1
            current_dim = min(current_dim * 2, 1024)
            layer_modules.append(
                (self._make_layer(current_dim, layer_i,
                                  stride=2), 'layer{}_'.format(indx) + '{}'))
        args.hidden_dim = current_dim

        # For all layers, register all constituent blocks to the module,
        # and record block names for later access in self.all_blocks.
        for layer, layer_name in layer_modules:
            for indx, block in enumerate(layer):
                block_name = layer_name.format(indx)
                self.add_module(block_name, block)
                self.all_blocks.append(block_name)

        last_block = layers[-1][-1]

        pool_name = args.pool_name
        if args.use_risk_factors:
            pool_name = 'DeepRiskFactorPool' if self.args.deep_risk_factor_pool else 'RiskFactorPool'
        self.pool = get_pool(pool_name)(args, args.hidden_dim)

        if not self.pool.replaces_fc():
            # Cannot be placed on self.all_blocks since it requires an intermediate op
            self.relu = nn.ReLU(inplace=True)
            self.dropout = nn.Dropout(p=args.dropout)
            self.fc = nn.Linear(args.hidden_dim, args.num_classes)

        if args.use_region_annotation and args.region_annotation_loss_type == 'pred_region':
            self.region_fc = nn.Conv2d(
                current_dim,
                1,
                kernel_size=args.region_annotation_pred_kernel_size,
                padding=(args.region_annotation_pred_kernel_size - 1) // 2)

        if args.predict_birads:
            self.birads_fc = nn.Linear(args.hidden_dim, 2)

        if args.survival_analysis_setup:
            self.prob_of_failure_layer = Cumulative_Probability_Layer(
                args.hidden_dim, args, max_followup=args.max_followup)

        self.gpu_to_layer_assignments = self.get_gpu_to_layer()
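
The extra heads here (region_fc, birads_fc, prob_of_failure_layer) are only created when their flags are set, so the forward pass has to branch on their existence. A sketch of that flag-guarded-head pattern with stand-in dimensions; only the pattern, not the real head shapes, is taken from the example:

import torch
import torch.nn as nn

class OptionalHeadsSketch(nn.Module):
    """Stand-in showing the flag-guarded optional heads used above."""

    def __init__(self, hidden_dim=512, predict_birads=True):
        super().__init__()
        self.fc = nn.Linear(hidden_dim, 2)
        if predict_birads:
            self.birads_fc = nn.Linear(hidden_dim, 2)

    def forward(self, hidden):
        out = {'logit': self.fc(hidden)}
        # A head exists only when its flag was set, so guard the lookup.
        if hasattr(self, 'birads_fc'):
            out['birads_logit'] = self.birads_fc(hidden)
        return out

outputs = OptionalHeadsSketch()(torch.randn(4, 512))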