Code example #1
import numpy as np

# load_nifti, feedforward, and dice_coefficients are project-local helpers.
def validate(model, df, input_shape, output_shape, n_tiles, n_classes):
    """Return the mean Dice coefficient per class over a validation set."""
    dice_coefs = []
    for image_path, label_path in zip(df["preprocessed"], df["label"]):
        image = load_nifti(image_path)
        label = load_nifti(label_path)
        # tiled forward pass producing per-class scores, shape (n_classes, ...)
        output = feedforward(model, image, input_shape, output_shape, n_tiles,
                             n_classes)
        y = np.int32(np.argmax(output, axis=0))
        dice_coefs.append(dice_coefficients(y, label, labels=range(n_classes)))
    dice_coefs = np.array(dice_coefs)
    return np.mean(dice_coefs, axis=0)
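The dice_coefficients helper is not shown above. As a rough, hypothetical sketch (the name, signature, and the convention for labels absent from both volumes are assumptions, not the project's actual code), a per-label Dice computation could look like:

import numpy as np

def dice_coefficients(pred, truth, labels):
    # Dice(l) = 2 * |P_l ∩ T_l| / (|P_l| + |T_l|) for each label l
    coefs = []
    for l in labels:
        p = (pred == l)
        t = (truth == l)
        denom = p.sum() + t.sum()
        # assumption: define Dice as 1.0 when the label is absent from both volumes
        coefs.append(2.0 * np.logical_and(p, t).sum() / denom if denom else 1.0)
    return coefs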
Code example #2
File: tests.py  Project: YXSIO/coursera-mlclass
    def test_prediction(self):
        m, _ = self.X.shape
        X = np.hstack((np.ones((m, 1)), self.X))
        predictions = feedforward(X, self.theta1, self.theta2)

        # self.y encodes digit 0 as 10, so the one-hot target representation is shifted:
        # if y=10, the output layer looks like [0,0,0,0,0,0,0,0,0,1], so argmax == 9
        #          (i.e. 10 in Octave/MATLAB, which represents class 0);
        # if y=1,  the output layer looks like [1,0,0,0,0,0,0,0,0,0], so argmax == 0
        #          (i.e. 1 in Octave/MATLAB, which represents class 1).
        # The fix: subtract 1 from every element of y so that argmax is 0-indexed,
        # as Python expects.
        expected = (self.y - 1).reshape(-1)
        acc = accuracy(predictions, expected)
        self.assertAlmostEqual(acc, 97.5, places=1)
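A quick illustration of the label shift described in the comments above (the sample values are chosen arbitrarily):

import numpy as np

# The MATLAB-style data encodes digit 0 as class 10; subtracting 1 maps every
# label onto the 0-indexed positions returned by argmax in Python.
y_octave = np.array([10, 1, 5])
print(y_octave - 1)  # -> [9 0 4]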
Code example #3
    def __call__(self, inputs, mask):
        '''
        Args:
            inputs: sequence embeddings (item_embeddings + pos_embeddings),
                shape (batch_size, max_len, embedding_size)
            mask: padding mask, shape (batch_size, max_len, 1)
        Return:
            Output sequence with the same shape as inputs
        '''
        if self.pos_fixed:  # use sin/cos positional encoding
            position_encoding = self.get_position_encoding(
                inputs)  # (batch_size, max_len, num_units)
            # Add the encoding only when it is defined; the original code
            # referenced position_encoding unconditionally, which raises a
            # NameError when self.pos_fixed is False.
            inputs += position_encoding

        inputs *= mask
        for i in range(self.num_blocks):
            with tf.variable_scope("num_blocks_%d" % i):
                # Self-attention
                inputs = multihead_attention(
                    queries=layer_normalization(inputs),
                    keys=inputs,
                    num_units=self.num_units,
                    num_heads=self.num_heads,
                    dropout_keep_prob=self.dropout_keep_prob,
                    causality=False,
                    scope="self_attention")

                # Feed forward
                inputs = feedforward(
                    layer_normalization(inputs),
                    num_units=[self.num_units, self.num_units],
                    dropout_keep_prob=self.dropout_keep_prob)

                inputs *= mask
        outputs = layer_normalization(
            inputs)  # (batch_size, max_len, num_units)
        return outputs
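get_position_encoding is not shown here. Assuming it follows the standard sinusoidal scheme from "Attention Is All You Need" (a sketch under that assumption, not this project's exact code), it could be implemented as:

import numpy as np

def sinusoidal_position_encoding(max_len, num_units):
    # PE[pos, 2i]   = sin(pos / 10000^(2i / num_units))
    # PE[pos, 2i+1] = cos(pos / 10000^(2i / num_units))
    pos = np.arange(max_len)[:, None]   # (max_len, 1)
    i = np.arange(num_units)[None, :]   # (1, num_units)
    angles = pos / np.power(10000.0, (2 * (i // 2)) / num_units)
    enc = np.zeros((max_len, num_units))
    enc[:, 0::2] = np.sin(angles[:, 0::2])
    enc[:, 1::2] = np.cos(angles[:, 1::2])
    return enc  # broadcast over the batch dimension to (batch_size, max_len, num_units)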
Code example #4
import argparse
import json
import os

import chainer
import nibabel as nib
import numpy as np
import pandas as pd

# VoxResNet, load_nifti, and feedforward are project-local helpers.


def main():
    parser = argparse.ArgumentParser(
        description="calculate class probabilities with VoxResNet")
    parser.add_argument("--input_file",
                        "-i",
                        type=str,
                        help="input json file of test dataset")
    parser.add_argument("--output_suffix",
                        "-o",
                        type=str,
                        help="result of the segmentation")
    parser.add_argument(
        "--model",
        "-m",
        type=str,
        help="a file containing parameters of trained VoxResNet")
    parser.add_argument(
        "--input_shape",
        type=int,
        nargs="*",
        action="store",
        default=[80, 80, 80],
        help="input patch shape of VoxResNet, default=[80, 80, 80]")
    parser.add_argument(
        "--output_shape",
        type=int,
        nargs="*",
        action="store",
        default=[60, 60, 60],
        help="output patch shape of VoxResNet, default=[60, 60, 60]")
    parser.add_argument("--gpu",
                        "-g",
                        default=-1,
                        type=int,
                        help="negative value indicates no gpu, default=-1")
    parser.add_argument("--n_tiles",
                        type=int,
                        nargs="*",
                        action="store",
                        default=[5, 5, 5],
                        help="number of tiles along each axis")
    args = parser.parse_args()
    print(args)

    with open(args.input_file) as f:
        dataset = json.load(f)
    test_df = pd.DataFrame(dataset["data"])

    vrn = VoxResNet(dataset["in_channels"], dataset["n_classes"])
    chainer.serializers.load_npz(args.model, vrn)

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        vrn.to_gpu()

    for image_path, subject in zip(test_df["image"], test_df["subject"]):
        image, affine = load_nifti(image_path, with_affine=True)
        output = feedforward(vrn, image, args.input_shape, args.output_shape,
                             args.n_tiles, dataset["n_classes"])

        # normalize the per-voxel class scores into probabilities
        output /= np.sum(output, axis=0, keepdims=True)

        nib.save(
            nib.Nifti1Image(np.float32(output).transpose(1, 2, 3, 0), affine),
            os.path.join(os.path.dirname(image_path),
                         subject + args.output_suffix))
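Assuming this script were saved as predict.py (the file name is not given above), a typical invocation would look like:

python predict.py -i test_dataset.json -o _segTRI_proba.nii.gz -m vrn.npz \
    -g 0 --input_shape 80 80 80 --output_shape 60 60 60 --n_tiles 5 5 5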
Code example #5
        # Dropout
        embeds = tf.nn.dropout(embeds, keep_prob=dropout_keep_prob)
        enc = embeds
        # Blocks
        for i in range(conf.num_blocks):
            with tf.variable_scope("num_blocks_{}".format(i)):
                # Multihead Attention
                enc = utils.multihead_attention(queries=enc,
                                                keys=embeds,
                                                num_units=hidden_units,
                                                num_heads=10,
                                                # NOTE: assuming utils.multihead_attention
                                                # treats dropout_rate as a drop probability
                                                # (as in common Transformer implementations),
                                                # the keep prob must be inverted here.
                                                dropout_rate=1 - dropout_keep_prob,
                                                causality=False)

                # Feed Forward
                enc = utils.feedforward(
                    enc, num_units=[4 * hidden_units, hidden_units])
        text_embeddings = tf.reduce_mean(enc, axis=1)
    else:
        tf.logging.info("1D Convolution Model")
        sizes = range(2, 5)
        result_tensors = []
        for ngram_size in sizes:
            # 256 filters with kernel sizes 2 and 3 have performed best so far.
            text_conv1d = tf.layers.conv1d(
                inputs=embeds,
                filters=256,
                kernel_size=ngram_size,
                strides=1,
                padding='same',
                dilation_rate=1,
                activation='relu',