Example #1
 def predict(self, images, oversample=False):
     """Computes all the probabilities of given images.
     Args:
         images (iterable of PIL.Image or numpy.ndarray): Input images.
             When you specify a color image as a :class:`numpy.ndarray`,
             make sure that color order is RGB.
         oversample (bool): If ``True``, it averages results across
             center, corners, and mirrors. Otherwise, it uses only the
             center.
     Returns:
         ~chainer.Variable: Output that contains the class probabilities
         of given images.
     """
     # Use no_backprop_mode to reduce memory consumption during inference
     x = images
     with function.no_backprop_mode(), chainer.using_config('train', False):
         # Take fc6 features from the base network, then the fc7 classification head
         y = self.extract(x, layers=['fc6'])['fc6']
         y = self.fc7(y)
         y = softmax(y)  # convert logits to class probabilities
     return y
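A minimal usage sketch for predict() above, assuming the docstring's input contract (an iterable of RGB PIL.Image objects); the file name and model instance are hypothetical placeholders:
from PIL import Image
img = Image.open('example.png').convert('RGB')  # hypothetical input image
probs = model.predict([img])                    # ~chainer.Variable, shape (1, n_class)
top1 = int(probs.data.argmax(axis=1)[0])        # most probable class index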
Example #2
def gumbel_softmax(log_pi, tau=0.1, axis=1):
    """Gumbel-Softmax sampling function.

    This function draws samples :math:`y_i` from Gumbel-Softmax distribution,
        :math:`y_i = {\\exp((g_i + \\log\\pi_i)/\\tau) \
                    \\over \\sum_{j}\\exp((g_j + \\log\\pi_j)/\\tau)}`,
        where :math:`\\tau` is a temperature parameter and the \
            :math:`g_i` are samples drawn from the \
            Gumbel distribution :math:`Gumbel(0, 1)`.

    See `Categorical Reparameterization with Gumbel-Softmax \
    <https://arxiv.org/abs/1611.01144>`_.

    Args:
        log_pi (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
        :class:`cupy.ndarray`): Input variable representing pre-normalized
            log-probability :math:`\\log\\pi`.
        tau (:class:`~float`): Input variable representing \
            temperature :math:`\\tau`.
        axis (int): The axis along which the softmax is applied.

    Returns:
        ~chainer.Variable: Output variable.

    """
    xp = cuda.get_array_module(log_pi)
    dtype = log_pi.dtype
    g = xp.random.gumbel(size=log_pi.shape).astype(dtype)
    y = softmax((log_pi + g) / tau, axis=axis)

    return y
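A quick sanity check for gumbel_softmax above, assuming the same softmax and cuda imports are in scope; with a low temperature the sampled rows approach one-hot vectors:
import numpy as np
log_pi = np.log(np.array([[0.7, 0.2, 0.1]], dtype=np.float32))
y = gumbel_softmax(log_pi, tau=0.1)  # ~chainer.Variable; each row sums to 1
print(y.data)                        # near one-hot; index 0 sampled most often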
Example #3
def softmax_generalized_dice_loss(x, t, eps=1e-7):
    x1 = softmax.softmax(x, axis=1)
    return generalized_dice_loss(x1, t, eps)
Example #4
def _forward(x):
    h = Variable(x)
    # ReLU after every layer except the last, which feeds the softmax
    for layer in range(len(model) - 1):
        h = F.relu(model[layer](h))
    return softmax(model[-1](h)).data
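_forward relies on a module-level model holding a sequence of callable links; a hypothetical setup (layer sizes are placeholders, the chainer imports used above are assumed):
import numpy as np
import chainer.links as L
model = [L.Linear(784, 256), L.Linear(256, 10)]  # hypothetical two-layer MLP
probs = _forward(np.zeros((1, 784), dtype=np.float32))  # (1, 10) probabilities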
Example #5
def main():

    start_time = time.time()
    ap = ArgumentParser(description='python test_cc.py')
    ap.add_argument('--indir',
                    '-i',
                    nargs='?',
                    default='datasets/test',
                    help='Specify input files directory for learning data')
    ap.add_argument(
        '--outdir',
        '-o',
        nargs='?',
        default='results/result_test',
        help='Specify output files directory for create save model files')
    ap.add_argument('--test_list',
                    nargs='?',
                    default='datasets/split_list/test.list',
                    help='Specify split test list')
    ap.add_argument(
        '--init_model',
        help='Specify Loading File Path of Learned Cell Classification Model')
    ap.add_argument('--gpu',
                    '-g',
                    type=int,
                    default=-1,
                    help='Specify GPU ID (negative value indicates CPU)')
    ap.add_argument('--crop_size',
                    nargs='?',
                    default='(640, 640)',
                    help='Specify crop size (default (y,x) = (640,640))')
    ap.add_argument(
        '--coordinate',
        nargs='?',
        default='(780, 1480)',
        help='Specify initial coordinate (default (y,x) = (780,1480))')
    ap.add_argument('--nclass',
                    type=int,
                    default=10,
                    help='Specify classification class')

    args = ap.parse_args()
    argvs = sys.argv
    current_datetime = datetime.now(
        pytz.timezone('Asia/Tokyo')).strftime('%Y%m%d_%H%M%S')
    opbase = args.outdir + '_' + str(current_datetime)
    os.makedirs(opbase, exist_ok=True)

    print('init dataset...')
    test_dataset = PreprocessedClassificationDataset(
        path=args.indir,
        split_list=args.test_list,
        crop_size=args.crop_size,
        coordinate=args.coordinate,
        train=False)

    print('init model construction')
    model = Classifier(CCNet(n_class=args.nclass),
                       lossfun=F.softmax_cross_entropy)

    if args.init_model is not None:
        print('Load model from', args.init_model)
        chainer.serializers.load_npz(args.init_model, model)

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    with open(os.path.join(opbase, 'result.csv'), 'w') as f:
        writer = csv.writer(f, lineterminator='\n')
        writer.writerow(['file name', 'prediction', 'label'])

    tp_cnt = 0
    for num in range(len(test_dataset)):
        image, _ = test_dataset.get_example(num)
        label = float(test_dataset.split_list[num]
                      [:test_dataset.split_list[num].find('_')])
        x = np.expand_dims(image, axis=0)
        if args.gpu >= 0:
            x = chainer.cuda.to_gpu(x)
        y = model.predict(x)
        if args.gpu >= 0:
            y = chainer.cuda.to_cpu(y.data)
        pre = np.argmax(softmax.softmax(y).data[0]) + 1

        if int(round(label)) == pre:
            print('True')
            tp_cnt += 1
        else:
            print('False')

        with open(os.path.join(opbase, 'result.csv'), 'a') as f:
            writer = csv.writer(f, lineterminator='\n')
            writer.writerow([test_dataset.split_list[num], pre, label])

    with open(os.path.join(opbase, 'result.txt'), 'w') as f:
        f.write('Accuracy: {}%'.format(
            (tp_cnt / len(test_dataset)) * 100))

    end_time = time.time()
    etime = end_time - start_time
    print('Elapsed time is (sec) {}'.format(etime))
    print('CCN Completed Process!')
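The script is driven entirely by argparse; a typical invocation (paths hypothetical, flags as defined above) looks like:
python test_cc.py --indir datasets/test --test_list datasets/split_list/test.list --init_model learned_model.npz --gpu 0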
Example #6
 def predict(self, x):
     # conv -> pool, two fully connected layers, then softmax probabilities
     h1 = F.max_pooling_2d(F.relu(self.conv1(x)), self.n_kernel)
     h1_2 = F.relu(self.l1_2(h1))
     h2 = F.relu(self.l1(h1_2))
     y = self.l2(h2)
     return softmax(y)
Example #7
def predict(model, params, vocab, inv_vocab, src, batch_size, beam_size=1):

    state = initial_state(xp, batch_size, params.hidden_size)

    model.reset()

    for n in range(len(src[0])):
        xb = chainer.Variable(src[0][n].reshape((1, params.input_size)))
        ib = chainer.Variable(xp.array([vocab['']], dtype=xp.int32))
        state = model.encode(xb, ib, state)

    final_sentences = [([], None, None) for i in range(beam_size)]
    sentence_candidate = [([], None, None) for i in range(beam_size)]
    sentence_candidate_tmp = [([], None, None)
                              for i in range(beam_size * beam_size)]
    success = 0
    depth = 0

    sentence_candidate[0] = (['BOS'], state, 0)
    k = 1

    while success < beam_size and depth < 20:
        frame = chainer.Variable(
            xp.zeros((1, params.input_size), dtype=xp.float32))
        j = 0
        for i in range(k):
            sentence_tuple = sentence_candidate[i]
            cur_sentence = sentence_tuple[0]
            cur_index = sentence_tuple[0][-1]
            cur_state = sentence_tuple[1]
            cur_log_likely = sentence_tuple[2]

            prev_word = chainer.Variable(
                xp.array([vocab[cur_index]], dtype=xp.int32))
            y, state = model.decode(frame, prev_word, cur_state, batch_size,
                                    xp)
            y_np = cuda.to_cpu(softmax(y).data)
            top_indexes = (y_np[0]).argsort(0)[::-1][:beam_size]
            for index in np.nditer(top_indexes):
                index = int(index)
                probability = y_np[0][index]
                next_sentence = deepcopy(cur_sentence)
                next_sentence.append(inv_vocab[index])
                log_likely = mth.log(probability)
                next_log_likely = cur_log_likely + log_likely
                sentence_candidate_tmp[j] = (next_sentence, state,
                                             next_log_likely)
                j += 1

        prob_np_array = np.array([
            sentence_tuple[2] for sentence_tuple in sentence_candidate_tmp[:j]
        ])
        top_candidates_indexes = (prob_np_array).argsort()[::-1][:beam_size]
        k = 0
        for i in top_candidates_indexes:
            sentence_tuple = sentence_candidate_tmp[i]
            word = sentence_tuple[0][-1]
            if word == 'EOS':
                final_sentence = sentence_tuple[0]
                final_likely = sentence_tuple[2]
                final_probability = mth.exp(final_likely / len(final_sentence))
                final_sentences[success] = (final_sentence, final_probability,
                                            final_likely)
                success += 1
                if success == beam_size:
                    break
            else:
                sentence_candidate[k] = sentence_tuple
                k += 1

        depth += 1

    candidates = []
    for sentence_tuple in final_sentences:
        sentence = sentence_tuple[0][1:-1]  # drop BOS/EOS
        final_probability = sentence_tuple[1]
        a_candidate = {'sentence': sentence, 'probability': final_probability}
        candidates.append(a_candidate)
    scores = [caption['probability'] for caption in candidates]
    argmax = np.argmax(scores)
    top_caption = candidates[argmax]['sentence']

    return ' '.join(top_caption)
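The final ranking relies on the length-normalized likelihood computed for each finished sentence; a small worked example of that normalization (numbers are illustrative only):
import math
log_likely = -6.0                    # summed per-word log-probabilities
sentence_len = 5                     # word count including BOS/EOS
score = math.exp(log_likely / sentence_len)  # exp(-1.2) ~ 0.301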
Example #8
def softmax_dice_loss(x, t, eps=1e-7):
    return DiceLoss(eps)(softmax.softmax(x, axis=1), t)
Example #9
def softmax_dice_loss(x, t, eps=1e-7, weight=False, encode=True):
    x1 = softmax.softmax(x, axis=1)
    return dice_loss(x1, t, eps, weight, encode)
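The dice wrappers in this listing (Examples #3, #8, and #9) all apply a channel-axis softmax before the dice computation; a hedged usage sketch (the shapes and integer-label format are assumptions, since dice_loss itself is not shown):
import numpy as np
logits = np.random.randn(4, 10, 64, 64).astype(np.float32)  # (batch, class, y, x)
labels = np.random.randint(0, 10, (4, 64, 64)).astype(np.int32)
loss = softmax_dice_loss(logits, labels, eps=1e-7)  # softmax over axis=1, then dice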