Code Example #1
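This `test` routine evaluates a trained 3D segmentation model: it resolves the weights path (falling back to random weights if loading fails), predicts each test volume slice-by-slice with `predict_generator`, saves the raw and thresholded outputs as SimpleITK images, renders a three-slice qualitative overlay figure, and appends Dice, Jaccard, and ASSD scores to a CSV. The listing omits its imports; a plausible minimal header is sketched below, where the two project-local helpers (`generate_test_batches`, `threshold_mask`) are assumptions about module layout rather than confirmed paths.

from os import makedirs
from os.path import join
import csv

import numpy as np
import SimpleITK as sitk
import matplotlib.pyplot as plt
from tqdm import tqdm
from keras.utils import print_summary
from medpy.metric.binary import dc, jc, assd

# Project-local helpers; the module names below are assumptions, not confirmed paths.
# from load_3D_data import generate_test_batches
# from custom_functions import threshold_mask
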
def test(args, test_list, model_list, net_input_shape):
    if args.weights_path == '':
        weights_path = join(args.check_dir,
                            args.output_name + '_model_' + args.time + '.hdf5')
    else:
        weights_path = join(args.data_root_dir, args.weights_path)

    output_dir = join(args.data_root_dir, 'results', args.net,
                      'split_' + str(args.split_num))
    raw_out_dir = join(output_dir, 'raw_output')
    fin_out_dir = join(output_dir, 'final_output')
    fig_out_dir = join(output_dir, 'qual_figs')
    try:
        makedirs(raw_out_dir)
    except FileExistsError:
        pass
    try:
        makedirs(fin_out_dir)
    except FileExistsError:
        pass
    try:
        makedirs(fig_out_dir)
    except FileExistsError:
        pass

    if len(model_list) > 1:
        eval_model = model_list[1]
    else:
        eval_model = model_list[0]
    try:
        eval_model.load_weights(weights_path)
    except OSError:
        print('Unable to find weights path. Testing with random weights.')
    print_summary(model=eval_model, positions=[.38, .65, .75, 1.])

    # Set up placeholders
    outfile = ''
    if args.compute_dice:
        dice_arr = np.zeros((len(test_list)))
        outfile += 'dice_'
    if args.compute_jaccard:
        jacc_arr = np.zeros((len(test_list)))
        outfile += 'jacc_'
    if args.compute_assd:
        assd_arr = np.zeros((len(test_list)))
        outfile += 'assd_'

    # Testing the network
    print('Testing... This will take some time...')

    with open(join(output_dir, args.save_prefix + outfile + 'scores.csv'),
              'w', newline='') as csvfile:
        writer = csv.writer(csvfile,
                            delimiter=',',
                            quotechar='|',
                            quoting=csv.QUOTE_MINIMAL)

        row = ['Scan Name']
        if args.compute_dice:
            row.append('Dice Coefficient')
        if args.compute_jaccard:
            row.append('Jaccard Index')
        if args.compute_assd:
            row.append('Average Symmetric Surface Distance')

        writer.writerow(row)

        for i, img in enumerate(tqdm(test_list)):
            sitk_img = sitk.ReadImage(join(args.data_root_dir, 'imgs', img[0]))
            img_data = sitk.GetArrayFromImage(sitk_img)
            num_slices = img_data.shape[0]

            output_array = eval_model.predict_generator(
                generate_test_batches(args.data_root_dir, [img],
                                      net_input_shape,
                                      batchSize=args.batch_size,
                                      numSlices=args.slices,
                                      subSampAmt=0,
                                      stride=1),
                steps=num_slices,
                max_queue_size=1,
                workers=1,
                use_multiprocessing=False,
                verbose=1)

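            # Capsule-network variants return [segmentation, reconstruction];
            # only the segmentation head is used here.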
            if args.net.find('caps') != -1:
                output = output_array[0][:, :, :, 0]
                #recon = output_array[1][:,:,:,0]
            else:
                output = output_array[:, :, :, 0]

            output_img = sitk.GetImageFromArray(output)
            print('Segmenting Output')
            output_bin = threshold_mask(output, args.thresh_level)
            output_mask = sitk.GetImageFromArray(output_bin)

            output_img.CopyInformation(sitk_img)
            output_mask.CopyInformation(sitk_img)

            print('Saving Output')
            sitk.WriteImage(
                output_img,
                join(raw_out_dir, img[0][:-4] + '_raw_output' + img[0][-4:]))
            sitk.WriteImage(
                output_mask,
                join(fin_out_dir, img[0][:-4] + '_final_output' + img[0][-4:]))

            # Load gt mask
            sitk_mask = sitk.ReadImage(
                join(args.data_root_dir, 'masks', img[0]))
            gt_data = sitk.GetArrayFromImage(sitk_mask)

            # Plot Qual Figure
            print('Creating Qualitative Figure for Quick Reference')
            f, ax = plt.subplots(1, 3, figsize=(15, 5))

            ax[0].imshow(img_data[img_data.shape[0] // 3, :, :],
                         alpha=1,
                         cmap='gray')
            ax[0].imshow(output_bin[img_data.shape[0] // 3, :, :],
                         alpha=0.5,
                         cmap='Blues')
            ax[0].imshow(gt_data[img_data.shape[0] // 3, :, :],
                         alpha=0.2,
                         cmap='Reds')
            ax[0].set_title('Slice {}/{}'.format(img_data.shape[0] // 3,
                                                 img_data.shape[0]))
            ax[0].axis('off')

            ax[1].imshow(img_data[img_data.shape[0] // 2, :, :],
                         alpha=1,
                         cmap='gray')
            ax[1].imshow(output_bin[img_data.shape[0] // 2, :, :],
                         alpha=0.5,
                         cmap='Blues')
            ax[1].imshow(gt_data[img_data.shape[0] // 2, :, :],
                         alpha=0.2,
                         cmap='Reds')
            ax[1].set_title('Slice {}/{}'.format(img_data.shape[0] // 2,
                                                 img_data.shape[0]))
            ax[1].axis('off')

            ax[2].imshow(img_data[img_data.shape[0] // 2 +
                                  img_data.shape[0] // 4, :, :],
                         alpha=1,
                         cmap='gray')
            ax[2].imshow(output_bin[img_data.shape[0] // 2 +
                                    img_data.shape[0] // 4, :, :],
                         alpha=0.5,
                         cmap='Blues')
            ax[2].imshow(gt_data[img_data.shape[0] // 2 +
                                 img_data.shape[0] // 4, :, :],
                         alpha=0.2,
                         cmap='Reds')
            ax[2].set_title('Slice {}/{}'.format(
                img_data.shape[0] // 2 + img_data.shape[0] // 4,
                img_data.shape[0]))
            ax[2].axis('off')

            fig = plt.gcf()
            fig.suptitle(img[0][:-4])

            plt.savefig(join(fig_out_dir, img[0][:-4] + '_qual_fig' + '.png'),
                        format='png',
                        bbox_inches='tight')
            plt.close('all')

            row = [img[0][:-4]]
            if args.compute_dice:
                print('Computing Dice')
                dice_arr[i] = dc(output_bin, gt_data)
                print('\tDice: {}'.format(dice_arr[i]))
                row.append(dice_arr[i])
            if args.compute_jaccard:
                print('Computing Jaccard')
                jacc_arr[i] = jc(output_bin, gt_data)
                print('\tJaccard: {}'.format(jacc_arr[i]))
                row.append(jacc_arr[i])
            if args.compute_assd:
                print('Computing ASSD')
                assd_arr[i] = assd(output_bin,
                                   gt_data,
                                   voxelspacing=sitk_img.GetSpacing(),
                                   connectivity=1)
                print('\tASSD: {}'.format(assd_arr[i]))
                row.append(assd_arr[i])

            writer.writerow(row)

        row = ['Average Scores']
        if args.compute_dice:
            row.append(np.mean(dice_arr))
        if args.compute_jaccard:
            row.append(np.mean(jacc_arr))
        if args.compute_assd:
            row.append(np.mean(assd_arr))
        writer.writerow(row)

    print('Done.')
Code Example #2
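This `predict` variant runs inference without ground-truth labels: it loads weights either from an explicit `test_weights_path` or from the checkpoint written during training (raising if neither loads), runs each sample through `predict_generator` one at a time, converts the raw outputs to hard predictions (argmax when there are more than two classes, a 0.5 threshold otherwise), and writes scan names, predictions, and raw scores to `preds.csv`. Beyond the imports sketched for Example #1 it uses `os` directly, and its `generate_test_batches` takes additional keyword arguments (`mod_dirs`, `exp_name`, `net`, `MIP_choices`), so it is a different project-local data loader than the one above.
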
def predict(args, pred_list, pred_model, net_input_shape):
    # Set the path to the prediction weights for the model, either based on user or on training
    if args.test_weights_path != '':
        output_dir = os.path.join(
            args.data_root_dir, 'predictions', args.exp_name, args.net,
            'split_{}'.format(args.split_num),
            os.path.basename(args.test_weights_path)[:-5])
        try:
            pred_model.load_weights(args.test_weights_path)
        except Exception as e:
            print(e)
            raise Exception('Failed to load weights from training.')
    else:
        output_dir = os.path.join(args.data_root_dir, 'predictions',
                                  args.exp_name, args.net,
                                  'split_{}'.format(args.split_num),
                                  args.output_name + '_model_' + args.time)
        try:
            pred_model.load_weights(
                os.path.join(
                    args.check_dir,
                    args.output_name + '_model_' + args.time + '.hdf5'))
        except Exception as e:
            print(e)
            raise Exception('Failed to load weights from training.')

    # Create an output directory for saving predictions
    try:
        os.makedirs(output_dir)
    except FileExistsError:
        pass

    # Create a CSV for saving the predictions
    with open(os.path.join(output_dir, args.save_prefix + 'preds.csv'),
              'w') as csvfile:
        writer = csv.writer(csvfile,
                            delimiter=',',
                            quotechar='|',
                            quoting=csv.QUOTE_MINIMAL)

        row = ['Scan Name', 'Prediction']
        writer.writerow(row)

        # Running predictions through the network
        print('Predicting... This will take some time...')
        output_array = []
        for test_sample in pred_list:
            output_array.append(
                pred_model.predict_generator(generate_test_batches(
                    root_path=args.data_root_dir,
                    test_list=[test_sample],
                    net_shape=net_input_shape,
                    mod_dirs=args.modality_dir_list,
                    exp_name=args.exp_name,
                    net=args.net,
                    MIP_choices=args.MIP_choices,
                    batchSize=1,
                    numSlices=args.slices,
                    subSampAmt=0,
                    stride=1),
                                             steps=1,
                                             max_queue_size=1,
                                             workers=1,
                                             use_multiprocessing=False,
                                             verbose=1))

        # Convert the network output to predictions
        if args.num_classes > 2:
            output = np.argmax(np.squeeze(
                np.asarray(output_array, dtype=np.float32)),
                               axis=1)
        else:
            output = np.copy(np.asarray(output_array, dtype=np.float32)[:, 0])
            output[output < 0.5] = 0
            output[output >= 0.5] = 1

        # Save the names and predictions to a file
        name_list = [x.split('/')[1] for x in np.asarray(pred_list)[:, 0]]
        print(list(zip(name_list, output, np.squeeze(np.asarray(output_array)))))
        writer.writerows(
            zip(name_list, output, np.squeeze(np.asarray(output_array))))

    print('Done.')
Code Example #3
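This variant of Example #1 works on 2D slices stored as `.npz` files: instead of reading images and ground-truth masks with SimpleITK, it loads the `img` and `mask` arrays from `np_files/<name>.npz`, keeps a single slice of each, and plots the predicted mask, the true mask, and an overlay comparison into a per-prefix figure directory. Note that the per-scan ASSD computation has been dropped, so if `compute_assd` is set the final 'Average Scores' row reports the mean of an all-zero `assd_arr`.
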
def test(args, test_list, model_list, net_input_shape):
    if args.weights_path == '':
        weights_path = join(args.check_dir,
                            args.output_name + '_model_' + args.time + '.hdf5')
    else:
        weights_path = join(args.data_root_dir, args.weights_path)

    output_dir = join(args.data_root_dir, 'results', args.net,
                      'split_' + str(args.split_num))
    raw_out_dir = join(output_dir, 'raw_output')
    fin_out_dir = join(output_dir, 'final_output')
    fig_out_dir = join(output_dir, 'qual_figs')
    try:
        makedirs(raw_out_dir)
    except FileExistsError:
        pass
    try:
        makedirs(fin_out_dir)
    except FileExistsError:
        pass
    try:
        makedirs(fig_out_dir)
    except FileExistsError:
        pass

    if len(model_list) > 1:
        eval_model = model_list[1]
    else:
        eval_model = model_list[0]
    try:
        eval_model.load_weights(weights_path)
    except FileNotFoundError:
        print('Unable to find weights path. Testing with random weights.')
    print_summary(model=eval_model, positions=[.38, .65, .75, 1.])

    # Set up placeholders
    outfile = ''
    if args.compute_dice:
        dice_arr = np.zeros((len(test_list)))
        outfile += 'dice_'
    if args.compute_jaccard:
        jacc_arr = np.zeros((len(test_list)))
        outfile += 'jacc_'
    if args.compute_assd:
        assd_arr = np.zeros((len(test_list)))
        outfile += 'assd_'

    # Testing the network
    print('Testing... This will take some time...')

    with open(join(output_dir, args.save_prefix + outfile + 'scores.csv'),
              'w') as csvfile:
        writer = csv.writer(csvfile,
                            delimiter=',',
                            quotechar='|',
                            quoting=csv.QUOTE_MINIMAL)

        row = ['Scan Name']
        if args.compute_dice:
            row.append('Dice Coefficient')
        if args.compute_jaccard:
            row.append('Jaccard Index')
        if args.compute_assd:
            row.append('Average Symmetric Surface Distance')

        writer.writerow(row)

        for i, img in enumerate(tqdm(test_list)):
            output_array = eval_model.predict_generator(
                generate_test_batches(args.data_root_dir, [img],
                                      net_input_shape,
                                      batchSize=args.batch_size,
                                      numSlices=args.slices,
                                      subSampAmt=0,
                                      stride=1),
                steps=1,
                max_queue_size=0,
                workers=1,
                use_multiprocessing=False,
                verbose=1)

            if args.net.find('caps') != -1:
                output = output_array[0][:, :, :, 0]
                # recon = output_array[1][:,:,:,0]
            else:
                output = output_array[:, :, :, 0]
            print('output')
            print(output.shape)
            # output_img = sitk.GetImageFromArray(output)
            print('Segmenting Output')
            output_bin = threshold_mask(output, args.thresh_level)
            output_bin = output_bin[0, :, :]
            # (raw_output, threshold)
            # output_mask = sitk.GetImageFromArray(output_bin)
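            # Load the matching image and ground-truth mask from the pre-saved .npz file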
            path_to_np = join(args.data_root_dir, 'np_files',
                              img[0][:-3] + 'npz')
            sitk_mask = np.load(path_to_np)
            print('mask')
            gt_data = sitk_mask['mask']
            gt_data = gt_data[:, :, 0]
            intn_data = sitk_mask['img']
            intn_data = intn_data[:, :, 0]
            print(gt_data.shape)

            print('Saving Output')
            indiv_fig_dir = join(fig_out_dir, args.save_prefix)
            try:
                makedirs(indiv_fig_dir)
            except FileExistsError:
                pass

            # Generate image
            f, ax = plt.subplots(1, 3, figsize=(15, 5))
            ax[0].imshow(intn_data, alpha=1, cmap='gray')
            ax[0].imshow(output_bin, alpha=0.2, cmap='Reds')
            ax[0].set_title('Predict Mask')
            ax[1].imshow(intn_data, alpha=1, cmap='gray')
            ax[1].imshow(gt_data, alpha=0.2, cmap='Blues')
            ax[1].set_title('True Mask')
            ax[2].imshow(output_bin, alpha=0.3, cmap='Reds')
            ax[2].imshow(gt_data, alpha=0.3, cmap='Blues')
            ax[2].set_title('Comparison')
            fig = plt.gcf()
            fig.suptitle(img[0][:-4])
            plt.savefig(join(indiv_fig_dir,
                             img[0][:-4] + '_qual_fig' + '.png'),
                        format='png',
                        bbox_inches='tight')
            plt.close('all')

            row = [img[0][:-4]]
            if args.compute_dice:
                print('Computing Dice')
                dice_arr[i] = dc(output_bin, gt_data)
                print('\tDice: {}'.format(dice_arr[i]))
                row.append(dice_arr[i])
            if args.compute_jaccard:
                print('Computing Jaccard')
                jacc_arr[i] = jc(output_bin, gt_data)
                print('\tJaccard: {}'.format(jacc_arr[i]))
                row.append(jacc_arr[i])

            writer.writerow(row)

        row = ['Average Scores']
        if args.compute_dice:
            row.append(np.mean(dice_arr))
        if args.compute_jaccard:
            row.append(np.mean(jacc_arr))
        if args.compute_assd:
            row.append(np.mean(assd_arr))
        writer.writerow(row)

    print('Done.')
Code Example #4
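This multi-class 2D variant works on image files loaded with PIL: it argmaxes the per-class network output, maps class indices to evenly spaced gray levels before saving the label map, computes multi-class Dice and Jaccard with custom `DiceMetric`/`JaccardMetric` callables, accumulates per-volume averages, and finishes with per-class one-vs-rest ROC curves and AUC scores. In addition to the earlier imports it needs `PIL.Image`, a `softmax` (for example `scipy.special.softmax`), `roc_curve` and `auc` from `sklearn.metrics`, and the project-local helpers `num_labels`, `mask_to_rgb`, `plot_roc_curves`, `DiceMetric`, and `JaccardMetric`.
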
def test(args, test_list, model_list, net_input_shape):
    if args.weights_path == '':
        weights_path = join(args.check_dir, args.output_name + '_model_' + args.time + '.hdf5')
    else:
        weights_path = join(args.data_root_dir, args.weights_path)

    output_dir = join(args.data_root_dir, 'results', args.net)
    raw_out_dir = join(output_dir, 'raw_output')
    fin_out_dir = join(output_dir, 'final_output')
    fig_out_dir = join(output_dir, 'qual_figs')
    try:
        makedirs(raw_out_dir)
    except FileExistsError:
        pass
    try:
        makedirs(fin_out_dir)
    except FileExistsError:
        pass
    try:
        makedirs(fig_out_dir)
    except FileExistsError:
        pass

    if len(model_list) > 1:
        eval_model = model_list[1]
    else:
        eval_model = model_list[0]
    try:
        eval_model.load_weights(weights_path)
    except OSError:
        print('Unable to find weights path. Testing with random weights.')
    # print_summary(model=eval_model, positions=[.38, .65, .75, 1.])
    eval_model.summary()
    # Set up placeholders
    outfile = ''
    dice_arr = {}
    outfile += 'dice_'
    jacc_arr = {}
    outfile += 'jacc_'

    # Testing the network
    print('Testing... This will take some time...')

    with open(join(output_dir, args.save_prefix + outfile + 'scores.csv'), 'w') as csvfile:
        writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)

        row = ['Scan Name', 'Dice Coefficient', 'Jaccard Index']

        writer.writerow(row)

        num_l = num_labels(args.data_root_dir)
        label_stats = {i: [] for i in range(num_l)}
        prediction_scores = {i: [] for i in range(num_l)}
        # labels = []
        # prediction_scores = []

        for scan in tqdm(test_list):
            volume = scan[0]
            if volume not in dice_arr:
                dice_arr[volume] = []
                jacc_arr[volume] = []
            img = Image.open(join(args.data_root_dir, 'test', volume, 'images', scan[1]))
            resized_img = img.resize((net_input_shape[1], net_input_shape[0]))

            output_array = eval_model.predict(generate_test_batches(args.data_root_dir, [scan],
                                                                    net_input_shape,
                                                                    batch_size=args.batch_size),
                                              steps=1, max_queue_size=1, workers=1,
                                              use_multiprocessing=False, verbose=1)

            if args.net.find('caps') != -1:
                output = output_array
                # recon = output_array[1]
            elif args.net == 'matwo':
                output = output_array
            else:
                output = output_array[:, :, :, 0]


            print('Segmenting Output')
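            # Map each predicted class index to an evenly spaced gray level so the
            # label map can be saved as a single-channel 8-bit image.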
            shade_number = output.shape[-1]
            shades = np.linspace(0, 255, shade_number)
            output = np.argmax(output, axis=-1)
            output = np.squeeze(output, axis=0)
            output_bin = output.copy()
            for j, shade in enumerate(shades):
                output_bin[output_bin == j] = int(shade)
            output_bin = output_bin.astype('uint8')
            output_mask = sitk.GetImageFromArray(output_bin)

            print('Saving Output')
            sitk.WriteImage(output_mask, join(fin_out_dir, scan[1][:-4] + '_final_output' + scan[1][-4:]))

            mask_data = np.array(
                Image.open(join(args.data_root_dir, 'test', volume, 'masks', scan[2])).resize((net_input_shape[1], net_input_shape[0]), Image.NEAREST),
                dtype=np.uint8)

            output_array = softmax(output_array, axis=-1)
            for j in range(num_l):
                y = np.zeros(mask_data.shape)
                y[mask_data == j] = 1
                label_stats[j].append(y.flatten())
                score = output_array[..., j]
                prediction_scores[j].append(score.flatten())

            # labels.append(mask_data.flatten())
            # prediction_scores.append(softmax(output_array, axis=-1).reshape(-1, output_array.shape[-1]))

            # Plot Qual Figure
            print('Creating Qualitative Figure for Quick Reference')
            if 'Cirrus' in args.data_root_dir:
                f, ax = plt.subplots(1, 3, figsize=(10, 5))
            else:
                f, ax = plt.subplots(1, 3, figsize=(15, 5))

            ax[0].imshow(img, alpha=1, cmap='gray')
            ax[0].set_title('Original scan')
            ax[1].imshow(resized_img, alpha=1, cmap='gray')
            ax[1].imshow(mask_to_rgb(output), alpha=0.5)
            # ax[1].imshow(output, alpha=1, cmap='gray')
            ax[1].set_title('Network output')
            ax[2].imshow(resized_img, alpha=1, cmap='gray')
            ax[2].imshow(mask_to_rgb(mask_data), alpha=0.5)
            # ax[2].imshow(mask_data, alpha=1, cmap='gray')
            ax[2].set_title('Reference mask')
            # ax[0].imshow(mask_data, alpha=1, cmap='gray')
            # ax[1].imshow(mask_to_rgb(mask_data), alpha=1)

            fig = plt.gcf()
            # fig.suptitle(scan[1][:-4])

            plt.savefig(join(fig_out_dir, scan[1][:-4] + '_qual_fig' + '.png'),
                        format='png', bbox_inches='tight')
            plt.close('all')

            row = [scan[1][:-4]]
            print('Computing Dice')
            dice = DiceMetric()
            dice_score = dice(output, mask_data, range(num_l))
            dice_arr[volume].append(dice_score)
            print('\tDice: {}'.format(dice_score))
            row.append(dice_score)
            print('Computing Jaccard')
            jaccard = JaccardMetric()
            jacc_score = jaccard(output, mask_data, range(num_l))
            jacc_arr[volume].append(jacc_score)
            print('\tJaccard: {}'.format(jacc_score))
            row.append(jacc_score)

            writer.writerow(row)

        writer.writerow(['Volume averages:'])
        volume_dice_avgs = []
        volume_jacc_avgs = []
        for v in dice_arr:
            volume_dice_avg = np.mean(np.stack(dice_arr[v]), axis=0)
            volume_jacc_avg = np.mean(np.stack(jacc_arr[v]), axis=0)
            volume_dice_avgs.append(volume_dice_avg)
            volume_jacc_avgs.append(volume_jacc_avg)
            row = [v, volume_dice_avg, volume_jacc_avg]
            writer.writerow(row)
        volume_dice_avgs = np.stack(volume_dice_avgs)
        dice_mean = np.mean(volume_dice_avgs, axis=0)
        volume_jacc_avgs = np.stack(volume_jacc_avgs)
        jacc_mean = np.mean(volume_jacc_avgs, axis=0)
        row = ['Average Scores', dice_mean, jacc_mean]
        writer.writerow(row)

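        # Per-class one-vs-rest ROC: for each non-background label, flatten the
        # binary ground-truth masks and the softmax scores collected above, then
        # compute the ROC curve and its AUC.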
        fpr = []
        tpr = []
        auc_scores = []
        for i in range(1, num_l):
            labels = np.stack(label_stats[i]).flatten()
            scores = np.stack(prediction_scores[i]).flatten()
            fpr_rf, tpr_rf, thresholds_rf = roc_curve(labels, scores)
            fpr.append(fpr_rf)
            tpr.append(tpr_rf)
            auc_rf = auc(fpr_rf, tpr_rf)
            auc_scores.append(auc_rf)
        plot_roc_curves(args, fpr, tpr, auc_scores, output_dir)

        # labels = np.stack(labels).flatten()
        # prediction_scores = np.stack(prediction_scores).reshape(-1, output_array.shape[-1])
        #
        # roc_auc = roc_auc_score(labels, prediction_scores, multi_class='ovr')
        print('AUC score: {}'.format(auc_scores))
        row = ['AUC Scores'] + auc_scores  # one column per class AUC
        writer.writerow(row)

    print('Done.')
Code Example #5
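This `test` variant evaluates a classification model rather than a segmentation one: weights are loaded the same way as in Example #2, one prediction is collected per scan, the outputs are thresholded or argmaxed exactly as above, and the results are compared against the integer labels in the last column of `test_list`. Accuracy, macro-averaged F1, precision, recall, the confusion matrix, and a classification report (all from `sklearn.metrics`) are printed and written to `scores.csv`.
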
def test(args, test_list, eval_model, net_input_shape):
    # Set the path to the testing weights for the model, either based on user or on training
    if args.test_weights_path != '':
        output_dir = os.path.join(args.data_root_dir, 'results', args.exp_name, args.net,
                                  'split_{}'.format(args.split_num), os.path.basename(args.test_weights_path)[:-5])
        try:
            eval_model.load_weights(args.test_weights_path)
        except Exception as e:
            print(e)
            raise Exception('Failed to load weights from training.')
    else:
        output_dir = os.path.join(args.data_root_dir, 'results', args.exp_name, args.net,
                                  'split_{}'.format(args.split_num), args.output_name + '_model_' + args.time)
        try:
            eval_model.load_weights(os.path.join(args.check_dir, args.output_name + '_model_' + args.time + '.hdf5'))
        except Exception as e:
            print(e)
            raise Exception('Failed to load weights from training.')

    # Create an output directory for saving results
    try:
        os.makedirs(output_dir)
    except FileExistsError:
        pass

    # Create a CSV for saving the results
    with open(os.path.join(output_dir, args.save_prefix + 'scores.csv'), 'w') as csvfile:
        writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)

        row = ['Scan Name', 'Prediction', 'Label']
        writer.writerow(row)

        # Testing the network
        print('Testing... This will take some time...')
        output_array = []
        for test_sample in test_list:
            output_array.append(eval_model.predict_generator(
                generate_test_batches(root_path=args.data_root_dir, test_list=[test_sample],
                                      net_shape=net_input_shape, mod_dirs=args.modality_dir_list,
                                      exp_name=args.exp_name, net=args.net, MIP_choices=args.MIP_choices,
                                      batchSize=1, numSlices=args.slices, subSampAmt=0, stride=1),
                steps=1, max_queue_size=1, workers=1, use_multiprocessing=False, verbose=1))

        # Convert the network output to predictions
        if args.num_classes > 2:
            output = np.argmax(np.squeeze(np.asarray(output_array, dtype=np.float32)), axis=1)
        else:
            output = np.copy(np.asarray(output_array, dtype=np.float32)[:,0])
            output[output < 0.5] = 0
            output[output >= 0.5] = 1

        # Save the names, predictions, and GTs to a file
        name_list = [x.split('/')[1] for x in np.asarray(test_list)[:,0]]
        gt_list = np.asarray(test_list)[:,-1].astype(int)
        assert len(gt_list) == len(output), 'Different number of outputs and ground truth labels in testing.'

        print(list(zip(name_list, np.squeeze(np.asarray(output_array)), gt_list)))
        print('Accuracy: {}'.format(accuracy_score(gt_list, output)))
        print('F1 Score: {}'.format(f1_score(gt_list, output, average='macro')))
        print('Precision: {}'.format(precision_score(gt_list, output, average='macro')))
        print('Recall: {}'.format(recall_score(gt_list, output, average='macro')))
        print('Confusion matrix:')
        print(confusion_matrix(gt_list, output))
        print('Classification Report:')
        print(classification_report(gt_list, output))

        writer.writerows(zip(name_list, np.squeeze(np.asarray(output_array)), gt_list))
        writer.writerow(['Accuracy:','{}'.format(accuracy_score(gt_list, output))])
        writer.writerow(['F1 Score:','{}'.format(f1_score(gt_list, output, average='macro'))])
        writer.writerow(['Precision:','{}'.format(precision_score(gt_list, output, average='macro'))])
        writer.writerow(['Recall:','{}'.format(recall_score(gt_list, output, average='macro'))])
        writer.writerow(['Confusion matrix:'])
        writer.writerows(confusion_matrix(gt_list, output))  # one CSV row per matrix row
        writer.writerow(['Classification Report:'])
        writer.writerow([classification_report(gt_list, output)])  # wrap in a list so the report is one cell

    print('Done.')