def seeResult(self):
    # Iterate over images sorted by their numeric file name (e.g. "12.jpg" -> 12).
    image_list = os.listdir(self.images_path)
    image_list.sort(key=lambda x: int(x[:-4]))

    for all_i, name in enumerate(image_list):
        try:
            print("Currently viewing image:", name)

            filename = name[:-4]
            image_path = os.path.join(self.images_path, name)
            txt_path = os.path.join(self.annotations_path,
                                    filename + ".txt")
            image = cv.imread(image_path)
            with open(txt_path) as txt_file:
                txt_info = txt_file.readlines()
            ann_infos = []

            # Each annotation line ends with a trailing comma, so the last
            # (empty) field is dropped before converting to int.
            for line in txt_info:
                ann = line.split(',')[:-1]
                number_ann = list(map(int, ann))

                category_id, x1, y1, x2, y2 = number_ann
                print("Current bbox size:", (x2 - x1) * (y2 - y1))
                print("*************")
                ann_infos.append((category_id, x1, y1, x2, y2))

            tools.visualize(ann_infos, image)
            cv.waitKey(0)
        except Exception as exc:
            print("Error while processing {}: {}".format(name, exc))
            cv.waitKey(1)

    cv.destroyAllWindows()
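The trailing-comma handling above implies one bounding box per line in each .txt annotation, in the form category_id,x1,y1,x2,y2, (note the final comma). A hypothetical 7.txt paired with 7.jpg might look like:

# 7.txt (hypothetical contents; format inferred from the parsing above)
# 3,15,20,120,240,
# 1,40,60,80,100,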
def refresh_data(self, game, parent):
    # Repaint every cell of the table widget from the game's display board.
    for row in range(0, settings.ROWS):
        for column in range(0, settings.COLUMNS):
            text = str(game.display_board[row][column])
            self.element = QTableWidgetItem(text)
            if text != '':
                tools.visualize(self.element, text)
            self.setItem(row, column, self.element)
    # Pop up a dialog once the game is no longer in progress.
    if game.status != settings.GAME_STATUS['playing']:
        parent.popup(game.status)
    self.game.display_board = game.display_board
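refresh_data reads its board geometry and status keys from a settings module that is not shown. A hypothetical minimal version, just to make the references concrete (names follow the attributes used above; the values are assumptions):

# settings.py (hypothetical)
ROWS = 9        # board height in cells (assumed)
COLUMNS = 9     # board width in cells (assumed)
GAME_STATUS = {'playing': 'playing', 'won': 'won', 'lost': 'lost'}  # keys assumed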
Example #3
# RunModel, preprocess_image and visualize come from the surrounding project.
import numpy as np
import tensorflow as tf  # TF1-style Session API


def main(img_path, json_path=None, viz=True, renderer=None, config=None):
    sess = tf.Session()
    model = RunModel(config, sess=sess)

    cropped_imgs, params, og_imgs = preprocess_image(img_path, config.img_size,
                                                     json_path)
    # Add batch dimension: 1 x D x D x 3
    input_imgs = [np.expand_dims(input_img, 0) for input_img in cropped_imgs]

    # Theta is the 85D vector holding [camera, pose, shape],
    # where camera is the 3D vector [s, tx, ty],
    # pose is a 72D vector holding the rotations of SMPL's 24 joints in axis-angle format,
    # and shape holds the 10 SMPL shape coefficients.
    for k in range(len(input_imgs)):
        joints, verts, cams, joints3d, theta = model.predict(input_imgs[k],
                                                             get_theta=True)
        print(joints.shape)
        print(verts.shape)
        print(cams.shape)
        print(joints3d.shape)
        print(theta.shape)
        if viz:
            visualize(og_imgs[k], params[k], joints[0], verts[0], cams[0],
                      renderer)
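Given the layout documented in the comments above (3 camera + 72 pose + 10 shape), theta can be split back into its parts; a minimal sketch, assuming theta keeps the leading batch dimension of size 1:

# Hedged sketch: unpack the 85D theta vector into its documented parts.
cam_params = theta[0, :3]        # [s, tx, ty]
pose_params = theta[0, 3:75]     # 24 joints x 3 axis-angle components
shape_params = theta[0, 75:]     # 10 SMPL shape coefficients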
Example #4
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier

from tools import preprocess, confusion, visualize

(images_train, images_test, images_valid,
 labels_train, labels_test, labels_valid) = preprocess()

neigh = KNeighborsClassifier(n_neighbors=9, weights='distance')
neigh.fit(images_train, labels_train)
result = neigh.predict(images_test)
matrix = confusion(labels_test, result)
print(matrix)
print(accuracy_score(labels_test, result))

figure, ax = plt.subplots()
plt.ylabel('Predictions')
plt.xlabel('Actual')
plt.title('Confusion Matrix for KNearestNeighbor')
plt.xticks([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
plt.yticks([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

ax.matshow(matrix, cmap=plt.cm.Spectral)
# Annotate each cell of the confusion matrix with its count
for i in range(10):
    for j in range(10):
        c = matrix[j, i]
        ax.text(i, j, str(c), va='center', ha='center')

plt.show()

visualize(images_test, labels_test, result)
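tools.confusion is not shown in this listing; a minimal stand-in that matches how the matrix is indexed above, assuming it simply wraps scikit-learn (an assumption, not the repo's actual helper):

from sklearn.metrics import confusion_matrix

def confusion(labels_true, labels_pred):
    # Hypothetical stand-in for tools.confusion: returns the raw count matrix.
    return confusion_matrix(labels_true, labels_pred)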
Example #5
def showPrediction(composition):
    visualize(composition, category_id_to_name)
Example #6
import argparse
import yaml

import tools

parser = argparse.ArgumentParser()
parser.add_argument('-visualize', action='store_true')
parser.add_argument('-predict', action='store_true')
parser.add_argument('-slice', action='store_true')
parser.add_argument('-evaluate', action='store_true')
args = parser.parse_args()

with open('./configs.yaml', 'r') as stream:
    try:
        configs = yaml.safe_load(stream)
    except yaml.YAMLError as exc:
        print(exc)

if args.visualize:
    tools.visualize(configs)
elif args.predict:
    tools.predict(configs)
elif args.slice:
    tools.slice_vids(configs)
elif args.evaluate:
    tools.evaluate(configs)
else:
    raise ValueError(
        'Did not set flag: -visualize, -predict, -slice, -evaluate')
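The else branch above guards against running with no flag set. argparse can enforce one-and-only-one flag directly with a required mutually exclusive group; a sketch of that variant (a possible refactor, not the original code):

import argparse

parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-visualize', action='store_true')
group.add_argument('-predict', action='store_true')
group.add_argument('-slice', action='store_true')
group.add_argument('-evaluate', action='store_true')
args = parser.parse_args()  # argparse itself rejects zero or multiple flags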
Example #7
import tools
from sklearn.metrics import accuracy_score, classification_report

# basTree, modTree and custTree are decision trees fitted earlier in the script;
# the test splits (images_test, cimg_test, labels_test) come from the same preprocessing.
bas_predict = basTree.predict(images_test)
mod_predict = modTree.predict(images_test)
cust_predict = custTree.predict(cimg_test)

# create the confusion matrix for the base tree
cm_bas = tools.confusion(labels_test, bas_predict)

print('Classification Report for Basic Tree')
print('Basic Tree Test Accuracy')
print(accuracy_score(labels_test, bas_predict))
print(classification_report(labels_test, bas_predict))
tools.dispMatrix(cm_bas, 'Base Decision Tree Confusion Matrix')

# Look at visualizations for base decision tree
print('Visualization of 3 mistakes made in base tree')
tools.visualize(images_test, labels_test, bas_predict)

# create the confusion matrix for the modified tree
cm_mod = tools.confusion(labels_test, mod_predict)
print('Classification Report for Modified Tree')
print('Modified Tree Test Accuracy')
print(accuracy_score(labels_test, mod_predict))
print(classification_report(labels_test, mod_predict))
tools.dispMatrix(cm_mod, 'Modified Decision Tree Confusion Matrix')
# Look at the visualizations for the modified decision tree
print('Visualization of 3 mistakes made in modified tree')
tools.visualize(images_test, labels_test, mod_predict)

# create the confusion matrix for the Custom Features Tree
cm_cust = tools.confusion(labels_test, cust_predict)
print(cm_cust)
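tools.dispMatrix is also not shown here; a minimal sketch consistent with the matshow-based plotting in Example #4 (an assumption about the helper, not the repo's actual code):

import matplotlib.pyplot as plt

def dispMatrix(matrix, title):
    # Render a labelled confusion matrix, mirroring the Example #4 plotting code.
    figure, ax = plt.subplots()
    ax.matshow(matrix, cmap=plt.cm.Spectral)
    ax.set_title(title)
    ax.set_xlabel('Actual')
    ax.set_ylabel('Predictions')
    for i in range(matrix.shape[0]):
        for j in range(matrix.shape[1]):
            ax.text(j, i, str(matrix[i, j]), va='center', ha='center')
    plt.show()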