コード例 #1
0
 def __init__(self, dataset_name, mode):
     """Collect image paths for one dataset or for a list of sub-datasets.

     When `dataset_name` is a list, the A-side and B-side image lists of
     every sub-dataset are concatenated and shuffled together with each
     (A, B) pair kept aligned; otherwise the result of load_images is
     stored directly. The third load_images argument flags the datasets
     ("facades", "cityscapes") that need special handling.
     """
     self.mode = mode
     if isinstance(dataset_name, list):
         side_a, side_b = [], []
         for sub_name in dataset_name:
             image_A, image_B = load_images(
                 os.path.join(dataset_dir, sub_name), mode,
                 sub_name in ["facades", "cityscapes"])
             side_a += image_A
             side_b += image_B
         # Shuffle while keeping each (A, B) pair aligned.
         pairs = list(zip(side_a, side_b))
         np.random.shuffle(pairs)
         shuffled_a, shuffled_b = zip(*pairs)
         self.images = [shuffled_a, shuffled_b]
     else:
         self.images = load_images(
             os.path.join(dataset_dir, dataset_name), mode,
             dataset_name in ["facades", "cityscapes"])
コード例 #2
0
ファイル: societes3.py プロジェクト: Danquebec/Societes
def main():
    """Declare per-frame state, load assets and the game map, then enter
    the main game loop.

    Side effects: binds the global FPSCLOCK, opens the pygame display
    window, and blocks inside main_loop until the game ends.
    """
    global FPSCLOCK
    FPSCLOCK = pygame.time.Clock()

    # player_input collects the per-frame input state:
    # “mouse” is the position of the mouse cursor.
    # “left clicked”, “left mouse down”, “right clicked” and “right
    # mouse down” are all about the mouse buttons.
    # “key up” is when a general character key of the keyboard was
    # pressed.
    # “shift” is when shift is down.
    # “arrow key up” is when an arrow key has been pressed.
    player_input = {'mouse':(0, 0),
                    'left clicked': False, 'left mouse down':False,
                    'right clicked':False, 'right mouse down':False,
                    'key up':None, 'shift':False,
                    'arrow key up': None}

    gui.set_mode()
    pygame.display.set_caption('Sociétés')

    # Time the asset/map loading for the console diagnostics below.
    start_load = time()
    images_loaded = loader.load_images()
    print('Images loaded!  Time spent:{}'.format(time() - start_load))
    map_ = world.Map()  # game map
    print('Map instanced! Time spent: {}'.format(time() - start_load))
    world_time = world.Time()  # in-game clock

    # list of nomad groups on the game map
    list_of_nomad_groups = humans.place_test_stuff()

    # NOTE(review): presumably restricts which pygame events are queued —
    # the allowed set is defined inside event.set_allowed(); verify there.
    event.set_allowed()

    dialog_box = gui.DialogBox()

    main_loop(player_input, images_loaded, map_, world_time,
              list_of_nomad_groups, dialog_box)
コード例 #3
0
    clf.fit(x_train, y_train)
    score = clf.score(x_test, y_test)
    # y_pred = clf.predict(x_test)
    # accuracy = sum([y_pred[i] == y_test[i] for i in range(len(y_test))]) / len(y_test)
    print('the score of {}--{} is {}'.format(kernel, C, str(score)))


if __name__ == '__main__':

    # Configuration object providing the data/model paths used below.
    param_config = InceptionModel()

    # Only generate augmented images when the destination dir is still empty.
    if not os.listdir(param_config.DEST_PATH):
        augment.ImageGen(param_config.PATH, param_config.DEST_PATH).gen_image()
    print("generate images end...")

    # NOTE(review): data_set entries are indexed as data[0]/data[1] below —
    # presumably (image, label) pairs; confirm against loader.load_images.
    data_set = loader.load_images(param_config.DEST_PATH)
    X = [data[0] for data in data_set]
    Y = [data[1] for data in data_set]

    features = extract_features(param_config.MODEL_PATH, X, param_config.PICKLE_X_FILE)
    # Using the one-hot `label` form raises an error: SVM targets (y) must
    # not be one-hot encoded, so the raw Y values are used here.
    x_train, x_test, y_train, y_test = train_test_split(features, Y, test_size=0.1, random_state=0)

    # Reduce feature dimensionality from 2048 down to 200 with PCA.
    n_components = 200
    pca = PCA(n_components=n_components).fit(x_train)

    x_train_pca = pca.transform(x_train)
    x_test_pca = pca.transform(x_test)

    param_grid = {
コード例 #4
0
 def test_load_image_kvs(self):
     """After load_images fills the kvs, every iterator key maps to a
     one-element collection containing exactly its value."""
     loader.load_images(self.kvs, self.image_iterator)
     for img_key, img_value in self.image_iterator:
         stored = self.kvs.get(img_key)
         self.assertItemsEqual(stored, [img_value])
コード例 #5
0
import loader
import config
from scipy.misc import imread
import matplotlib.pyplot as plt
import seaborn as sns

# Module-level: load the dataset once, at import time, from the configured path.
base_config = config.BaseConfig
data_set = loader.load_images(base_config.PATH)


def plt_images_size():
    """Plot a bar chart of how many dataset images have each pixel shape
    and save it to ./data/image/images-size-proportion.jpg."""
    shape_counts = {}
    for entry in data_set:
        pixels = imread(entry[0])
        shape_key = "*".join(str(dim) for dim in pixels.shape)
        shape_counts[shape_key] = shape_counts.get(shape_key, 0) + 1
    plt.figure(figsize=(12, 4))
    sns.barplot(list(shape_counts.keys()), list(shape_counts.values()), alpha=0.8)
    plt.xlabel("image size", fontsize=12)
    plt.ylabel("number of images", fontsize=12)
    plt.title("images size present in dataset")
    plt.savefig('./data/image/images-size-proportion.jpg')


# Render and save the plot immediately when this script is imported/run.
plt_images_size()
コード例 #6
0
def detection_loop(args, num_classes):
    """Run detection over a directory of images and write annotated copies
    into ``args.det``.

    Pipeline: load network -> batch images -> forward pass + NMS per batch
    -> rescale boxes from the square network input back to each original
    image size -> draw boxes -> save images -> print per-stage timings.

    Parameters
    ----------
    args : namespace
        Must provide ``bs`` (batch size), ``confidence``, ``nms_thresh``
        and ``det`` (output directory); also consumed by ``load_network``
        and ``load_images``.
    num_classes : int
        Number of detectable classes, forwarded to ``write_results``.

    NOTE(review): depends on the module-level ``classes`` name list and on
    helpers ``load_network``/``load_images``/``write_results`` defined
    elsewhere in the project.
    """
    write = 0  # flips to 1 once `output` holds the first detections
    CUDA = torch.cuda.is_available()
    model = load_network(args)
    inp_dim = int(model.net_info["height"])  # square network input resolution
    imlist, im_batches, im_dim_list, loaded_ims, read_dir, load_batch = load_images(
        args, inp_dim)
    start_det_loop = time.time()
    output_recast = 0  # cumulative forward+NMS time over all batches
    for i, batch in enumerate(im_batches):
        # Run the model on this batch.
        start = time.time()
        with torch.no_grad():
            inp = Variable(batch)
            # Bug fix: the original called .cuda() unconditionally, which
            # crashes on CPU-only machines even though CUDA availability is
            # checked above. Only move the batch to the GPU when available.
            if CUDA:
                inp = inp.cuda()
            prediction = model(inp, CUDA)
            prediction = write_results(prediction,
                                       float(args.confidence),
                                       num_classes,
                                       nms_conf=float(args.nms_thresh))

        end = time.time()
        output_recast += end - start
        # write_results signals "no detections in batch" with an int result.
        if type(prediction) == int:
            for im_num, image in enumerate(
                    imlist[i * int(args.bs):min((i + 1) *
                                                int(args.bs), len(imlist))]):
                im_id = i * int(args.bs) + im_num
                print("{0:20s} predicted in {1:6.3f} seconds".format(
                    image.split("/")[-1], (end - start) / int(args.bs)))
                print("{0:20s} {1:s}".format("Objects Detected:", ""))
                print(
                    "----------------------------------------------------------"
                )
            continue
        # Transform the attribute from index-in-batch to index-in-imlist.
        # (Consistency fix: use int(args.bs) as the first loop does, in case
        # args.bs arrives as a string from the CLI.)
        prediction[:, 0] += i * int(args.bs)

        if not write:  # If we haven't initialised output
            output = prediction
            write = 1
        else:
            output = torch.cat((output, prediction))

        for im_num, image in enumerate(
                imlist[i * int(args.bs):min((i + 1) * int(args.bs),
                                            len(imlist))]):
            im_id = i * int(args.bs) + im_num
            objs = [classes[int(x[-1])] for x in output if int(x[0]) == im_id]
            print("{0:20s} predicted in {1:6.3f} seconds".format(
                image.split("/")[-1], (end - start) / int(args.bs)))
            print("{0:20s} {1:s}".format("Objects Detected:", " ".join(objs)))
            print("----------------------------------------------------------")

        if CUDA:
            # Wait for the GPU so the wall-clock timings are meaningful.
            torch.cuda.synchronize()
    try:
        output
    except NameError:
        # No batch ever produced a detection, so `output` was never bound.
        print("No detections were made")
        exit()

    # One row of original (w, h) per detection, selected by image index.
    im_dim_list = torch.index_select(im_dim_list, 0, output[:, 0].long())

    # Undo the letterbox resize: the images were scaled to fit the square
    # inp_dim x inp_dim input and padded symmetrically.
    scaling_factor = torch.min(inp_dim / im_dim_list, 1)[0].view(-1, 1)

    output[:, [1, 3]] -= (inp_dim -
                          scaling_factor * im_dim_list[:, 0].view(-1, 1)) / 2
    output[:, [2, 4]] -= (inp_dim -
                          scaling_factor * im_dim_list[:, 1].view(-1, 1)) / 2

    output[:, 1:5] /= scaling_factor

    # Clamp every box to its original image bounds.
    for i in range(output.shape[0]):
        output[i, [1, 3]] = torch.clamp(output[i, [1, 3]], 0.0, im_dim_list[i,
                                                                            0])
        output[i, [2, 4]] = torch.clamp(output[i, [2, 4]], 0.0, im_dim_list[i,
                                                                            1])

    class_load = time.time()
    # Resource fix: close the palette file instead of leaking the handle.
    with open("data/pallete", "rb") as palette_file:
        colors = pkl.load(palette_file)
    draw = time.time()

    def Write(x, results, color):
        # Draw one detection row `x` onto its source image in `results`.
        c1 = tuple(x[1:3].int())  # top-left corner
        c2 = tuple(x[3:5].int())  # bottom-right corner
        img = results[int(x[0])]
        cls = int(x[-1])
        label = "{0}".format(classes[cls])
        cv2.rectangle(img, c1, c2, color[cls], 1)
        t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]
        c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4
        cv2.rectangle(img, c1, c2, color[cls], -1)  # filled label background
        cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4),
                    cv2.FONT_HERSHEY_PLAIN, 1, [225, 255, 255], 1)
        return img

    # Draw every detection in place, then write the annotated images out.
    list(map(lambda x: Write(x, loaded_ims, colors), output))
    det_names = pd.Series(imlist).apply(
        lambda x: "{}/det_{}".format(args.det,
                                     x.split("/")[-1]))

    list(map(cv2.imwrite, det_names, loaded_ims))
    end = time.time()

    print("SUMMARY")
    print("----------------------------------------------------------")
    print("{:25s}: {}".format("Task", "Time Taken (in seconds)"))
    print()
    print("{:25s}: {:2.3f}".format("Reading addresses", load_batch - read_dir))
    print("{:25s}: {:2.3f}".format("Loading batch",
                                   start_det_loop - load_batch))
    print("{:25s}: {:2.3f}".format(
        "Detection (" + str(len(imlist)) + " images)", output_recast))
    print("{:25s}: {:2.3f}".format("Output Processing",
                                   class_load - start_det_loop))
    print("{:25s}: {:2.3f}".format("Drawing Boxes", end - draw))
    print("{:25s}: {:2.3f}".format("Average time_per_img",
                                   (end - load_batch) / len(imlist)))
    print("----------------------------------------------------------")

    torch.cuda.empty_cache()
コード例 #7
0
import loader
import augment
import tensorflow as tf
from keras.preprocessing import image
import matplotlib.pyplot as plt
import numpy as np

# Loaded once at import time; entries are indexed as data_set[0][0] below,
# presumably (path, label) pairs — TODO confirm against loader.load_images.
data_set = loader.load_images("../data/train/")


class TestDataAug(tf.test.TestCase):
    """Visual smoke test for the augmentation pipeline: saves the original
    image plus a 3x3 set of augmented variants under ../data/image/."""

    def test_data_aug(self):
        # Save the un-augmented source image for visual comparison.
        img = image.load_img(data_set[0][0])
        plt.imshow(img)
        plt.savefig("../data/image/origin.jpg")
        plt.show()
        # The generator expects a 4-D batch, hence the added leading axis.
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)

        train_set = augment.DataGen(x, np.array([1]), batch_size=1)
        plt.figure()
        for i in range(3):
            for j in range(3):
                _x, y = train_set.next()  # next augmented batch of size 1
                # idx is only used by the commented-out subplot call below.
                idx = (3 * i) + j
                # plt.subplot(3, 3, idx + 1)
                # NOTE(review): pixel values are presumably in [0, 256) —
                # dividing maps them to the [0, 1] floats imshow expects;
                # confirm against DataGen's output range.
                plt.imshow(_x[0] / 256)
                plt.savefig('../data/image/{}-{}.jpg'.format(i, j))
                plt.show()