Exemplo n.º 1
0
    def addToDataset(self, path, label=None, dataFile='', labelFile=''):
        """
        Add a set of image matches to the training data from a log file of
        labelled matches.

        :param path: the path to the matches log
        :param label: optional integer np array used as the label for every
            loaded entry; when omitted, the labels from the log file are used
        :param dataFile: the path to optionally save the data object to
        :param labelFile: the path to optionally save the label object to
        :return: None
        """
        imLoader = ImageLoader(path, image_size=self.img_dims)
        entries = imLoader.train_set.shape[0]

        # Build the label block for the newly loaded images.
        # NB: `label != None` would broadcast element-wise on an ndarray and
        # raise on truth-testing; identity comparison is the correct check.
        if label is not None:
            new_labels = np.vstack([label] * entries)
        else:
            new_labels = imLoader.label_set

        if self.data.shape[0] == 0:
            # First batch: adopt the loaded arrays directly.
            self.data = imLoader.train_set
            self.labels = new_labels
        else:
            # Append to the existing dataset.
            self.data = np.concatenate((self.data, imLoader.train_set), 0)
            self.labels = np.concatenate((self.labels, new_labels), 0)
        imLoader.closeFile()

        if dataFile != '' and labelFile != '':
            # Context managers close the handles the original left dangling.
            with open(dataFile, 'wb') as f:
                np.save(f, self.data)
            with open(labelFile, 'wb') as f:
                np.save(f, self.labels)
 def load_batch(self, path):
     """Load a single image batch from *path* via a fresh ImageLoader."""
     return ImageLoader().load_image(path)
Exemplo n.º 3
0
 def startLoading_(self, sender):
     """Disable the start button and kick off a background image-loading worker."""
     self.startLoadingButton.setEnabled_(False)
     group_by_color = self.groupByColorCheckbox.state() == NSOnState
     loader = ImageLoader(
         self.selectedPath,
         self.setNameTextField.stringValue(),
         group_by_color,
         self,
     )
     self.worker = ImageLoaderWorker(loader)
     # daemon so the worker dies with the app instead of blocking exit
     self.worker.daemon = True
     self.worker.start()
Exemplo n.º 4
0
    def test_images_are_resized(self):
        """Every loaded image should come back resized to 30x30 RGB."""
        loader = ImageLoader()
        loader.load_from(imageLocation, 1)

        for img in loader.get_images():
            self.assertEqual('(30, 30, 3)', str(img.shape))
Exemplo n.º 5
0
    def openFile(self):
        """Ask the user for an image file, classify it, and display the category."""
        filename = tk.filedialog.askopenfilename()
        image = ImageLoader().get_image(filename)

        model = RoadSignModel()
        model.load('./models/fullset-10epochs.h5')

        category = model.predict_class(image)
        # category is 1-based; sign_name expects a 0-based index
        print('Category =', self.sign_name(category - 1))
        self.category.set(category)
Exemplo n.º 6
0
    def xtest_TrainModel(self):
        """Training should flip the model's is_trained flag from False to True."""
        loader = ImageLoader()
        loader.load_from('./test-images', 1)
        model = RoadSignModel(loader)

        self.assertEqual(False, model.is_trained())
        model.train()
        self.assertEqual(True, model.is_trained())
Exemplo n.º 7
0
    def test_SaveModel(self):
        """save() should write model.h5 to disk after training."""
        imageLoader = ImageLoader()
        imageLoader.load_from('./test-images', 1)
        model = RoadSignModel(imageLoader)

        # Remove any stale artifact; a missing file is not an error here —
        # the bare os.remove used to raise FileNotFoundError on a clean run.
        try:
            os.remove('model.h5')
        except FileNotFoundError:
            pass

        model.train()
        model.save()

        self.assertEqual(True, os.path.exists('model.h5'))
Exemplo n.º 8
0
    def loadAssets(self):
        """Load every game asset (images and sounds).

        On any failure the traceback is printed and the game loop is stopped.
        """
        image_loader = ImageLoader(self)
        sound_loader = snd.Sound()
        self.imageLoader = image_loader
        self.soundLoader = sound_loader
        try:
            image_loader.load_assets()
            sound_loader.load()
        except Exception:
            # best-effort: report the problem and shut the game down cleanly
            traceback.print_exc()
            self.running = False
Exemplo n.º 9
0
    def test_load_images_from_two_categories(self):
        """Two categories load 60 images: first half labelled 0, second half 1."""
        per_category = 30
        expected_total = 2 * per_category

        loader = ImageLoader()
        loader.load_from(imageLocation, 2)

        images = loader.get_images()
        labels = loader.get_labels()

        self.assertEqual(expected_total, len(images))
        self.assertIs(np.ndarray, type(images))
        self.assertIs(np.ndarray, type(images[0]))
        np.testing.assert_array_equal(np.zeros(per_category),
                                      labels[:per_category])
        np.testing.assert_array_equal(np.ones(per_category),
                                      labels[per_category:expected_total])
        self.assertIs(np.ndarray, type(labels))
Exemplo n.º 10
0
    def test_load_images_from_one_category(self):
        """A single category loads 30 images, all labelled 0."""
        expected_total = 30

        loader = ImageLoader()
        loader.load_from(imageLocation, 1)

        images = loader.get_images()
        labels = loader.get_labels()

        self.assertEqual(expected_total, len(images))
        self.assertIs(np.ndarray, type(images))
        self.assertIs(np.ndarray, type(images[0]))

        np.testing.assert_array_equal(np.zeros(expected_total), labels)
        self.assertIs(np.ndarray, type(labels))
Exemplo n.º 11
0
    def accuracy(self,
                 net,
                 path,
                 epoch,
                 phase,
                 device,
                 log_path,
                 batch_size=64,
                 do_logwrite=False):
        """
        Compute classification accuracy of *net* over the dataset at *path*.

        :param net: network to evaluate (run under torch.no_grad())
        :param path: dataset root; its last path component names the category
        :param epoch: epoch number, only used in the optional log line
        :param phase: dataset phase string passed to ImageLoader
        :param device: torch device for inference
        :param log_path: directory for the optional accuracy log file
        :param batch_size: evaluation batch size
        :param do_logwrite: when True, append the result to a per-category log
        :return: accuracy as a percentage (float)
        """
        # exist_ok replaces the old blanket `except OSError: pass`, which
        # also hid genuine failures such as permission errors.
        os.makedirs(log_path, exist_ok=True)

        category = path.split('/')[-1] + "_" + phase
        with torch.no_grad():
            transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.5, ), (0.5, ))  # Normalize [-1, 1]
            ])

            dataset = ImageLoader(path, phase, transforms=transform)
            data_loader = torch.utils.data.DataLoader(dataset,
                                                      batch_size=batch_size,
                                                      shuffle=False)

            correct = 0
            # batch index was never used; iterate the loader directly
            for images, labels in data_loader:
                images = images.to(device)
                labels = labels.to(device)
                outputs = net(images)
                _, pred_y = torch.max(outputs.data, 1)
                correct += (pred_y == labels).sum().item()

            accuracy = (correct / len(dataset)) * 100

            print('phase:%s --- correct [%d/%d]  acc: %.2f%%' %
                  (category, correct, len(dataset), accuracy))

            if do_logwrite:
                # context manager guarantees the log file is closed
                with open('%s/%s_log.txt' % (log_path, category), 'a') as f:
                    f.write('epoch:%d, phase:%s --- correct [%d/%d]  %.4f%%\n' %
                            (epoch, phase, correct, len(dataset), accuracy))

            return accuracy
Exemplo n.º 12
0
def main():
    """Fetch one image batch and print label info plus per-species counts."""
    loader = ImageLoader(3)
    batch = loader.get_next_image_batch()

    print(len(batch))
    print(batch[0].get_label())
    print(batch[-1].get_label())

    # partition the batch by species label
    tiel = [img for img in batch if img.get_label() == "cockatiel"]
    pink = [img for img in batch if img.get_label() == "pink_cockatoo"]

    print(len(pink))
    print(len(tiel))
Exemplo n.º 13
0
    def __init__(self):
        """Capture the current screen and calibrate window metrics from anchors."""
        grab = ImageGrab.grab().convert("RGB")
        self.screen = cv2.cvtColor(numpy.array(grab), cv2.COLOR_RGB2BGR)

        self.ultLoader = ImageLoader('image/ult/')

        if not self.have('topleft'):
            return
        # Locate the two corner anchor templates to measure the emulator
        # window height (cv2.minMaxLoc returns the best-match location last).
        _, _, _, (x1, y1) = cv2.minMaxLoc(
            cv2.matchTemplate(self.screen, self._imageLoader.get('topleft'),
                              cv2.TM_CCOEFF_NORMED))
        _, _, _, (x2, y2) = cv2.minMaxLoc(
            cv2.matchTemplate(self.screen, self._imageLoader.get('rightdown'),
                              cv2.TM_CCOEFF_NORMED))
        # default 989
        GameStatus().y = y2 - y1
        GameStatus().use_Droid4X = True
Exemplo n.º 14
0
    def FRR(self,
            net,
            path,
            device,
            log_path,
            batch_size=64,
            do_logwrite=False):
        """
        Compute the False Rejection Rate: the fraction of real samples that
        the network predicts as fake.

        :param net: network under evaluation (run under torch.no_grad())
        :param path: dataset root; the "Real" phase is loaded
        :param device: torch device for inference
        :param log_path: directory for the optional log file
        :param batch_size: evaluation batch size
        :param do_logwrite: when True, append the result line to the log file
        :return: FRR as a percentage (float)
        """
        # exist_ok replaces the old `except OSError: pass`, which also hid
        # genuine failures such as permission errors.
        os.makedirs(log_path, exist_ok=True)

        with torch.no_grad():
            transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.5, ), (0.5, ))  # Normalize [-1, 1]
            ])

            dataset = ImageLoader(path, "Real", transforms=transform)
            data_loader = torch.utils.data.DataLoader(dataset,
                                                      batch_size=batch_size,
                                                      shuffle=False)

            wrong = 0
            # batch index was never used; iterate the loader directly
            for images, labels in data_loader:
                images = images.to(device)
                labels = labels.to(device)
                outputs = net(images)
                _, pred_y = torch.max(outputs.data, 1)
                wrong += (pred_y != labels).sum().item()
            metric = (wrong / len(dataset)) * 100
            print_log = '%s --- pos(real) -> neg(fake) Wrong [%d/%d]  FRR: %.2f%%' % (
                "FRR", wrong, len(dataset), metric)
            print(print_log)

        if do_logwrite:
            # Context manager closes the file; the added newline keeps one
            # result per line across repeated appends (the original ran
            # successive entries together on a single line).
            with open('%s/%s_log.txt' % (log_path, "test_"), 'a') as f:
                f.write(print_log + '\n')

        return metric
Exemplo n.º 15
0
    def __init__(self):
        """Build controllers, load assets and create the initial game objects."""
        self.currentProgram = 'menu'  # which main loop is active

        width, height = 720, 406  # output video quality

        self.clock = pygame.time.Clock()
        self.timerController = TimerController()

        self.renderer = Renderer(width, height)
        self.imageLoader = ImageLoader()
        self.soundController = SoundController('bgMusic.ogg',[('cutSpell',0.1),('lightingSpell',0.1),('explosionSpell',0.1)],0.05)
        self.mouseController = MouseController()

        images = self.imageLoader.imageStorage
        self.menu = Menu(images['menu'], self)

        self.player = Player((width, height), images["player"],
                             [(0,2),(2,5),(5,8),(8,11),(11,14),(14,22),(22,37)])
        self.spellController = SpellController(self.mouseController, images,
                                               self.renderer, self.player,
                                               self.soundController)

        self.amountBars = AmountBars(self.player, None, images['bars'],
                                     (width // 2, height // 2))

        # populated later, once a level starts
        self.boss = None
        self.level = None
        self.playerMovement = None
Exemplo n.º 16
0
def main():
    """Parse command-line flags and train, validate, or run inference."""
    # optional command line args
    parser = argparse.ArgumentParser()
    parser.add_argument('--train', help='train the NN', action='store_true')
    parser.add_argument('--validate',
                        help='validate the NN',
                        action='store_true')

    args = parser.parse_args()

    # train or validate on IAM dataset
    if args.train or args.validate:
        # load training data, create TF model
        loader = ImageLoader(FilePaths.fnTrain, Model.batchSize, Model.imgSize,
                             Model.maxTextLen)

        # save characters of model for inference mode; `with` closes the
        # handle the original left dangling
        with open(FilePaths.fnCharList, 'w') as f:
            f.write(''.join(loader.charList))

        # save words contained in dataset into file
        with open(FilePaths.fnCorpus, 'w') as f:
            f.write(' '.join(loader.trainWords + loader.validationWords))

        # execute training or validation
        if args.train:
            model = Model(loader.charList)
            train(model, loader)
        elif args.validate:
            model = Model(loader.charList, mustRestore=True)
            validate(model, loader)

    # infer text on test image
    else:
        with open(FilePaths.fnAccuracy) as f:
            print(f.read())
        with open(FilePaths.fnCharList) as f:
            char_list = f.read()
        model = Model(char_list, mustRestore=True)
        derive(model, FilePaths.fnDerive, contrast=False)
Exemplo n.º 17
0
    def run_model(self,
                  writer,
                  set_path,
                  preload_epoch,
                  epoch,
                  training=False,
                  shuffle=False,
                  pretrain=False):
        """
        Run one pass of the GAN graph over the images in *set_path*.

        :param writer: TF summary writer that receives loss summaries
        :param set_path: directory of images to feed through the network
        :param preload_epoch: epoch offset of a preloaded checkpoint
        :param epoch: current epoch number (added to preload_epoch when saving)
        :param training: value fed into the 'tf_training' placeholder
        :param shuffle: when True, shuffle the data before batching
        :param pretrain: when True, run only the MSE pretraining op instead
            of the adversarial discriminator/generator train ops
        """
        if not training:
            print('VALIDATION')
        image_loader = ImageLoader(batch_size=2, image_dir=set_path)
        if shuffle:
            image_loader.shuffle_data()
        batch_gen = image_loader.getImages()
        for i, (batch_x, batch_y) in enumerate(batch_gen):
            # log a summary every `subbatch` batches
            subbatch = 100
            feed = {
                'tf_x:0': batch_x,
                'tf_y:0': batch_y,
                'tf_training:0': training
            }
            if (i % subbatch == 0):
                print('batch ' + str(i) + '/' + str(image_loader.batch_count))
                loss = self.sess.run(self.merged, feed_dict=feed)
                writer.add_summary(loss)
                if (training and i % 1000 == 0):
                    # periodic checkpoint during training
                    self.save(epoch=preload_epoch + epoch)

            if (not pretrain):
                # adversarial phase: update discriminator, then generator
                _ = self.sess.run('train_op_disc', feed_dict=feed)

                _ = self.sess.run('train_op', feed_dict=feed)
            else:
                # MSE-only pretraining step
                loss, _ = self.sess.run([self.mse_loss_summ, 'train_mse_op'],
                                        feed_dict=feed)
                writer.add_summary(loss)
Exemplo n.º 18
0
def stream():
    """Render the live-stream page."""
    return render_template('stream.html')

@app.route('/cullp')
def cullp():
    """Return the rendered culprit list wrapped in a JSON payload."""
    # stray trailing semicolon removed; keyword spacing normalised
    return jsonify({'data': render_template('culprit.html', culprits=main.getCulprits())})

@app.route('/compare')
def compare():
    """Render the image-comparison page."""
    return render_template('compare.html')

@app.route('/ocr')
def ocr():
    """Render the OCR page."""
    return render_template('ocr.html')

@app.route('/nextimage/<index>')
def nextimage(index):
    """Return the next image payload for the given index."""
    # stray C-style semicolons and the redundant temporary removed
    return imloader.next(index)

# Module-level singletons shared by the routes above.
cam = VideoCamera(main.videopath)
imloader = ImageLoader('./static/Saved')

@app.route('/video_feed')
def video_feed():
    """Stream processed camera frames as an MJPEG multipart response."""
    # NOTE(review): `play` is declared global but not used in this function —
    # presumably toggled elsewhere; confirm before removing.
    global play,cam
    return Response(gen(cam,main.process),mimetype='multipart/x-mixed-replace; boundary=frame')

# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run(host='0.0.0.0', debug=True)
Exemplo n.º 19
0
    if preload_model:
        srgan = SrGan(epochs=epoch)
        srgan.load(epoch=preload_epoch, path='./mse-vgg-gen-model/')
    else:
        srgan = SrGan(epochs=epoch)
    srgan.train(preload_epoch=preload_epoch,
                initialize=not preload_model,
                validation_set_path="./ImageNet/TestImages",
                pretrain=pretrain)
    del srgan

if not train:
    srgan = SrGan(epochs=epoch)
    srgan.load(epoch=preload_epoch, path='./mse-vgg-gen-model/')
    if not demo:
        il = ImageLoader(batch_size=10, image_dir="./ImageNet/TestImages")
    else:
        il = ImageLoader(batch_size=1,
                         image_dir="./ImageNet/DemoImages",
                         shrink=False)
    for i, (input_images, target_images) in enumerate(il.getImages()):
        preds = srgan.predict(input_images)
        for i, img in enumerate(preds):
            pictures = [
                cv2.resize(input_images[i],
                           dsize=(input_images[i].shape[1] * 4,
                                  input_images[i].shape[0] * 4),
                           interpolation=cv2.INTER_NEAREST),
                cv2.resize(input_images[i],
                           dsize=(input_images[i].shape[1] * 4,
                                  input_images[i].shape[0] * 4),
Exemplo n.º 20
0
    def evaluate_image_path(self,
                            log_paths,
                            labels=None,
                            images_in_mosaic=(10, 10)):
        """
        Evaluate the images listed in the log file(s) on the trained model and
        write two image mosaics: one of positive matches, one of negatives.

        :param log_paths: a list of paths to the image log file(s)
        :param labels: optional labels associated with each path; when given
            it must be a list of the same length as log_paths
        :param images_in_mosaic: (rows, cols) of each output mosaic; a tuple
            default replaces the original mutable-list default argument
        :return: None
        """
        # `!= None` broadcasts element-wise on ndarrays; identity comparison
        # is the correct (and idiomatic) check throughout this method.
        if labels is not None and len(log_paths) != len(labels):
            print('log_paths and labels length do not match! aborting...')
            return None

        imLoader = ImageLoader(log_paths[0], image_size=self.img_dims)
        data = imLoader.train_set
        entries = imLoader.train_set.shape[0]
        if labels is not None:
            data_labels = np.vstack([labels[0]] * entries)
        else:
            data_labels = imLoader.label_set
        # append the remaining log files to the combined dataset
        for idx, path in enumerate(log_paths[1:], start=1):
            imLoader = ImageLoader(path, image_size=self.img_dims)
            data = np.concatenate((data, imLoader.train_set), 0)
            entries = imLoader.train_set.shape[0]
            if labels is not None:
                data_labels = np.concatenate(
                    (data_labels, np.vstack([labels[idx]] * entries)), 0)
            else:
                data_labels = np.concatenate((data_labels, imLoader.label_set),
                                             0)

        # shuffle the data
        p = np.random.permutation(data.shape[0])
        data = data[p]
        data_labels = data_labels[p]
        score, predictions = self.predict_image_set(data, data_labels)
        print('Mean accuracy from labelled test set: {0}'.format(score))

        # allocate black canvases large enough for the mosaic grid,
        # with a 5px margin between tiles
        img_width = data.shape[2]
        img_height = data.shape[1]
        margin = 5
        height = images_in_mosaic[0] * img_width + (images_in_mosaic[0] -
                                                    1) * margin
        width = images_in_mosaic[1] * img_height + (images_in_mosaic[1] -
                                                    1) * margin
        positive_samples = np.zeros((height, width, 3))
        negative_samples = np.zeros((height, width, 3))

        # fill the picture with our saved filters
        print('Building {:d}x{:d} image using maximum of {:d} samples'.format(
            width, height, images_in_mosaic[0] * images_in_mosaic[1]))

        positive_counter = 0
        negative_counter = 0
        total_images = images_in_mosaic[0] * images_in_mosaic[1]
        for i in trange(data.shape[0]):
            match = predictions[i]
            img = data[i]
            # place the tile into the next free cell of the matching mosaic
            if match > 0.5 and positive_counter < total_images:
                row = positive_counter // images_in_mosaic[1]
                col = positive_counter % images_in_mosaic[1]
                positive_samples[(img_height + margin) *
                                 row:(img_height + margin) * row + img_height,
                                 (img_width + margin) *
                                 col:(img_width + margin) * col +
                                 img_width, :] = img
                positive_counter += 1
            elif match <= 0.5 and negative_counter < total_images:
                row = negative_counter // images_in_mosaic[1]
                col = negative_counter % images_in_mosaic[1]
                negative_samples[(img_height + margin) *
                                 row:(img_height + margin) * row + img_height,
                                 (img_width + margin) *
                                 col:(img_width + margin) * col +
                                 img_width, :] = img
                negative_counter += 1

        p_path = r'.\images\positive_matches.png'
        n_path = r'.\images\negative_matches.png'
        imsave(p_path, positive_samples)
        imsave(n_path, negative_samples)
Exemplo n.º 21
0
class Screen(metaclass=Singleton):
    """Singleton screen-scraping helper.

    Captures the desktop with PIL, then matches button/card templates via
    OpenCV template matching to drive game automation.
    """

    # seconds to wait between capture attempts in click_on polling loops
    _delay = 0.3
    # shared template loaders (class-level so every instance reuses them)
    _imageLoader = ImageLoader('image/')
    _skills = ImageLoader('image/skills/')
    target = ImageLoader('./', need_scale=False)

    @staticmethod
    def log(text):
        """Append *text* to the game window's log area."""
        GameStatus().window.add_text(text)

    def __init__(self):
        """Capture the screen and calibrate window metrics from anchor images."""
        t = ImageGrab.grab().convert("RGB")
        self.screen = cv2.cvtColor(numpy.array(t), cv2.COLOR_RGB2BGR)

        self.ultLoader = ImageLoader('image/ult/')

        if self.have('topleft'):
            # locate the two corner anchors to measure the emulator window
            tl = self._imageLoader.get('topleft')
            res = cv2.matchTemplate(self.screen, tl, cv2.TM_CCOEFF_NORMED)

            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            x1, y1 = max_loc
            rd = self._imageLoader.get('rightdown')
            res = cv2.matchTemplate(self.screen, rd, cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            x2, y2 = max_loc
            # default 989
            GameStatus().y = y2 - y1
            GameStatus().use_Droid4X = True

    def get_cards(self):
        """Refresh the screen and collect every visible card position."""
        self.capture()
        GameStatus().cards = []
        GameStatus().cards += self.find_list('buster')
        GameStatus().cards += self.find_list('art')
        GameStatus().cards += self.find_list('quick')

    def get_current_level(self):
        """Return the current battle wave (1-3), picked by best template score."""
        c33 = self.chances_of('3_3')
        c23 = self.chances_of('2_3')
        c31 = self.chances_of('1_3')
        if c33 > max(c23, c31):
            self.log('3-3')
            return 3
        elif c23 > max(c33, c31):
            self.log('2-3')
            return 2
        else:
            self.log('1-3')
            return 1

    def find_list(self, name):
        """Return (x, y) points for every match of template *name*.

        Hits closer than 100px horizontally to the previous accepted hit are
        treated as duplicates of the same card and skipped.
        """
        cards = []
        res = cv2.matchTemplate(self.screen, self._imageLoader.get(name),
                                cv2.TM_CCOEFF_NORMED)
        threshold = 0.8
        loc = numpy.where(res >= threshold)
        x = 0
        # sort match points left-to-right before de-duplicating
        t = sorted(zip(*loc[::-1]))
        for pt in t:
            if abs(x - pt[0]) > 100 or x == 0:
                x = pt[0]
                cards.append((pt[0], pt[1]))
            else:
                continue
        self.log(name + ': ' + str(len(cards)))

        return cards

    def set_delay(self, delay):
        """Set the polling delay (seconds) used by click_on."""
        self._delay = delay

    def capture(self):
        """Re-grab the desktop into self.screen (BGR ndarray)."""
        t = ImageGrab.grab().convert("RGB")
        self.screen = cv2.cvtColor(numpy.array(t), cv2.COLOR_RGB2BGR)

    @staticmethod
    def _click(x, y):
        """Click at (x, y), then restore the cursor and foreground window."""
        handle = win32gui.GetForegroundWindow()
        x_old, y_old = win32api.GetCursorPos()
        # see https://github.com/asweigart/pyautogui/issues/23
        try:
            pyautogui.click(x, y, 1)
        except FileNotFoundError:
            pass

        win32api.SetCursorPos((x_old, y_old))
        win32gui.SetForegroundWindow(handle)

    def click_on(self, name, repeat=False, loader=_imageLoader):
        """Wait until template *name* appears, then click its centre.

        With repeat=True, keep re-clicking while the template stays visible.
        Both polling loops bail out as soon as the game is stopped.
        """
        if GameStatus().game_stage == GameStage.Stopped:
            return
        self.log('try click ' + name)
        p = loader.get(name)
        max_val = 0
        x, y = 0, 0
        # poll until the template matches with confidence >= 0.8
        while max_val < 0.8:
            if GameStatus().game_stage == GameStage.Stopped:
                return

            self.capture()
            res = cv2.matchTemplate(self.screen, p, cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            self.log(name + ' ' + str(max_val))
            x, y = max_loc
            time.sleep(self._delay)

        m, n, q = p.shape

        # aim at the template centre, not its top-left corner
        x += n / 2
        y += m / 2

        self._click(x, y)

        # with repeat=True, keep clicking while the template is still found
        max_val = 1 if repeat else 0
        while max_val > 0.8:
            if GameStatus().game_stage == GameStage.Stopped:
                return

            time.sleep(1)
            self.capture()
            res = cv2.matchTemplate(self.screen, p, cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            if max_val > 0.8:
                self._click(x, y)

    def have(self, name, loader=_imageLoader):
        """Return True when template *name* matches with confidence > 0.8."""
        return self.chances_of(name, loader) > 0.8

    def chances_of(self, name, loader=_imageLoader):
        """Capture the screen and return the best match score for *name*."""
        self.capture()
        p = loader.get(name)
        res = cv2.matchTemplate(self.screen, p, cv2.TM_CCOEFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        self.log('chances of ' + name + ': ' + str(max_val))
        return max_val
Exemplo n.º 22
0
    T_B_GRAPH_PATH = './graph/' + args.save_model + NOWTIME

# Hyper-parameters taken from the parsed command line.
BATCH_SIZE = args.batchsize
LEARNING_RATE = 0.0001

NUM_EPOCHS = args.maxepoch

# whether per-batch sub-logs are written during training
is_write_sub_log = False

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, ), (0.5, ))  # Normalize [-1, 1]
])

# train data set: image 1 x 57 x 116
train_dataset = ImageLoader(TRAIN_DATASET_PATH, transforms=transform)

# data loader: batch_size x 1 x 57 x 116
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=BATCH_SIZE,
                                           shuffle=True)
# data set: image 1 x 57 x 116
test_dataset = ImageLoader(TEST_DATASET_PATH, transforms=transform)

# data loader: batch_size x 1 x 57 x 116
test_loader = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=BATCH_SIZE,
                                          shuffle=False)
# Network
if args.model == "Ann":
    net = Ann(node=args.node, num_classes=2).to(device)  # Real or Fake
Exemplo n.º 23
0
    def write_to_file(self, filename):
        """Persist this object's data to *filename* using the designated
        loader strategy."""
        ImageLoader().write(self, filename)
Exemplo n.º 24
0
    def load_from_file(self, filename):
        """Load this object's data from *filename* using the designated
        loader strategy."""
        ImageLoader().load(self, filename)
Exemplo n.º 25
0
        self.img = img
        self.orig_image = img.copy()
        return 0

    def nextFunc(self):
        """Advance the function cursor unless it is already at the last entry."""
        # `not a == b` replaced with the direct `!=` comparison
        if self.func_counter != len(self.func_pointers) - 1:
            self.func_counter += 1

    def lastFunc(self):
        """Step the function cursor back unless it is already at the first entry."""
        # `not a == b` replaced with the direct `!=` comparison
        if self.func_counter != 0:
            self.func_counter -= 1


args = parse_args()
mc = MainClass(list_of_funcs=args.functions)
imL = ImageLoader(path_to_data=args.load_path)

img = imL.getCurrentImage()
mc.setImage(img)
cv2.namedWindow('image')

cv2.setMouseCallback('image', mc.draw_circle)

while (1):
    cv2.imshow('image', mc.img)
    k = cv2.waitKey(1) & 0xFF
    if k == ord('m'):
        mc.mode = not mc.mode
    elif k == ord("d"):
        complete_history = {}
        complete_history["augmentations"] = mc.history_object
Exemplo n.º 26
0
def load_image_dataset(image_dir) -> Tuple[pd.DataFrame, pd.DataFrame, pd.Series, pd.Series]:
    """Load train/test image splits from *image_dir* and scale the feature sets."""
    loader = ImageLoader(image_dir)
    x_train, x_test, y_train, y_test = loader.load_images()
    return (loader.scale_data(x_train), loader.scale_data(x_test),
            y_train, y_test)
Exemplo n.º 27
0
from ImageLoader import ImageLoader
from keras.applications import VGG16
from keras.applications import resnet50
from keras.applications.inception_resnet_v2 import InceptionResNetV2

import time
from TransferLearnModel import TransferLearnModel

# loading the data
path = "/put/your/path/here"
# number of training data entries
nt = 37882
# number of validation data entries
nv = 6262

# loader configured for the Inception-style preprocessing
il = ImageLoader(path, model_type='inception')
X_train, Y_train = il.load_train(nt)
X_valid, Y_valid = il.load_valid(nv)

# one hot encode
Y_train = il.onehot_encode(Y_train)
Y_valid = il.onehot_encode(Y_valid)

print("Finish loading the data...")

# base model 1: VGG 16, freezing the first block (3 layers, freeze_num=4)
base_model = VGG16(include_top=False,
                   weights='imagenet',
                   input_shape=X_train.shape[1:])

# base model 2: RestNet50
 def read(self, path, filename):
     """Build an ImageLoader for the file and wrap it in a lazily-created image."""
     return self.lazyImageFactory.create(
         ImageLoader(self.imageReader, path, filename), filename)
Exemplo n.º 29
0
import pytorch_lightning as pl
import torch.utils.data
from pytorch_lightning.loggers import TensorBoardLogger

import config
from ImageLoader import ImageLoader
from Model import Pix2PixModel

if __name__ == '__main__':
    # Set seeds
    pl.seed_everything(config.SEED)

    if config.VALSET_PATH:
        train_dataset = ImageLoader(root_path=config.DATASETS_PATH,
                                    image_size=config.INPUT_SIZE,
                                    transform=config.TRAIN_TRANSFORMS,
                                    training=True,
                                    crops=config.CROPS
                                    )
        val_dataset = ImageLoader(root_path=config.VALSET_PATH,
                                  image_size=config.INPUT_SIZE,
                                  transform=config.DEPLOYMENT_TRANSFORMS,
                                  training=True,
                                  crops=config.CROPS
                                  )
        train_set_size = len(train_dataset)
        val_set_size = len(val_dataset)
    else:
        # Load datasets and create data loaders afterwards
        dataset = ImageLoader(root_path=config.DATASETS_PATH,
                              image_size=config.INPUT_SIZE,
                              transform=config.TRAIN_TRANSFORMS,
Exemplo n.º 30
0
            loss = style_score + content_score
            loss.backward()

            run[0] += 1
            if run[0] % 50 == 0:
                print("run {}:".format(run))
                print('Style Loss : {:4f} Content Loss: {:4f}'.format(
                    style_score.item(), content_score.item()))
                print()

            return style_score + content_score

        optimizer.step(closure)

    input_img.data.clamp_(0, 1)

    return input_img


if __name__ == '__main__':
    # Seed the optimisation with a copy of the content image and run the
    # neural style transfer, then display the result.
    loader = ImageLoader()
    target = loader.content_img.clone()
    result = run_style_transfer(cnn, cnn_normalization_mean,
                                cnn_normalization_std, loader.content_img,
                                loader.style_img, target)

    plt.figure()
    loader.show_image(result, title='Output Image')

    plt.ioff()
    plt.show()