Пример #1
0
def get_training_data_set():
    """Load the MNIST training split.

    Returns:
        tuple: (images, labels) as produced by the IDX file loaders.
    """
    images = ImageLoader('data/train-images.idx3-ubyte', 60000)
    labels = LabelLoader('data/train-labels.idx1-ubyte', 60000)
    return images.load(), labels.load()
Пример #2
0
def get_test_data_set():
    """Load the MNIST test split.

    Returns:
        tuple: (images, labels) as produced by the IDX file loaders.
    """
    images = ImageLoader('data/t10k-images.idx3-ubyte', 10000)
    labels = LabelLoader('data/t10k-labels.idx1-ubyte', 10000)
    return images.load(), labels.load()
Пример #3
0
def main():
    """Fetch source images if needed, generate collages, then evaluate them."""
    img_dir = './data/images'
    cat_file = '../categories.json'

    # Download/extract the source images only when the directory is missing.
    if not os.path.isdir(img_dir):
        ImageLoader(output_directory=img_dir,
                    categories_file=cat_file).load()

    n_experiences, category_ids = load_experience_data(
        './data/experience-new-ratings.csv', n_ratings=2)

    generated_dir = './data/generated-data'
    features_file = './data/images/features-images-1'

    generator = ImageGenerator(img_dir, cat_file, features_file)
    generator.generate(
        number_of_true_images_for_provider=8,
        number_of_noise_images_for_provider=2,
        number_of_images=n_experiences,
        ids=category_ids,
        number_of_images_in_collage=6,
        output_directory=generated_dir)

    evaluator = Evaluator(generated_dir,
                          features_file='./data/features-generated-data')
    evaluator.visualize(show='ratings')
    evaluator.classify()
Пример #4
0
	def __init__(self, filename, parent = None):
		"""Build the import dialog and load *filename* with a format-specific loader.

		The loader class is chosen from the file extension: 'gbr' uses
		GerberLoader, raster formats (png/bmp/jpg) use ImageLoader.
		"""
		QDialog.__init__(self, parent)
		
		self.ui = Ui_Dialog()
		self.ui.setupUi(self)

		# Redraw whenever the threshold or inversion setting changes.
		self.ui.thresholdSlider.valueChanged.connect(self.redraw)
		self.ui.invertedCheckBox.toggled.connect(self.redraw)
		
		self.ui.progress.setEnabled(False)

		fileExt = filename.split('.')[-1]
		
		# Only the UI controls relevant to the chosen format stay visible.
		if fileExt == 'gbr':
			self.ui.imageBox.setVisible(False)
			self._loader = GerberLoader(self)
		elif fileExt in ['png', 'bmp', 'jpg']:
			self.ui.gerberBox.setVisible(False)
			self._loader = ImageLoader(self)

		# NOTE(review): an unsupported extension leaves self._loader unset,
		# so the load() call below would raise AttributeError — confirm
		# callers filter the extension beforehand.
		if not self._loader.load(filename):
			self.close() #FIXME

		# Wire loader progress/completion back into the dialog.
		self._loader.progress.connect(self.ui.progress.setValue)
		self._loader.loaded.connect(self._loaded)

		if fileExt == 'gbr':
			self.redraw()
		else:
			# Raster images start at the midpoint threshold (127 of 0-255).
			self.ui.thresholdSlider.setValue(127)
Пример #5
0
def main(**kwargs):
    """Train a DCGAN on the images found in the configured folder.

    Expected keyword args: folder, datalen, epochs, batch_size.
    """
    from types import SimpleNamespace
    opts = SimpleNamespace(**kwargs)

    image_loader = ImageLoader(opts.folder)
    dataset = image_loader.setup(datalen=opts.datalen)
    gan = DCGAN(image_loader.shape_x, image_loader.shape_y,
                image_loader.channels, dataset)
    gan.train(epochs=opts.epochs, batch_size=opts.batch_size,
              save_interval=50)
Пример #6
0
def predict(img_name, model, dir_name, new_size=None):
    """Classify one image: load it, extract HOG features, run the model.

    Args:
        img_name: path/name of the image within *dir_name*.
        model: fitted classifier exposing ``predict``.
        dir_name: directory the loader reads from.
        new_size: optional target size for loading/preprocessing.

    Returns:
        The model's prediction for the single feature vector.
    """
    loader = ImageLoader(dir_name=dir_name)
    sample = loader.get_one_image(new_size=new_size, path=img_name)
    preprocessor = ImagePreprocessing(img_size=new_size)
    gray = loader.rgb2gray(sample["data"])
    features = preprocessor.get_hog(np.uint8(gray))
    return model.predict([features])
Пример #7
0
    def __init__(self,  sess , args):
        """Store CLI args, set up the image loader and batch counts, optionally
        wipe working directories, then build the graph and create a Saver.

        :param sess: an open tf.Session used for all graph execution
        :param args: parsed CLI namespace (sizes, dirs, hyper-parameters)
        """

        self.start_time = time.time ()
        self.sess = sess
        # Pool of previously generated images for discriminator training.
        self.pool = ImagePool(max_size= args.max_size)
        self.img_size = (args.image_size , args.image_size)
        self.load_size = (args.load_size , args.load_size)
        self.img_channels = args.image_channel
        self.il = ImageLoader (load_size= self.load_size , img_size=self.img_size ,data_dir = args.data_dir ,target_dir = args.target_dir)
        self.data_dir = args.data_dir
        self.target_dir = args.target_dir
        self.video_dir = args.video_dir
        self.sample_dir = args.sample_dir
        self.checkpoint_dir = args.checkpoint_dir
        self.log_dir = args.log_dir
        self.output_data_dir = os.path.join ('results' ,args.output_data_dir)
        self.output_target_dir = os.path.join ('results' ,args.output_target_dir)
        self.gf_dim = args.gf_dim
        self.df_dim = args.df_dim
        self.l1_lambda = args.l1_lambda
        self.learning_rate = args.learning_rate
        # NOTE(review): "bata1" is presumably Adam's beta1 (name kept as-is
        # because other methods read self.bata1).
        self.bata1 = args.bata1
        self.epoch_num = args.epoch_num
        self.batch_size = args.batch_size
        # Batches per epoch are limited by the smaller of the two datasets.
        self.data_batch_num = self.il.get_image_num() // self.batch_size
        self.target_batch_num = self.il.get_image_num(is_data= False) // self.batch_size
        self.batch_num = min (self.data_batch_num ,self.target_batch_num)
        self.global_step = 0

        # Optionally delete and recreate logs/checkpoints/samples for a fresh run.
        if args.clear_all_memory:
            print('start clear all memory...')

            def clear_files(clear_dir):
                shutil.rmtree (clear_dir)
                os.mkdir (clear_dir)

            clear_files(self.log_dir)
            clear_files(self.checkpoint_dir)
            clear_files(self.sample_dir)

            print ('successfully clear all memory...')


        if not os.path.exists('results'):
            os.makedirs('results')

        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir)

        # Construct the full TF graph (generators, discriminators, losses).
        self._build(args)


        self.saver = tf.train.Saver()
Пример #8
0
def load_images(n, img_dir, image_size=None, class_labels=None):
    """Sample *n* images and return their HOG features with labels.

    Args:
        n: number of images to draw from the loader.
        img_dir: directory the loader reads from.
        image_size: optional target size for loading/preprocessing.
        class_labels: optional label set passed through to the loader.

    Returns:
        tuple of np.ndarray: (feature vectors, target labels).
    """
    loader = ImageLoader(dir_name=img_dir)
    preprocessor = ImagePreprocessing(img_size=image_size)
    features, labels = [], []
    for _ in range(n):
        sample = loader.get_one_image(labels=class_labels,
                                      new_size=image_size)
        gray = loader.rgb2gray(sample["data"])
        features.append(preprocessor.get_hog(np.uint8(gray)))
        labels.append(sample["target"])
    return np.array(features), np.array(labels)
Пример #9
0
    def __init__(self):
        """Configure a COCO detector built on VGG16: class metadata, anchor
        geometry, and TF bookkeeping (global step, checkpoint saver)."""
        # Class/category mappings come from the shared create_data module.
        self.num_class = create_data.coco_num_class
        self.class_names = create_data.coco_class_names
        self.class_colors = create_data.coco_class_colors
        self.class_to_category = create_data.coco_class_to_category
        self.category_to_class = create_data.coco_category_to_class
        # The last class index is reserved for background.
        self.background_id = self.num_class - 1

        self.basic_model = "vgg16"
        # NOTE(review): num_roi is a string here, not an int — confirm
        # downstream code expects "100" rather than 100.
        self.num_roi = "100"
        self.bbox_per_class = False

        self.label = "coco/vggnet16"
        # ILSVRC mean-pixel file used by the image loader.
        self.img_loader = ImageLoader("ilsvrc_2012_mean.npy")
        self.image_shape = [640, 640, 3]

        # Anchor geometry: 6 scales x 3 aspect ratios = 18 anchor types.
        self.anchor_scales = [50, 100, 200, 300, 400, 500]
        self.anchor_ratios = [[1.0 / math.sqrt(2),
                               math.sqrt(2)], [1.0, 1.0],
                              [math.sqrt(2), 1.0 / math.sqrt(2)]]
        self.num_anchor_type = len(self.anchor_scales) * len(
            self.anchor_ratios)

        # Pixel shapes [s*r0, s*r1] precomputed for every scale/ratio pair.
        self.anchor_shapes = []
        for s in self.anchor_scales:
            for r in self.anchor_ratios:
                self.anchor_shapes.append([int(s * r[0]), int(s * r[1])])

        self.anchor_stat_file = 'coco_anchor_stats.npz'
        self.global_step = tf.Variable(0, name='global_step', trainable=False)

        # self.build()
        self.saver = tf.train.Saver(max_to_keep=100)
Пример #10
0
 def start_workers(self):
     """Create one ImageLoader worker per slot and start them all."""
     self.workers = []
     for worker_id in range(self.num_workers):
         self.workers.append(
             ImageLoader(worker_id, self.worker_stats, self.filename_queue,
                         self.color_buffer_queue))
     for worker in self.workers:
         worker.start()
Пример #11
0
    def load_sequence(self, sequence_path, fan_id, seq_size=-1):
        """Load the PNG frame sequence of one fan into ``self.sequence``.

        Args:
            sequence_path: subdirectory under ``incoming_images`` holding frames.
            fan_id: fan index; selects the ``fan_<id>`` folder.
            seq_size: unused; kept for interface compatibility.
        """
        # Hardware playback paths only exist on the Pi.
        if not is_running_on_pi:
            return

        start = time.time()

        image_loader = ImageLoader()
        path = os.path.join(self.images_folder, 'incoming_images',
                            sequence_path, "fan_" + str(fan_id))
        files = sorted(glob.glob(os.path.join(path, "*.png")))

        for file_name in files:
            circular_image = image_loader.load_to_circular_buffer(file_name)
            # BUG FIX: the original asserted a bare non-empty string, which is
            # always truthy, so the size check never fired. Assert the real
            # condition with the message as the assertion text.
            assert circular_image.diameter >= STRIP_LENGTH, \
                "load_sequence(): trying to load image too small for strip length"
            self.sequence.append(circular_image)

        # Typo fix: "Successfuly" -> "Successfully".
        print("Successfully loaded ", len(files))
        print("loading took ", time.time() - start)
Пример #12
0
    def __init__(self):
        """Prepare DCGAN training: load a small image dataset and build the
        discriminator, adversarial, and generator models."""
        # Image geometry: 256x256 RGB.
        self.img_rows = 256
        self.img_cols = 256
        self.channel = 3

        # self.x_train = input_data.read_data_sets("mnist",\
        # 	one_hot=True).train.images
        # self.x_train = self.x_train.reshape(-1, self.img_rows,\
        # 	self.img_cols, 1).astype(np.float32)
        # self.x_train = self.x_train.reshape(-1, 28,\
        # 	28, 1).astype(np.float32)

        # NOTE(review): hard-coded absolute Windows path — works only on the
        # author's machine; should come from configuration.
        folder = 'C:/Users/lhrfxg/workspace/1902_fastai/datasets/codalab/'

        from image_loader import ImageLoader
        loader = ImageLoader(folder)
        # Only the first 100 images are used for training.
        data = loader.setup(datalen=100)
        self.x_train = np.array(data)
        self.DCGAN = DCGAN()
        self.discriminator = self.DCGAN.discriminator_model()
        self.adversarial = self.DCGAN.adversarial_model()
        self.generator = self.DCGAN.generator()
Пример #13
0
def compute_mean_and_std(dir_name: str) -> (np.array, np.array):
    '''
    Compute the mean and the standard deviation of the dataset.

    Every image from both the 'train' and 'test' splits is scaled to [0, 1]
    and flattened into one long pixel column; a StandardScaler fitted on that
    column yields the scalar statistics.

    Args:
    -   dir_name: the path of the root dir
    Returns:
    -   mean: mean value of the dataset (np.array containing a scalar value)
    -   std: standard deviation of the dataset (np.array containing a scalar value)
    '''
    imageset = []
    # The original repeated an identical loop for 'train' and 'test';
    # iterate over both splits with one loop instead.
    for split in ('train', 'test'):
        loader = ImageLoader(dir_name, split=split)
        for i in range(len(loader)):
            img, _ = loader[i]
            img = np.array(img).astype(np.float64) / 255.0
            imageset.append(img.flatten())
    # One pixel per row, as StandardScaler expects a 2-D (n_samples, 1) array.
    imageset = np.concatenate(imageset, axis=0).reshape(-1, 1)

    scaler = StandardScaler()
    scaler.fit(imageset)
    print(scaler.mean_, scaler.scale_)
    return scaler.mean_, scaler.scale_
Пример #14
0
def main():
    """Run the genetic image approximation over every file in ./input."""
    input_files = [name for name in listdir('input')
                   if isfile(join('input', name))]
    summary = []
    for name in input_files:
        print(name)
        stem = name.split('.')[0]
        in_path = 'input/' + name
        out_initial = 'results2/init-' + stem + '.png'
        out_end = 'results2/end-' + stem + '.png'
        reader = ImageLoader(in_path)
        genetics = Genetics(reader.image_width, reader.image_length,
                            reader.base_pixels_array_alpha,
                            reader.resized_pixels_array_alpha)
        # Save the best candidate before and after evolution for comparison.
        genetics.get_best_image().save(out_initial)
        genetics.run()
        genetics.get_best_image().save(out_end)
        genetics.full_evaluation()
        summary.append("{0} - {1}".format(name, genetics.best_state.fitness))

    print(summary)
Пример #15
0
def main():
    """Demonstrate binarization and dithering on blue.jpg, then plot all three."""
    original = ImageLoader('blue.jpg').image
    binarized = deepcopy(original)
    dithered = deepcopy(original)
    print("Images loaded")

    Binarization(binarized, 140).binarize()
    print("Binarization done")

    started = time()
    Dithering(dithered, 2).dither()
    finished = time()
    print("Dithering done with time: ", finished - started)

    plotter = ImagePlotter(3)
    for img in (original, binarized, dithered):
        plotter.plot(img)
Пример #16
0
    def play(self, length, display_controller):
        """Play the loaded frame sequence on the LED strip for *length* seconds.

        Args:
            length: playback duration in seconds.
            display_controller: supplies rotation timing and angle estimates;
                a fresh DisplayController is created when None.
        """
        if len(self.sequence) == 0:
            print("No sequence loaded! Cancel play")
            return

        # Hardware playback only works on the Pi.
        if is_running_on_pi == False:
            return

        if display_controller is None:
            display_controller = DisplayController()

        self.strip.begin()
        self.start_time = time.time()
        end_time = length + self.start_time

        # NOTE(review): `black` and `counter` appear unused in this method.
        black = ImageLoader.black()
        # angle_offset_pixels = (int) (PHYSICAL_ANGLE_OFFSET * 360.0 / PIXELS_IN_CIRCLE)
        # print "offsting image by " + str(angle_offset_pixels)
        print("playing sequence for ", length, "seconds")

        current_image = self.sequence[0]
        counter = 0

        last_switch = time.time()
        diff = 0
        if is_running_on_pi:
            # Spin until the controller's last update passes the end time,
            # pushing the angle-matched column of the current frame each tick.
            while end_time > display_controller.last_update:
                diff = diff + time.time() - last_switch
                last_switch = time.time()

                current_image = self.get_frame()
                display_controller.update()
                angle = display_controller.estimate_angle()

                self.strip.show(current_image.get_sample_by_angle(angle))
                time.sleep(0.0001)

        else:
            # Off-Pi fallback: just burn the wall-clock time.
            while end_time > timing["last_update"]:
                timing["last_update"] = time.time()

        self.stop()
Пример #17
0
def main():
    """Train ImageResUp, converting JPEGs to raw data on the first run."""
    first_run = not os.path.exists('data_raw')

    loader = ImageLoader('data_raw', 'dat', dtype=np.uint8)
    if first_run:
        loader.convert_to_raw('data_jpg')
    loader.load(0.1)

    model = ImageResUp('resup', loader)
    model.create()
    model.compile()
    model.train(10, 80, 10)
    model.generate(16)
Пример #18
0
def train_sgd(n_epoch, n_samples, clf, train_dir, img_size, classes,
              *, eval_every=10, n_eval_samples=1000):
    """Incrementally train an SGD classifier on sampled images.

    Args:
        n_epoch: number of training epochs.
        n_samples: images drawn per epoch.
        clf: estimator exposing ``partial_fit`` / ``predict``.
        train_dir: directory the image loader reads from.
        img_size: target size used for loading and preprocessing.
        classes: mapping of label names to class ids.
        eval_every: evaluate accuracy every this many epochs (was hard-coded 10).
        n_eval_samples: images drawn for each evaluation (was hard-coded 1000).
    """
    image_loader = ImageLoader(dir_name=train_dir)
    image_preproc = ImagePreprocessing(img_size=img_size)
    # The class-id list is invariant across the loop; compute it once.
    class_ids = list(classes.values())
    for epoch in range(n_epoch):
        for _ in range(n_samples):
            img = image_loader.get_one_image(labels=classes, new_size=img_size)
            gray_scaled = image_loader.rgb2gray(img["data"])
            train_sample = image_preproc.simple_preproc(gray_scaled)
            clf.partial_fit([train_sample], [img["target"]],
                            classes=class_ids)
            del img
        # Periodic held-out evaluation on freshly sampled images.
        if epoch % eval_every == 0:
            test_data = []
            test_labels = []
            for _ in range(n_eval_samples):
                test_img = image_loader.get_one_image(labels=classes,
                                                      new_size=img_size)
                gray_scaled = image_loader.rgb2gray(test_img["data"])
                test_data.append(image_preproc.simple_preproc(gray_scaled))
                test_labels.append(test_img["target"])
            print(
                f"accuracy {accuracy_score(clf.predict(test_data), test_labels)*100}"
            )
            del test_data, test_labels
Пример #19
0
 def __load_images(self):
     """
     Load the images from ``self.image_dir`` into ``self.imagenet`` so they
     can be queried later.

     :return: None
     """
     self.imagenet = ImageLoader(self.image_dir)
Пример #20
0
 def load_image(self, item):
     """Best-effort load of an item's thumbnail image.

     Failures are reported but deliberately not propagated, so one bad
     thumbnail does not abort the caller.
     """
     try:
         loader = ImageLoader(self.gui, item['thumbnailImg'])
         loader.load()
     except Exception as ex:
         # BUG FIX: `print ex` is Python-2-only syntax; use the function form
         # so this runs under Python 3 like the rest of the codebase.
         print(ex)
Пример #21
0
from image_loader import ImageLoader
import sys
import json
import time
from cvtools import cv_load_image

# Benchmark script: reads a COCO-style JSON (path given as argv[1]) and
# times ImageLoader over every listed image, printing the running average
# load time per image.
with open(sys.argv[1], 'r') as f:
    d = json.load(f)

imloader = ImageLoader('opencv', 'bgr')

st = time.time()
for ix, i in enumerate(d['images']):
    img = imloader.load(i['file_name'])
    # \r keeps the running average on a single console line.
    sys.stdout.write("{}\r".format((time.time() - st) / (ix + 1)))
    sys.stdout.flush()
Пример #22
0
import tensorflow as tf
import matplotlib.pyplot as plt
import tensorflow_helpers as tfh
from image_loader import ImageLoader
import numpy as np
import datetime
import imageio

# Input geometry: 72x40 RGB images, 2 output classes.
width = 72
height = 40

x = tf.placeholder(tf.float32, shape=[None, 40, 72, 3])
y_true = tf.placeholder(tf.float32, shape=[None, 2])
# Dropout keep-probability, fed at run time.
hold_prob = tf.placeholder(tf.float32)

loader = ImageLoader("./FORMAT_72x40")

# First conv block: two 5x5 conv layers (3->64->64) + 2x2 max-pool.
convo_1a = tfh.convolutional_layer(x, shape=[5, 5, 3, 64])
convo_1b = tfh.convolutional_layer(convo_1a, shape=[5, 5, 64, 64])
convo_1_pooling = tfh.max_pool_2by2(convo_1b)

# Second conv block: two more 5x5 conv layers (64->64) + 2x2 max-pool.
convo_2a = tfh.convolutional_layer(convo_1_pooling, shape=[5, 5, 64, 64])
convo_2b = tfh.convolutional_layer(convo_2a, shape=[5, 5, 64, 64])
convo_2_pooling = tfh.max_pool_2by2(convo_2b)

# After two 2x2 pools: 72x40 -> 18x10 spatial, 64 channels.
convo_2_flat = tf.reshape(convo_2_pooling, [-1, 18 * 10 * 64])

full_layer_one = tf.nn.relu(tfh.normal_full_layer(convo_2_flat, 512))

full_layer_two = tf.nn.relu(tfh.normal_full_layer(full_layer_one, 256))
Пример #23
0
class cycleGAN:
    """
    the cycle model
    """
    def __init__(self,  sess , args):
        """Record CLI args and sizes, build the image loader and batch counts,
        optionally wipe working directories, then construct the graph.

        :param sess: an open tf.Session used for all graph execution
        :param args: parsed CLI namespace (sizes, dirs, hyper-parameters)
        """

        self.start_time = time.time ()
        self.sess = sess
        # Image pool of previously generated fakes for discriminator updates.
        self.pool = ImagePool(max_size= args.max_size)
        self.img_size = (args.image_size , args.image_size)
        self.load_size = (args.load_size , args.load_size)
        self.img_channels = args.image_channel
        self.il = ImageLoader (load_size= self.load_size , img_size=self.img_size ,data_dir = args.data_dir ,target_dir = args.target_dir)
        self.data_dir = args.data_dir
        self.target_dir = args.target_dir
        self.video_dir = args.video_dir
        self.sample_dir = args.sample_dir
        self.checkpoint_dir = args.checkpoint_dir
        self.log_dir = args.log_dir
        self.output_data_dir = os.path.join ('results' ,args.output_data_dir)
        self.output_target_dir = os.path.join ('results' ,args.output_target_dir)
        self.gf_dim = args.gf_dim
        self.df_dim = args.df_dim
        self.l1_lambda = args.l1_lambda
        self.learning_rate = args.learning_rate
        # NOTE(review): "bata1" presumably means Adam's beta1; the name is
        # kept because train() reads self.bata1.
        self.bata1 = args.bata1
        self.epoch_num = args.epoch_num
        self.batch_size = args.batch_size
        # Batches per epoch are limited by the smaller of the two datasets.
        self.data_batch_num = self.il.get_image_num() // self.batch_size
        self.target_batch_num = self.il.get_image_num(is_data= False) // self.batch_size
        self.batch_num = min (self.data_batch_num ,self.target_batch_num)
        self.global_step = 0

        # Optionally delete and recreate logs/checkpoints/samples for a fresh run.
        if args.clear_all_memory:
            print('start clear all memory...')

            def clear_files(clear_dir):
                shutil.rmtree (clear_dir)
                os.mkdir (clear_dir)

            clear_files(self.log_dir)
            clear_files(self.checkpoint_dir)
            clear_files(self.sample_dir)

            print ('successfully clear all memory...')


        if not os.path.exists('results'):
            os.makedirs('results')

        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir)

        # Construct the full TF graph (generators, discriminators, losses).
        self._build(args)


        self.saver = tf.train.Saver()


    def _build(self , args):
        """
        Build the cycleGAN graph: input pipelines, both generators and
        discriminators, the cycle-consistency and adversarial losses, and
        their TensorBoard summaries.

        :param args: parse_args() , args from main method
        :return:
        """
        self.front_time = time.time ()
        print('building cycleGAN model...')
        with tf.device ('/device:GPU:0'):
            # Queue-based input pipelines: train/test batches for both domains.
            self.data_train = self.il.load_img_batch(batch_size= self.batch_size,
                                                     num_threads= args.num_threads ,shuffle= True)
            self.data_test = self.il.load_img_batch(batch_size= self.batch_size ,
                                                    num_threads= args.num_threads , shuffle= False , is_train= False)
            self.target_train = self.il.load_img_batch(batch_size= self.batch_size,
                                                       num_threads= args.num_threads ,shuffle= True , is_data= False)
            self.target_test = self.il.load_img_batch(batch_size= self.batch_size,
                                                      num_threads= args.num_threads ,shuffle= False , is_data= False , is_train= False)

        # Coordinator + queue runners feed the batches defined above.
        self.coord = tf.train.Coordinator ()
        self.threads = tf.train.start_queue_runners (sess = self.sess)

        print ("got data and target...")
        # Define the training input placeholders.
        with tf.device ('/device:GPU:0'):
            with tf.name_scope('input'):
                self.data_input = tf.placeholder(dtype= tf.float32 ,
                                                 shape= [None ,self.img_size[0] ,self.img_size[1] ,self.img_channels] ,
                                                 name ='data_input')
                self.target_input = tf.placeholder (dtype=tf.float32 ,
                                                    shape=[None ,self.img_size[0] ,self.img_size[1] ,self.img_channels] ,
                                                    name='target_input')

                # Show the input images in TensorBoard.
                tf.summary.image ('data_input' ,self.data_input ,1)
                tf.summary.image ('target_input' ,self.target_input ,1)

        # Generators: data -> fake target -> reconstructed data, and the
        # symmetric target -> fake data -> reconstructed target cycle.
        with tf.device('/device:GPU:0'):
            self.faker_target = gen_resnet(self.data_input ,gf_dim= self.gf_dim ,reuse= False ,name='gen_data2target')
        with tf.device ('/device:GPU:0'):
            self.faker_data_ = gen_resnet(self.faker_target ,gf_dim= self.gf_dim , reuse= False , name= 'gen_target2data')

        with tf.device ('/device:GPU:0'):
            self.faker_data = gen_resnet(self.target_input ,gf_dim= self.gf_dim ,reuse= True ,name='gen_target2data')
            self.faker_target_ = gen_resnet(self.faker_data , gf_dim= self.gf_dim , reuse= True , name= 'gen_data2target')

        with tf.device ('/device:GPU:0'):
            # "Sample" inputs are fakes drawn from the image pool.
            with tf.name_scope('sample-input'):
                self.faker_data_input_sample = tf.placeholder(dtype= tf.float32 ,
                                                              shape= [None ,self.img_size[0] ,self.img_size[1] ,self.img_channels] ,
                                                              name = 'faker_data_input_sample')
                self.faker_target_input_sample = tf.placeholder (dtype=tf.float32 ,
                                                                 shape=[None ,self.img_size[0] ,self.img_size[1] ,self.img_channels] ,
                                                                 name='faker_target_input_sample')


                tf.summary.image ('faker-data-input-sample' ,self.faker_data_input_sample ,1)
                tf.summary.image ('faker-target-input-sample' ,self.faker_target_input_sample ,1)

            # Discriminator scores for the generated data and target.
            self.faker_data_disc = disc(self.faker_data , df_dim= self.df_dim , name= 'disc_data')
            self.faker_target_disc = disc(self.faker_target , df_dim= self.df_dim , name= 'disc_target')

            # Discriminator scores for the real data and target.
            self.data_prob = disc(self.data_input ,df_dim = self.df_dim ,reuse= True ,name='disc_data')
            self.target_prob = disc(self.target_input ,df_dim= self.df_dim ,reuse= True ,name ='disc_target')

            # Discriminator scores for pooled (historical) fakes.
            self.faker_data_sample_prob = disc(self.faker_data_input_sample ,df_dim = self.df_dim ,reuse= True ,name='disc_data')
            self.faker_target_sample_prob = disc(self.faker_target_input_sample ,df_dim= self.df_dim ,reuse= True ,name ='disc_target')

        # Generator losses: least-squares adversarial term plus the
        # L1 cycle-consistency terms weighted by l1_lambda.
        with tf.device ('/device:GPU:0'):
            self.gen_loss_data2target = mean_square_loss(self.faker_target_disc , tf.ones_like(self.faker_target_disc)) + \
                                        self.l1_lambda * mean_abs_loss(self.data_input ,self.faker_data_) + \
                                        self.l1_lambda * mean_abs_loss(self.target_input ,self.faker_target_)
            self.gen_loss_target2data = mean_square_loss(self.faker_data_disc , tf.ones_like(self.faker_data_disc)) + \
                                        self.l1_lambda * mean_abs_loss(self.data_input ,self.faker_data_) + \
                                        self.l1_lambda * mean_abs_loss(self.target_input ,self.faker_target_)
            self.gen_loss = mean_square_loss(self.faker_data_disc , tf.ones_like(self.faker_data_disc)) + \
                            mean_square_loss(self.faker_target_disc , tf.ones_like(self.faker_target_disc)) + \
                            self.l1_lambda * mean_abs_loss(self.data_input ,self.faker_data_) + \
                            self.l1_lambda * mean_abs_loss(self.target_input ,self.faker_target_)

        # Discriminator losses: real images toward 1, pooled fakes toward 0.
        with tf.device ('/device:GPU:0'):
            self.disc_loss_data = mean_square_loss(self.data_prob ,tf.ones_like(self.data_prob))
            self.disc_loss_faker_data = mean_square_loss(self.faker_data_sample_prob ,tf.zeros_like(self.faker_data_sample_prob))
            self.disc_loss_data = (self.disc_loss_data + self.disc_loss_faker_data ) / 2.0

            self.disc_loss_target = mean_square_loss(self.target_prob ,tf.ones_like(self.target_prob))
            self.disc_loss_faker_target = mean_square_loss(self.faker_target_sample_prob ,tf.zeros_like(self.faker_target_sample_prob))
            self.disc_loss_target = (self.disc_loss_target + self.disc_loss_faker_target) / 2.0

        self.disc_loss = self.disc_loss_data + self.disc_loss_target


        # Scalar summaries of the losses for TensorBoard.
        tf.summary.scalar('gen_loss_data2target' , self.gen_loss_data2target)
        tf.summary.scalar('gen_loss_target2data' , self.gen_loss_target2data)
        tf.summary.scalar('gen_loss' , self.gen_loss)

        tf.summary.scalar('disc_loss_data' , self.disc_loss_data)
        tf.summary.scalar('disc_loss_target' , self.disc_loss_target)

        # Define the test input placeholders.
        with tf.name_scope('test-input'):
            self.test_data_input = tf.placeholder (dtype=tf.float32 ,
                                                   shape=[None ,self.img_size[0] ,self.img_size[1] ,self.img_channels] ,
                                                   name='test_data_input')
            self.test_target_input = tf.placeholder (dtype=tf.float32 ,
                                                     shape=[None ,self.img_size[0] ,self.img_size[1] ,self.img_channels] ,
                                                     name='test_target_input')

        with tf.device ('/device:GPU:0'):
            # Generator outputs for the test inputs (weights are reused).
            self.faker_test_target = gen_resnet(self.test_data_input ,gf_dim= self.gf_dim ,reuse= True ,name='gen_data2target')
            self.faker_test_data_ = gen_resnet(self.faker_test_target ,gf_dim= self.gf_dim , reuse= True , name= 'gen_target2data')

        with tf.device ('/device:GPU:0'):
            self.faker_test_data = gen_resnet(self.test_target_input ,gf_dim= self.gf_dim ,reuse= True ,name='gen_target2data')
            self.faker_test_target_ = gen_resnet(self.faker_test_data , gf_dim= self.gf_dim , reuse= True , name= 'gen_data2target')

        # Split the trainable variables for the two optimizers by name prefix.
        self.vars = tf.trainable_variables()

        self.gen_vars = [v for v in self.vars if 'gen' in v.name]
        self.disc_vars = [v for v in self.vars if 'disc' in v.name]

        # tf.reset_default_graph ()

        print("sussessfully init variable")


        print ('total build time: %02f sec...' % (time.time () - self.front_time))
        print('successfully build cycleGAN model...')



    def train(self , args):
        """Run the cycleGAN training loop.

        Creates the Adam optimizers and summary writer, optionally restores a
        checkpoint, then alternates generator and discriminator updates for
        epoch_num epochs, periodically logging, checkpointing, and sampling.

        :param args: parsed CLI namespace (frequencies, decay epoch, threads)
        """

        self.current_learning_rate = tf.placeholder (dtype=tf.float32 ,name='current_learning_rate')
        # Define the optimizers that minimize the discriminator and generator losses.
        with tf.device ('/device:GPU:0'):
            self.disc_optim = tf.train.AdamOptimizer (learning_rate=self.current_learning_rate ,beta1=self.bata1). \
                minimize (self.disc_loss ,var_list=self.disc_vars)
        with tf.device ('/device:GPU:0'):
            self.gen_optim = tf.train.AdamOptimizer (learning_rate=self.current_learning_rate ,beta1=self.bata1). \
                minimize (self.gen_loss ,var_list=self.gen_vars)

        self.merged = tf.summary.merge_all ()
        self.writer = tf.summary.FileWriter (
            os.path.join (self.log_dir ,datetime.datetime.now ().strftime ("%Y%m%d-%H%M%S")) ,
            self.sess.graph)

        # Initialize the model variables.
        self.sess.run (tf.global_variables_initializer ())
        # NOTE(review): this second FileWriter overwrites the timestamped one
        # created just above — confirm which log location is intended.
        self.writer = tf.summary.FileWriter (self.log_dir ,self.sess.graph)

        # Optionally restore a checkpoint to support continued training.
        if args.continue_train:
            if self._load(self.checkpoint_dir):
                print('success to load checkpoint')
            else:
                print('fail to load checkpoint')

        print ('start training...')
        # Training epochs.
        try:
            for epoch in range(self.global_step // self.batch_num , self.epoch_num):
                # dataA = glob ('./datasets/' + self.data_dir + '_train/%sx%s/*.*' %(self.img_size[0] , self.img_size[1]))
                # dataB = glob ('./datasets/' + self.target_dir + '_train/%sx%s/*.*' %(self.img_size[0] , self.img_size[1]))
                # np.random.shuffle (dataA)
                # np.random.shuffle (dataB)
                for b in range(self.batch_num):
                    # data_batch = list(dataA[b * self.batch_size:(b + 1) * self.batch_size])
                    # target_batch = list(dataB[b * self.batch_size:(b + 1) * self.batch_size])
                    # data_batch = [load_train_data (batch_file ,args.load_size ,args.image_size) for batch_file in
                    #                 data_batch]
                    # target_batch = [load_train_data (batch_file ,args.load_size ,args.image_size) for batch_file in
                    #                 target_batch]
                    # data_batch = np.array (data_batch).astype (np.float32)
                    # target_batch = np.array (target_batch).astype (np.float32)
                    step = epoch * self.batch_num + b
                    # Linearly decay the learning rate during the epochs after epoch_decay.
                    learning_rate = self.learning_rate if epoch < args.epoch_decay \
                        else (self.epoch_num - epoch) / (self.epoch_num - args.epoch_decay) * self.learning_rate
                    # Generator update, then discriminator update on pooled fakes.
                    data_batch ,target_batch = self.sess.run([self.data_train ,self.target_train])
                    self.sess.run(self.gen_optim ,feed_dict={self.data_input:data_batch ,
                                                             self.target_input : target_batch ,
                                                             self.current_learning_rate: learning_rate})
                    temp_data ,temp_target = self.sess.run([self.faker_data , self.faker_target],
                                                           feed_dict={self.data_input:data_batch ,self.target_input : target_batch})
                    temp_data ,temp_target = self.pool([temp_data , temp_target])
                    self.sess.run(self.disc_optim ,feed_dict= {self.data_input: data_batch ,self.target_input: target_batch ,
                                                               self.faker_data_input_sample:temp_data ,
                                                               self.faker_target_input_sample: temp_target ,
                                                               self.current_learning_rate: learning_rate})

                    # Every merged_frequent steps, write summaries to TensorBoard.
                    if (step + 1) % args.merged_frequent == 0:
                        l1 , l2 , summary = self.sess.run([self.gen_loss , self.disc_loss , self.merged] ,
                                                          feed_dict= {self.data_input: data_batch ,self.target_input: target_batch ,
                                                                      self.faker_data_input_sample:temp_data ,
                                                                      self.faker_target_input_sample: temp_target})
                        self.writer.add_summary (summary ,step)
                        print('global step %s -- epoch %s : gen loss: %s disc loss: %s current learning rate %.06f'
                              %(step + 1 , epoch , l1 , l2 , learning_rate))

                    # Every save_frequent steps, save a checkpoint.
                    if (step + 1) % args.save_frequent == 0:
                        self._save(self.checkpoint_dir , step + 1)

                    # Every sample_frequent steps, sample test-set results.
                    if (step + 1) % args.sample_frequent == 0:
                        self._sample(args , step + 1)
        except Exception as e:
            print(e)
            self.coord.request_stop (e)

        # Shut down the input queue threads cleanly.
        self.coord.request_stop()
        self.coord.join(self.threads)

        print ('successfully end training...')

    def _save(self ,checkpoint_dir ,step):
        """
        Persist the current TF session as a checkpoint.

        The checkpoint lands in ``<checkpoint_dir>/<data_dir>_<img_size>/``
        and the saver appends ``-<step>`` to the file name.
        :param checkpoint_dir: string , root directory for checkpoints
        :param step: int , the global training step
        :return: None
        """
        print('saving model...')
        # per-dataset / per-resolution subdirectory
        target_dir = os.path.join(checkpoint_dir, '%s_%s' % (self.data_dir, self.img_size))
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        ckpt_path = os.path.join(target_dir, 'cyclegan.model')
        self.saver.save(self.sess, ckpt_path, global_step=step)
        print('successfully saving model...')


    def _load(self ,checkpoint_dir):
        """
        Restore the newest checkpoint, if any, into the session.

        Also recovers ``self.global_step`` from the checkpoint file name
        (the saver encodes the step as the ``-<step>`` suffix).
        :param checkpoint_dir: string , root directory for checkpoints
        :return: bool , True when a checkpoint was found and restored
        """
        print('reading checkpoint...')

        ckpt_root = os.path.join(checkpoint_dir, '%s_%s' % (self.data_dir, self.img_size))
        state = tf.train.get_checkpoint_state(ckpt_root)
        if not (state and state.model_checkpoint_path):
            return False
        ckpt_name = os.path.basename(state.model_checkpoint_path)
        # the suffix after the last '-' is the saved global step
        self.global_step = int(ckpt_name.split('-')[-1])
        self.saver.restore(self.sess, os.path.join(ckpt_root, ckpt_name))
        return True

    def save_result(self , result ,output_dir ,name):
        """
        Write one generated image to ``<output_dir>/<name>.jpg``.

        :param result: network output with values in [-1, 1], reshapeable to HxWxC
        :param output_dir: string , destination directory (assumed to exist)
        :param name: string , file name without extension
        :return: None
        """
        height, width = self.img_size[0], self.img_size[1]
        image = np.reshape(result, [height, width, self.img_channels])
        # map tanh range [-1, 1] back to pixel range [0, 255]
        image = ((image + 1.0) * 127.5).astype(int)
        # NOTE(review): scipy.misc.imsave was removed in SciPy 1.2 —
        # consider migrating to imageio.imwrite
        scipy.misc.imsave(os.path.join(output_dir, name + '.jpg'), image)

    def _sample(self , args , step):
        """
        Sample test images from both domains and write them to disk.

        For up to ``args.sample_num`` test images per domain, saves the
        original image, its translation into the other domain, and its
        cycle reconstruction under ``<args.sample_dir>/step-<step>/``.

        :param args: arg_parse() , args from main (uses sample_dir, sample_num)
        :param step: int , the global training step (names the output folder)
        :return: None
        """
        print('global step %03d -- sample images ...' %step)
        if not os.path.exists (args.sample_dir):
            os.makedirs (args.sample_dir)

        # one sub-directory per sampling step
        iter_dir = os.path.join(args.sample_dir , 'step-%06d' %step)

        if not os.path.exists (iter_dir):
            os.makedirs (iter_dir)

        # domain "data": original -> translated target -> rebuilt data
        test_num = self.il.get_image_num (is_data=True ,is_train=False)
        for i in range (min(test_num , args.sample_num)):
            # pull the next test example from the input pipeline
            data_test_batch = self.sess.run (self.data_test)

            original_data ,result_target ,result_data_ = \
                self.sess.run ([self.test_data_input ,self.faker_test_target ,self.faker_test_data_] ,
                               feed_dict={self.test_data_input: data_test_batch})

            self.save_result (original_data ,iter_dir ,'%03d' % i + 'data-original')
            self.save_result (result_target ,iter_dir ,'%03d' % i + 'target-transformed')
            self.save_result (result_data_ ,iter_dir ,'%03d' % i + 'data-rebuild')

        # domain "target": original -> translated data -> rebuilt target
        test_num = self.il.get_image_num (is_data=False ,is_train=False)
        for i in range (min(test_num , args.sample_num)):
            target_test_batch = self.sess.run (self.target_test)
            original_target ,result_data ,result_target_ = \
                self.sess.run ([self.test_target_input ,self.faker_test_data ,self.faker_test_target_] ,
                               feed_dict={self.test_target_input: target_test_batch})

            self.save_result (original_target ,iter_dir ,'%03d' % i + 'target-original')
            self.save_result (result_data ,iter_dir ,'%03d' % i + 'data-transformed')
            self.save_result (result_target_ ,iter_dir ,'%03d' % i + 'target-rebuild')

        print('global step %03d -- successfully sample images ...' %step)


    def test(self , args):
        """
        Run inference with a restored checkpoint.

        When ``args.image_transform`` is set, translates every test image in
        the direction chosen by ``args.test_direction_data2target`` and saves
        original / transformed / cycle-rebuilt frames. Otherwise runs a video
        through ``VideoEditor`` frame by frame.

        :param args: arg_parse() , args from main
        :return: None
        """
        print("start testing...")
        self.front_time = time.time ()
        self.sess.run (tf.global_variables_initializer ())

        # NOTE(review): a failed checkpoint load only warns and continues,
        # which would run inference with random weights — consider aborting
        if self._load (self.checkpoint_dir):
            print ('success to load checkpoint')
        else:
            print ('fail to load checkpoint')

        if args.image_transform:
            print('test image...')
            if not os.path.exists (self.output_data_dir):
                os.makedirs (self.output_data_dir)

            if not os.path.exists (self.output_target_dir):
                os.makedirs (self.output_target_dir)

            if args.test_direction_data2target:
                # data -> target direction: for every test image save the
                # original, its translation and its cycle reconstruction
                test_num = self.il.get_image_num(is_data= True , is_train= False)
                for i in range(test_num):
                    data_test_batch = self.sess.run(self.data_test)

                    original_data , result_target , result_data_ = \
                        self.sess.run([self.test_data_input , self.faker_test_target , self.faker_test_data_] ,
                                                                 feed_dict={self.test_data_input:data_test_batch})

                    self.save_result(original_data , self.output_target_dir , '%03d' % i + '-original')
                    self.save_result(result_target , self.output_target_dir , '%03d' % i + '-transformed')
                    self.save_result(result_data_ , self.output_target_dir , '%03d' % i + '-rebuild')
            else:
                # target -> data direction
                test_num = self.il.get_image_num (is_data= False ,is_train=False)
                for i in range(test_num):
                    target_test_batch = self.sess.run(self.target_test)
                    original_target , result_data , result_target_ = \
                        self.sess.run([self.test_target_input , self.faker_test_data , self.faker_test_target_] ,
                                                                 feed_dict={self.test_target_input:target_test_batch})

                    self.save_result (original_target ,self.output_data_dir ,'%03d' % i + '-original')
                    self.save_result (result_data ,self.output_data_dir ,'%03d' % i + '-transformed')
                    self.save_result (result_target_ ,self.output_data_dir ,'%03d' % i + '-rebuild')
            print('successfully test images...')

        else:
            print('test video...')
            # presumably transform() extracts frames from args.original_video
            # into args.input_video — confirm against VideoEditor
            self.video_editor = VideoEditor (video_dir=args.video_dir)
            self.video_editor.transform(args.original_video , args.input_video)

            if args.test_direction_data2target:
                self.video_editor.save(input_name = args.input_video ,output_name= args.output_video ,
                                       sess= self.sess ,faker_test_tensor= self.faker_test_target ,
                                       input_tensor= self.test_data_input)
            else:
                self.video_editor.save (input_name=args.input_video ,output_name=args.output_video ,
                                        sess= self.sess , faker_test_tensor= self.faker_test_data ,
                                        input_tensor= self.test_target_input)

            print ('total test time: %02f sec' % (time.time () - self.front_time))
            print('successfully test video...')
Пример #24
0
from image_loader   import ImageLoader
from bunch_of_cells import BunchOfCells

# Input image that will be sliced, marked and reassembled.
InputFileName = 'babaYaga.jpg'


ImgLdr = ImageLoader(InputFileName)
ImgLdr.LenOfCellSide = 100

# input picture is cut into squares of side 'LenOfCellSide'
MatrixOfCells = ImgLdr.getCells()


# object for image marking
# bugfix: the original assigned the instance to the name 'BunchOfCells',
# shadowing the imported class of the same name
cells = BunchOfCells(MatrixOfCells)

# marking parameters setting
cells.radius_red  = 49
cells.radius_blue = 48
cells.red_blue_contrast = 10


# image slices are marked, put back into single picture and saved to a new file
cells.markCells()
# NOTE(review): OutputFileName is built but never used in this snippet —
# presumably markCells() (or code below this chunk) writes the file; confirm.
OutputFileName = (InputFileName.split('.')[0] + '_marked'
                  + '_CellSide'          + str(ImgLdr.LenOfCellSide)
                  + '_radius_red'        + str(cells.radius_red)
                  + '_radius_blue'       + str(cells.radius_blue)
                  + '_red_blue_contrast' + str(cells.red_blue_contrast)
                  + '.'                  + InputFileName.split('.')[1])
Пример #25
0
# initialise SPI
# connect to the local pigpio daemon and abort when it is unreachable
pi = pigpio.pi()
if not pi.connected:
    print("could not connect SPI")
    exit()
# SPI channel 0, 500 kbaud, flags 0 (the trailing comment lists other
# baud rates that were presumably tried — confirm which the hardware needs)
spi = pi.spi_open(0, 500000, 0)  # 243750 487500 975000 1950000

# initialise pin to arduino for flagging synchronisation
SYNC_PIN = 24  # GPIO pin numbers
pi.set_mode(SYNC_PIN, pigpio.INPUT)  # define pulldown/pullup

# frame buffer: one RGB triple per LED in an H x V grid
leds = np.zeros((NUM_LEDS_H, NUM_LEDS_V, 3), dtype='uint8')
mode = 0
# one submode slot per possible mode byte value (0..255)
submode = [0 for n in range(256)]

# content sources: static images, NES video stream, audio beat detection
iloader = ImageLoader(_num_leds_h=NUM_LEDS_H, _num_leds_v=NUM_LEDS_V)
strmnes = StreamNES(_num_leds_h=NUM_LEDS_H, _num_leds_v=NUM_LEDS_V, _ntsc=True)
abeatd = AudioBeatdetection(_num_leds_h=NUM_LEDS_H, _num_leds_v=NUM_LEDS_V)
# pixelflut runs in its own thread and hands data back through this queue
pixelflut_queue = Queue()
pixelflut_thread = Thread(target=pixelflut.threaded,
                          args=(pixelflut_queue,'pixelflut_brain.py'))
pixelflut_thread.start()
# block up to 5 s for the thread's first message
pixelflut_read = pixelflut_queue.get(timeout=5)

time.sleep(0.4)  # some needed initial delay

def decodeByte2Mode(byte):
    """Decode a control byte into ``(mode, submode)``.

    The two most-significant bits select the mode (returned 1-based,
    i.e. 1..4 for byte values 0..255); the remaining six bits carry
    the submode.
    """
    mode = (byte >> 6) + 1
    submode = byte & ~(3 << 6)
    return mode, submode

def read_mode_SPI():
Пример #26
0
 # fix RNG for reproducible behaviour
 np.random.seed(42)
 MODEL_DIR = os.path.join(ROOT_DIR, 'models', args.name)
 # classifier name is the run name up to the first '-'
 CLF_NAME = args.name.split('-')[0]
 os.makedirs(MODEL_DIR, exist_ok=True)
 # training images are laid out as <TRAIN_DIR>/<label>/<file>
 all_files = sorted(glob(os.path.join(TRAIN_DIR, '*', '*')))
 # each file's label is its parent directory name
 # (NOTE(review): `file` shadows the Python 2 builtin of that name)
 all_labels = [
     os.path.split(os.path.dirname(file))[-1] for file in all_files
 ]
 # stratified 90/10 train/validation split, seeded for reproducibility
 train_files, val_files = train_test_split(all_files,
                                           test_size=0.1,
                                           random_state=42,
                                           stratify=all_labels)
 MODEL_PATH = os.path.join(MODEL_DIR, CLF_NAME)
 train_loader = ImageLoader(files=train_files,
                            aug_config=args.augmentation,
                            balance=args.balance,
                            batch_size=args.batch_size,
                            clf_name=CLF_NAME,
                            crops=args.crop)
 # validation loader runs in 'val' mode (no augmentation/balance args passed)
 val_loader = ImageLoader(files=val_files,
                          mode='val',
                          batch_size=args.batch_size,
                          clf_name=CLF_NAME,
                          crops=args.crop)
 # both splits must expose the same label set
 assert train_loader.n_class == val_loader.n_class
 # shrink LR by 5x after 3 epochs without val-accuracy improvement
 lr_cb = ReduceLROnPlateau(monitor='val_categorical_accuracy',
                           factor=0.2,
                           patience=3,
                           verbose=1,
                           min_lr=2e-5)
 log_cb = LoggerCallback(log_path=MODEL_PATH)
 model_compile_args = {
Пример #27
0
def main():
    """Train (or, with ``--test``, evaluate) the triplet attribute-embedding model.

    Side effects: parses CLI args into the module-global ``args``, sets the
    globals ``plotter`` (optional), ``meta`` and ``attributes``, and writes
    checkpoints via ``save_checkpoint``.
    """
    global args

    args = parser.parse_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)

    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    if args.visdom:
        # shared with train()/test() through the module-global `plotter`
        global plotter
        plotter = VisdomLinePlotter(env_name=args.name)

    global meta
    meta = MetaLoader(args.data_path, args.dataset)

    # one index per attribute defined by the dataset metadata
    global attributes
    attributes = [i for i in range(len(meta.data['ATTRIBUTES']))]

    # embedding net on a ResNet-50 feature backbone, wrapped into a triplet net
    backbone = resnet.resnet50_feature()
    enet = get_model(args.model)(backbone,
                                 n_attributes=len(attributes),
                                 embedding_size=args.dim_embed)
    tnet = get_model('Tripletnet')(enet)
    if args.cuda:
        tnet.cuda()

    criterion = torch.nn.MarginRankingLoss(margin=args.margin)
    n_parameters = sum([p.data.nelement() for p in tnet.parameters()])
    logger.info('  + Number of params: {}'.format(n_parameters))

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            logger.info("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch'] + 1
            mAP = checkpoint['prec']
            tnet.load_state_dict(checkpoint['state_dict'])
            logger.info(
                "=> loaded checkpoint '{}' (epoch {} mAP on validation set {})"
                .format(args.resume, checkpoint['epoch'], mAP))
        else:
            logger.info("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    kwargs = {'num_workers': 4, 'pin_memory': True} if args.cuda else {}
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    # shared eval-time preprocessing (deduplicated: the original repeated this
    # identical Compose four times for the test/valid loaders)
    eval_transform = transforms.Compose([
        transforms.Resize(224),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])

    if args.test:
        # evaluation only: score retrieval mAP on the test split, then exit
        test_candidate_loader = torch.utils.data.DataLoader(
            ImageLoader(args.data_path,
                        args.dataset,
                        'filenames_test.txt',
                        'test',
                        'candidate',
                        transform=eval_transform),
            batch_size=args.batch_size,
            shuffle=True,
            **kwargs)

        test_query_loader = torch.utils.data.DataLoader(
            ImageLoader(args.data_path,
                        args.dataset,
                        'filenames_test.txt',
                        'test',
                        'query',
                        transform=eval_transform),
            batch_size=args.batch_size,
            shuffle=True,
            **kwargs)

        test_mAP = test(test_candidate_loader, test_query_loader, enet)
        sys.exit()

    # optimize only parameters that require gradients
    parameters = filter(lambda p: p.requires_grad, tnet.parameters())
    optimizer = optim.Adam(parameters, lr=args.lr)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=args.step_size,
                                                gamma=args.decay_rate)

    # training data: random triplets, re-sampled every epoch via refresh()
    train_loader = torch.utils.data.DataLoader(TripletImageLoader(
        args.data_path,
        args.dataset,
        args.num_triplets,
        transform=transforms.Compose([
            transforms.Resize(224),
            transforms.CenterCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               **kwargs)

    val_candidate_loader = torch.utils.data.DataLoader(
        ImageLoader(args.data_path,
                    args.dataset,
                    'filenames_valid.txt',
                    'valid',
                    'candidate',
                    transform=eval_transform),
        batch_size=args.batch_size,
        shuffle=True,
        **kwargs)

    val_query_loader = torch.utils.data.DataLoader(
        ImageLoader(args.data_path,
                    args.dataset,
                    'filenames_valid.txt',
                    'valid',
                    'query',
                    transform=eval_transform),
        batch_size=args.batch_size,
        shuffle=True,
        **kwargs)

    logger.info("Begin training on {} dataset.".format(args.dataset))

    best_mAP = 0
    start = time.time()
    for epoch in range(args.start_epoch, args.epochs + 1):
        # train for one epoch
        train(train_loader, tnet, criterion, optimizer, epoch)
        # re-sample the triplet set so the next epoch sees new combinations
        train_loader.dataset.refresh()
        # evaluate on validation set
        mAP = test(val_candidate_loader, val_query_loader, enet, epoch)

        # remember best meanAP and save checkpoint
        is_best = mAP > best_mAP
        best_mAP = max(mAP, best_mAP)
        save_checkpoint(
            {
                'epoch': epoch,
                'state_dict': tnet.state_dict(),
                'prec': mAP,
            }, is_best)

        # update learning rate and log it (first param group only)
        scheduler.step()
        for param_group in optimizer.param_groups:
            # bugfix: was `logging.info` (root logger) — inconsistent with the
            # module-level `logger` used everywhere else in this function
            logger.info('lr:{}'.format(param_group['lr']))
            break

    end = time.time()
    duration = int(end - start)
    minutes = (duration // 60) % 60
    hours = duration // 3600
    logger.info('training time {}h {}min'.format(hours, minutes))
Пример #28
0
def main():
    """Train the auto-encoder on the image dataset described by the CLI args.

    Builds the train/test split, trains for ``--epoch`` epochs with optional
    TensorBoard logging, checkpoints the weights with the lowest validation
    loss and reports final train/test metrics.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dataset_ratio",
        type=float,
        default=1,
        help="the purcentage of data used in the dataset (default: 1)")
    parser.add_argument("--image_size",
                        type=int,
                        default=512,
                        help="size of the input image (default: 512)")
    parser.add_argument("--depth",
                        type=int,
                        default=6,
                        help="depth of the autoencoder (default: 6)")
    parser.add_argument(
        "--num_block",
        type=int,
        default=3,
        help="number of blocks of the autoencoder (default: 3)")
    parser.add_argument("--epoch",
                        type=int,
                        default=1,
                        help="number of epoch (default: 1)")
    parser.add_argument("--batch",
                        type=int,
                        default=100,
                        help="number of batch (default: 100)")
    parser.add_argument("--valpct",
                        type=float,
                        default=0.2,
                        help="proportion of test data (default: 0.2)")
    parser.add_argument("--num_threads",
                        type=int,
                        default=1,
                        help="number of thread used (default: 1)")
    # bugfix: was `type=bool`, which turns ANY non-empty string (including
    # "False") into True; a flag matches the existing --log option
    parser.add_argument("--create_csv",
                        default=False,
                        action='store_true',
                        help="create or not csv file (default: False)")
    parser.add_argument("--log",
                        default=False,
                        action='store_true',
                        help="Write log or not (default: False)")
    # bugfix: was `type=int` with a float default — passing any value on the
    # command line would fail (int('0.001') raises) or truncate
    parser.add_argument("--l2_reg",
                        type=float,
                        default=0.001,
                        help="L2 regularisation (default: 0.001)")

    args = parser.parse_args()

    valid_ratio = args.valpct  # Going to use 80%/20% split for train/valid

    data_transforms = transforms.Compose([ToTensor(), Normalize()])

    full_dataset = ImageLoader(csv_file_path=LABEL_FILE_PATH,
                               image_directory=IMAGE_FOLDER_PATH,
                               mask_directory=MASK_FOLDER_PATH,
                               dataset_size=int(args.dataset_ratio *
                                                DATASET_SIZE),
                               image_size=args.image_size,
                               transform=data_transforms)

    nb_train = int((1.0 - valid_ratio) * len(full_dataset))
    nb_test = len(full_dataset) - nb_train

    print("Size of full data set: ", len(full_dataset))
    print("Size of training data: ", nb_train)
    print("Size of testing data:  ", nb_test)
    train_dataset, test_dataset = torch.utils.data.dataset.random_split(
        full_dataset, [nb_train, nb_test])

    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=args.batch,
                              shuffle=True,
                              num_workers=args.num_threads)

    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=args.batch,
                             shuffle=True,
                             num_workers=args.num_threads)

    model = AutoEncoder(num_block=args.num_block, depth=args.depth)

    # run on GPU when available
    use_gpu = torch.cuda.is_available()
    if use_gpu:
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    model.to(device)

    f_loss = loss.Custom_loss()

    # define optimizer
    optimizer = torch.optim.Adam(model.parameters(), weight_decay=args.l2_reg)

    # Make run directory
    run_name = "run-"
    LogManager = lw.LogManager(LOG_DIR, run_name)
    run_dir_path, num_run = LogManager.generate_unique_dir()

    # setup model checkpoint (keeps the weights with the lowest val loss)
    path_model_check_point = run_dir_path + MODEL_DIR
    if not os.path.exists(path_model_check_point):
        os.mkdir(path_model_check_point)
    model_checkpoint = ModelCheckpoint(path_model_check_point + BEST_MODELE,
                                       model)

    # bugfix: log_file_path was previously defined only under `if args.log:`
    # but used unconditionally in the training loop below, raising a
    # NameError whenever --log was not given
    run_desc = "Epoch{}".format(args.epoch)
    log_file_path = LOG_DIR + run_desc + "Run{}".format(num_run) + ".log"

    if args.log:
        print("Writing log")
        # generate unique folder for new run
        tensorboard_writer = SummaryWriter(log_dir=run_dir_path,
                                           filename_suffix=".log")
        LogManager.set_tensorboard_writer(tensorboard_writer)
        LogManager.summary_writer(model, optimizer)

    with tqdm(total=args.epoch) as pbar:
        for t in range(args.epoch):
            pbar.update(1)
            pbar.set_description("Epoch {}".format(t))
            train_loss, train_acc = train(model, train_loader, f_loss,
                                          optimizer, device, LogManager)

            progress(train_loss, train_acc)

            val_loss, val_acc = test(model,
                                     test_loader,
                                     f_loss,
                                     device,
                                     log_manager=LogManager)
            print(" Validation : Loss : {:.4f}, Acc : {:.4f}".format(
                val_loss, val_acc))

            # keep the best weights so far (lowest validation loss)
            model_checkpoint.update(val_loss)

            if args.log:
                tensorboard_writer.add_scalars("Loss/", {
                    'train_loss': train_loss,
                    'val_loss': val_loss
                }, t)
            LogManager.write_log(log_file_path, val_acc, val_loss, train_acc,
                                 train_loss)

    # reload the best checkpoint before the final evaluation
    model.load_state_dict(torch.load(path_model_check_point + BEST_MODELE))
    print(DIEZ + " Final Test " + DIEZ)
    _, _ = test(model,
                train_loader,
                f_loss,
                device,
                final_test=True,
                log_manager=LogManager,
                txt="training")

    test_loss, test_acc = test(model,
                               test_loader,
                               f_loss,
                               device,
                               log_manager=LogManager,
                               final_test=True)

    print(" Test       : Loss : {:.4f}, Acc : {:.4f}".format(
        test_loss, test_acc))
Пример #29
0
class HttpData:
    # HTTP/scraping facade for the site: authenticated GET/POST/AJAX plus
    # page scrapers that turn HTML into the dicts the addon UI consumes.
    #
    # NOTE(review): both attributes below are *class* attributes, shared by
    # every HttpData instance — presumably intentional (one cookie jar and
    # one image-loader cache per process), but confirm.

    mycookie = None
    imageloader = ImageLoader()

    def load(self, url):
        """GET *url* with the site cookies and return the body text.

        Returns None on a request exception or a non-200 status.  When the
        response shows we are logged out, triggers re-authorization.
        """
        try:
            self.auth = Auth()
            self.cookie = self.auth.get_cookies()
            # fall back to cookies captured from a previous response
            cook = self.mycookie if self.cookie == None else self.cookie
            response = xbmcup.net.http.get(url,
                                           cookies=cook,
                                           verify=False,
                                           proxies=PROXIES)
            if (self.cookie == None):
                self.mycookie = response.cookies
        except xbmcup.net.http.exceptions.RequestException:
            print traceback.format_exc()
            return None
        else:
            if (response.status_code == 200):
                if (self.auth.check_auth(response.text) == False):
                    self.auth.autorize()
                return response.text
            return None

    def post(self, url, data):
        """POST *data* to *url* with the site cookies and return the body text.

        Returns None on a request exception or a non-200 status.  When the
        response shows we are logged out, triggers re-authorization.
        """
        # NOTE(review): dead code — *data* is a required parameter, so the
        # NameError this guards against can never occur.
        try:
            data
        except:
            data = {}
        try:
            self.auth = Auth()
            self.cookie = self.auth.get_cookies()
            # fall back to cookies captured from a previous response
            cook = self.mycookie if self.cookie == None else self.cookie
            response = xbmcup.net.http.post(url,
                                            data,
                                            cookies=cook,
                                            verify=False,
                                            proxies=PROXIES)

            if (self.cookie == None):
                self.mycookie = response.cookies

        except xbmcup.net.http.exceptions.RequestException:
            print traceback.format_exc()
            return None
        else:
            if (response.status_code == 200):
                if (self.auth.check_auth(response.text) == False):
                    self.auth.autorize()
                return response.text
            return None

    def ajax(self, url, data={}, referer=False):
        """Fetch *url* as an XMLHttpRequest; POST when *data* is non-empty.

        Returns the response text on HTTP 200, otherwise None.
        NOTE(review): mutable default argument — harmless here because
        *data* is only read, never mutated.
        """
        try:
            self.auth = Auth()
            self.cookie = self.auth.get_cookies()
            headers = {'X-Requested-With': 'XMLHttpRequest'}
            if (referer):
                headers['Referer'] = referer

            # fall back to cookies captured from a previous response
            cook = self.mycookie if self.cookie == None else self.cookie
            if (len(data) > 0):
                response = xbmcup.net.http.post(url,
                                                data,
                                                cookies=cook,
                                                headers=headers,
                                                verify=False,
                                                proxies=PROXIES)
            else:
                response = xbmcup.net.http.get(url,
                                               cookies=cook,
                                               headers=headers,
                                               verify=False,
                                               proxies=PROXIES)

            if (self.cookie == None):
                self.mycookie = response.cookies

        except xbmcup.net.http.exceptions.RequestException:
            print traceback.format_exc()
            return None
        else:
            return response.text if response.status_code == 200 else None

    def get_my_news(self,
                    url,
                    page,
                    idname='dle-content',
                    nocache=False,
                    search="",
                    itemclassname="shortstory"):
        """Fetch the user's notifications feed as a listing result.

        Returns (cache_ttl, result) where result has the same shape as
        get_movies(): {'page': {'pagenum', 'maxpage'}, 'data': [...]}.
        The *url*, *idname* and *itemclassname* parameters are accepted for
        signature compatibility with get_movies() but *url* is overwritten
        with the notifications API endpoint below.
        """
        page = int(page)

        url = SITE_URL + "/api/notifications/get"

        if (page > 0 and search == ''):
            page += 1
        else:
            page = 1

        post_data = {'page': page}

        html = self.ajax(url, post_data, SITE_URL + '/')
        #print html.decode('utf8')
        if not html:
            return None, {'page': {'pagenum': 0, 'maxpage': 0}, 'data': []}
        result = {'page': {'pagenum': page, 'maxpage': 10000}, 'data': []}

        try:
            json_result = json.loads(html)
            result['page']['maxpage'] = len(json_result['message']['items'])

            for item_news in json_result['message']['items']:
                movie_name = item_news['data']['movie_name']
                movie_url = item_news['data']['movie_link']
                movie_id = item_news['id']
                quality_s = item_news['date_string']
                # e.g. "S2E5" — season/episode tag shown in the year slot
                dop_info = 'S' + str(item_news['data']['season']) + 'E' + str(
                    item_news['data']['episode'])
                not_movie = False

                result['data'].append({
                    'url':
                    movie_url,
                    'id':
                    movie_id,
                    'not_movie':
                    not_movie,
                    'quality':
                    '[COLOR ff3BADEE]' + quality_s + '[/COLOR]',
                    'year':
                    '[COLOR ffFFB119]' + dop_info + '[/COLOR]',
                    'name':
                    movie_name.strip(),
                    'img':
                    None
                })
        except:
            print traceback.format_exc()

        if (nocache):
            return None, result
        else:
            return cache_minutes, result

    def get_movies(self,
                   url,
                   page,
                   idname='dle-content',
                   nocache=False,
                   search="",
                   itemclassname="shortstory"):
        """Scrape one listing/search page into (cache_ttl, result).

        result = {'page': {'pagenum', 'maxpage'}, 'data': [movie dicts]}.
        Search queries go through the AJAX endpoint; plain listings use a
        normal page load.  Returns (None, empty result) on HTTP failure.
        """
        page = int(page)

        if (page > 0 and search == ''):
            url = SITE_URL + "/" + url.strip('/') + "/page/" + str(page + 1)
        else:
            url = SITE_URL + "/" + url.strip('/')

        # print url

        if (search != ''):
            html = self.ajax(url)
        else:
            html = self.load(url)

        if not html:
            return None, {'page': {'pagenum': 0, 'maxpage': 0}, 'data': []}
        result = {'page': {}, 'data': []}
        soup = xbmcup.parser.html(self.strip_scripts(html))

        if (search != ''):
            result['page'] = self.get_page_search(soup)
        else:
            result['page'] = self.get_page(soup)

        if (idname != ''):
            center_menu = soup.find('div', id=idname)
        else:
            center_menu = soup
        try:
            for div in center_menu.find_all('article', class_=itemclassname):
                href = div.find('div', class_='short')  #.find('a')

                movie_name = div.find('div', class_='full').find(
                    'h2', class_='name').find('a').get_text()
                movie_url = href.find('a', class_='watch').get('href')
                movie_id = re.compile('/([\d]+)-', re.S).findall(movie_url)[0]

                # a 'not-movie' badge presumably marks non-film entries
                # (announcements/trailers) — TODO confirm against the site
                not_movie = True
                try:
                    not_movie_test = div.find('span',
                                              class_='not-movie').get_text()
                except:
                    not_movie = False

                try:
                    quality = div.find('div', class_='full').find(
                        'div', class_='quality').get_text().strip()
                except:
                    quality = ''

                # optional extras joined into the 'year' field: likes,
                # release year, genre — each scraped best-effort
                dop_information = []
                try:
                    likes = soup.find(class_='like',
                                      attrs={
                                          'data-id': movie_id
                                      }).find('span').get_text()
                    i_likes = int(likes)
                    if i_likes != 0:
                        if i_likes > 0:
                            likes = '[COLOR ff59C641]' + likes + '[/COLOR]'
                        else:
                            likes = '[COLOR ffDE4B64]' + likes + '[/COLOR]'
                        dop_information.append(likes)
                except:
                    pass

                try:
                    year = div.find(
                        'div',
                        class_='item year').find('a').get_text().strip()
                    dop_information.append(year)
                except:
                    pass

                try:
                    genre = div.find('div', class_='category').find(
                        class_='item-content').get_text().strip()
                    dop_information.append(genre)
                except:
                    print traceback.format_exc()

                information = ''
                if (len(dop_information) > 0):
                    information = '[COLOR white][' + ', '.join(
                        dop_information) + '][/COLOR]'

                movieposter = self.format_poster_link(
                    href.find('img',
                              class_='poster poster-tooltip').get('src'))

                result['data'].append({
                    'url':
                    movie_url,
                    'id':
                    movie_id,
                    'not_movie':
                    not_movie,
                    'quality':
                    self.format_quality(quality),
                    'year':
                    information,
                    'name':
                    movie_name.strip(),
                    'img':
                    None if not movieposter else movieposter
                })
        except:
            print traceback.format_exc()

        if (nocache):
            return None, result
        else:
            return cache_minutes, result

    def decode_base64(self, encoded_url):
        """Undo the site's letter-substitution cipher, then base64-decode.

        The cipher swaps each character of ``codec_a`` with the character at
        the same position in ``codec_b`` (a self-inverse mapping); a
        placeholder keeps the two replacements from clobbering each other.
        """
        codec_a = ("l", "u", "T", "D", "Q", "H", "0", "3", "G", "1", "f", "M",
                   "p", "U", "a", "I", "6", "k", "d", "s", "b", "W", "5", "e",
                   "y", "=")
        codec_b = ("w", "g", "i", "Z", "c", "R", "z", "v", "x", "n", "N", "2",
                   "8", "J", "X", "t", "9", "V", "7", "4", "B", "m", "Y", "o",
                   "L", "h")

        for left, right in zip(codec_a, codec_b):
            encoded_url = encoded_url.replace(left, '___')
            encoded_url = encoded_url.replace(right, left)
            encoded_url = encoded_url.replace('___', right)
        return base64.b64decode(encoded_url)

    def decode_base64_2(self, encoded_url):
        """Decode a '#2'-prefixed player link: strip junk, then base64-decode.

        Tries musor.Clear_Musor24 first, then musor.Clear_Musor; on total
        failure logs the raw value and shows an error dialog (returning None
        implicitly).
        """
        # historical junk markers, superseded by the musor helpers below
        tokens = ("//Y2VyY2EudHJvdmEuc2FnZ2V6emE=",
                  "//c2ljYXJpby4yMi5tb3ZpZXM=", "//a2lub2NvdmVyLnc5OC5uamJo")

        def _log(s):
            # append the offending value to a temp-dir log for bug reports
            with open(xbmc.translatePath('special://temp/filmix_net_url.log'),
                      'a') as f:
                try:
                    f.write(unicode(s).encode('utf-8'))
                except:
                    f.write(s)
                f.write('\n')

        # drop the '#2' marker and unescape JSON-style slashes
        clean_encoded_url2 = encoded_url[2:].replace("\/", "/")

        #for token in tokens:
        #    clean_encoded_url = clean_encoded_url2.replace(token, "")
        clean_encoded_url = musor.Clear_Musor24(clean_encoded_url2)
        #clean_encoded_url = None
        if clean_encoded_url:
            try:
                return base64.b64decode(clean_encoded_url)
            except:
                clean_encoded_url = None

        # first strategy failed: fall back to the older junk stripper
        if clean_encoded_url is None:
            clean_encoded_url = musor.Clear_Musor(clean_encoded_url2)

        try:
            return base64.b64decode(clean_encoded_url)
        except:
            xbmc.log(u'filmix.net encoded_url:' + encoded_url, xbmc.LOGERROR)
            _log(encoded_url)
            xbmcgui.Dialog().ok(
                'Filmix.Net', 'Не удалось декодировать ссылку (см. kodi.log):',
                encoded_url)

    def decode_unicode(self, encoded_url):
        """Decode a link packed as consecutive 3-digit '\\u0XXX' groups.

        NOTE(review): Python 2 only — relies on str.decode('unicode_escape')
        and itertools.izip_longest.
        """
        def grouper(n, iterable, fillvalue=None):
            "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
            args = [iter(iterable)] * n
            return izip_longest(fillvalue=fillvalue, *args)

        # drop the leading '#' scheme marker when present
        _ = (encoded_url[1:] if encoded_url.find('#') != -1 else encoded_url)
        tokens = map(lambda items: '\u0' + ''.join(items), grouper(3, _))
        return ''.join(tokens).decode('unicode_escape')

    def decode_direct_media_url(self, encoded_url, checkhttp=False):
        """Dispatch an encoded player link to the matching decoder.

        Returns the decoded URL, or False when decoding fails — or when
        *checkhttp* is set and the value is already a plain http(s) link.
        """
        if (checkhttp == True and (encoded_url.find('http://') != -1
                                   or encoded_url.find('https://') != -1)):
            return False

        xbmc.log(u'Filmix.Net try decode url:' + encoded_url)
        try:
            if encoded_url.find('#') != -1:
                # a '#2' prefix selects the newer obfuscation scheme
                if encoded_url[:2] == '#2':
                    return self.decode_base64_2(encoded_url)
                else:
                    return self.decode_unicode(encoded_url)
            else:
                return self.decode_base64(encoded_url)
        except:
            return False

    def format_poster_link(self, link):
        """Normalize a poster URL to an absolute link under SITE_URL.

        When a proxy is configured the poster is either pre-cached through
        the proxy or dropped (''), since Kodi would otherwise fetch it
        directly and bypass the proxy.
        """
        # fix for .cc
        r_link = link.replace('https://filmix.co', SITE_URL)
        # fix for .live .co .net
        if r_link.find(SITE_URL) != -1:
            r_link = r_link
        elif '://' in r_link:
            r_link = r_link
        else:
            # relative path: prefix the current site root
            r_link = SITE_URL + r_link

        if PROXIES:
            if DOWNLOAD_POSTERS_VIA_PROXY:
                self.imageloader.load_to_cache(r_link)
            else:
                r_link = ''

        return r_link

    def format_direct_link(self, source_link, q):
        """Return the first link whose quality tag contains *q*.

        *source_link* is a sequence of (quality_tag, link) pairs as produced
        by the playlist regex.  When the link carries an ' or '-separated
        alternative, the part after the separator is returned.  Returns None
        when no tag matches.
        """
        # regex = re.compile("\[([^\]]+)\]", re.IGNORECASE)
        # return regex.sub(q, source_link)
        for quality_tag, candidate in source_link:
            if quality_tag.find(q) == -1:
                continue
            sep = candidate.find(b' or ')
            return candidate[sep + 4:] if sep != -1 else candidate

    def get_qualitys(self, source_link):
        """Return every '[...]'-bracketed quality tag in *source_link*.

        Malformed or non-string input yields an empty list instead of
        raising.
        """
        try:
            return re.compile("\[([^\]]+)\]", re.S).findall(source_link)
        except:
            # behave as "no qualities known"
            return []

    def get_collections_info(self):
        """Scrape the collections page into [{'url', 'img', 'title'}, ...]."""
        html = self.load(COLLECTIONS_URL)
        collectionsInfo = []

        html = html.encode('utf-8')
        soup = xbmcup.parser.html(self.strip_scripts(html))

        collections = soup.find_all('a', class_='poster-link poster-hover')
        for collection in collections:
            # store site-relative URLs so SITE_URL changes don't break them
            url_collection = collection.get('href').replace(SITE_URL, '')
            obj_poster = collection.find(class_='poster')
            title_collection = obj_poster.get('alt')
            img_collection = self.format_poster_link(obj_poster.get('src'))
            # placeholder poster -> use the addon's own cover art
            if img_collection.find('/none.png') > 0:
                img_collection = cover.treetv

            collectionsInfo.append({
                'url': url_collection,
                'img': img_collection,
                'title': title_collection
            })

        return collectionsInfo

    def get_movie_info(self, url):
        """Scrape a movie/serial page into the movieInfo dict used by the UI.

        Collects playable streams grouped by translation, season and quality
        plus page metadata (title, description, poster, genres, year,
        ratings, duration).  On failure movieInfo['no_files'] carries a
        user-facing reason; scraping errors are logged and a partial dict is
        returned.
        """
        html = self.load(url)

        movieInfo = {}

        movieInfo['page_url'] = url

        movieInfo['no_files'] = None
        movieInfo['episodes'] = True
        movieInfo['movies'] = []
        movieInfo['resolutions'] = []

        if not html:
            movieInfo['no_files'] = 'HTTP error'
            return movieInfo

        html = html.encode('utf-8')
        soup = xbmcup.parser.html(self.strip_scripts(html))

        try:
            movieInfo['is_proplus'] = len(soup.find('span', class_='proplus'))
        except:
            movieInfo['is_proplus'] = 0

        try:
            try:
                # film id embedded in the page's inline JS
                film_id = re.compile('film_id ?= ?([\d]+);', re.S).findall(
                    html)[0].decode('string_escape').decode('utf-8')
                movieInfo['movie_id'] = int(film_id)
                js_string = self.ajax(SITE_URL + '/api/movies/player_data',
                                      {'post_id': film_id}, url)
                player_data = json.loads(js_string, 'utf-8')
                # player_data = player_data['message']['translations']['flash']
                # player_data = player_data['message']['translations']['html5']
                player_data = player_data['message']['translations']['video']
                if player_data == []:
                    movieInfo['no_files'] = xbmcup.app.lang[34026].encode(
                        'utf8')
            except:
                movieInfo['no_files'] = xbmcup.app.lang[34026].encode('utf8')
                raise

            serie_q = re.compile('(\d+)', re.S)
            serie_num = re.compile('s(\d+)e(\d+)', re.S)

            for translate in player_data:
                js_string = self.decode_direct_media_url(
                    player_data[translate], True)

                if (js_string == False):
                    continue

                # a '.txt' link is an encoded season/episode playlist;
                # otherwise it is a direct stream link
                if (js_string.find('.txt') != -1):
                    playlist = self.decode_direct_media_url(
                        self.load(js_string))
                    movies = json.loads(playlist, 'utf-8')

                    try:
                        folders = movies[0]['folder']
                    except:
                        try:  # fix bug site same serials("Ekaterina")
                            folders = movies[1]['folder']
                            movies.pop(0)
                        except:
                            movies = [{'folder': movies, 'title': 'Season 1'}]

                    for season in movies:
                        current_movie = {
                            'folder_title':
                            season['title'] + ' (' + translate + ')',
                            'movies': {},
                            'translate': translate
                        }

                        for movie in season['folder']:
                            avail_quality = self.get_qualitys(movie['file'])
                            array_links = re.compile(b'\[([^\]]+)\]([^,]+)',
                                                     re.S).findall(
                                                         movie['file'])
                            for q in avail_quality:

                                if (q == ''): continue

                                direct_link = self.format_direct_link(
                                    array_links,
                                    q) if q != 0 else movie['file']

                                # episode id looks like 's<season>e<episode>'
                                serie_num_res = serie_num.findall(movie['id'])

                                try:
                                    iseason = int(serie_num_res[0][0])
                                except:
                                    iseason = 0

                                try:
                                    iserieId = int(serie_num_res[0][1])
                                except:
                                    iserieId = 0

                                # map marketing labels to numeric heights
                                if (q == '4K UHD'):
                                    qq = '2160'
                                elif (q == '2K'):
                                    qq = '1440'
                                else:
                                    qq = serie_q.findall(q)[0]

                                try:
                                    current_movie['movies'][qq].append(
                                        [direct_link, iseason, iserieId])
                                except:
                                    current_movie['movies'][qq] = []
                                    current_movie['movies'][qq].append(
                                        [direct_link, iseason, iserieId])

                                current_movie['season'] = iseason

                        movieInfo['movies'].append(current_movie)

                elif (js_string.find('http://') != -1
                      or js_string.find('https://') != -1):
                    avail_quality = self.get_qualitys(js_string)
                    array_links = re.compile(b'\[([^\]]+)\]([^,]+)',
                                             re.S).findall(js_string)
                    current_movie = {
                        'folder_title': translate,
                        'translate': translate,
                        'movies': {}
                    }
                    for q in avail_quality:
                        if (q == ''): continue
                        direct_link = self.format_direct_link(
                            array_links, q) if q != 0 else js_string

                        if (q == '4K UHD'):
                            qq = '2160'
                        elif (q == '2K'):
                            qq = '1440'
                        else:
                            qq = serie_q.findall(q)[0]

                        try:
                            current_movie['movies'][qq].append(
                                [direct_link, 1, 1])
                        except:
                            current_movie['movies'][qq] = []
                            current_movie['movies'][qq].append(
                                [direct_link, 1, 1])

                    movieInfo['movies'].append(current_movie)

            movieInfo['title'] = soup.find('h1', class_='name').get_text()
            try:
                movieInfo['originaltitle'] = soup.find(
                    'div', class_='origin-name').get_text().strip()
            except:
                movieInfo['originaltitle'] = ''

            try:
                r_kinopoisk = soup.find(
                    'span',
                    class_='kinopoisk btn-tooltip icon-kinopoisk').find(
                        'p').get_text().strip()
                if float(r_kinopoisk) == 0: r_kinopoisk = ''
            except:
                r_kinopoisk = ''

            try:
                r_imdb = soup.find('span',
                                   class_='imdb btn-tooltip icon-imdb').find(
                                       'p').get_text().strip()
                movieInfo['ratingValue'] = float(r_imdb)
                movieInfo['ratingCount'] = r_imdb
            except:
                r_imdb = ''
                movieInfo['ratingValue'] = 0
                movieInfo['ratingCount'] = 0

            if r_kinopoisk != '':
                r_kinopoisk = ' [COLOR orange]���������[/COLOR] : '.decode(
                    'cp1251') + r_kinopoisk

            if movieInfo['ratingValue'] != 0:
                r_imdb = ' [COLOR yellow]IMDB[/COLOR] : ' + r_imdb
            else:
                r_imdb = ''

            # ratings line is prepended to the plot description
            s_rating = r_kinopoisk + r_imdb + ' \n '

            try:
                movieInfo['description'] = s_rating + soup.find(
                    'div', class_='full-story').get_text().strip()
            except:
                movieInfo['description'] = ''

            try:
                movieInfo['fanart'] = self.format_poster_link(
                    soup.find('ul',
                              class_='frames-list').find('a').get('href'))
            except:
                movieInfo['fanart'] = ''
            try:
                movieInfo['cover'] = self.format_poster_link(
                    soup.find('a', class_='fancybox').get('href'))
            except:
                movieInfo['cover'] = ''

            try:
                movieInfo['genres'] = []
                genres = soup.find_all(attrs={'itemprop': 'genre'})
                for genre in genres:
                    movieInfo['genres'].append(genre.get_text().strip())
                movieInfo['genres'] = ' / '.join(
                    movieInfo['genres']).encode('utf-8')
            except:
                movieInfo['genres'] = ''

            try:
                movieInfo['year'] = soup.find(
                    'div', class_='item year').find('a').get_text()
            except:
                movieInfo['year'] = ''

            try:
                movieInfo['durarion'] = soup.find(
                    'div', class_='item durarion').get('content')
                movieInfo['durarion'] = int(movieInfo['durarion']) * 60
            except:
                movieInfo['durarion'] = ''

            movieInfo['is_serial'] = soup.find(
                'div', class_='item xfgiven_added') is not None

            # try:
            # movieInfo['ratingValue'] = float(soup.find(attrs={'itemprop' : 'ratingValue'}).get_text())
            # except:
            # movieInfo['ratingValue'] = 0

            # try:
            # movieInfo['ratingCount'] = int(soup.find(attrs={'itemprop' : 'ratingCount'}).get_text())
            # except:
            # movieInfo['ratingCount'] = 0

            try:
                movieInfo['director'] = []
                directors = soup.find('div',
                                      class_='item directors').findAll('a')
                for director in directors:
                    movieInfo['director'].append(director.get_text().strip())
                movieInfo['director'] = ', '.join(
                    movieInfo['director']).encode('utf-8')
            except:
                movieInfo['director'] = ''
        except:
            print traceback.format_exc()

        #print movieInfo

        return movieInfo

    def get_modal_info(self, url):
        """Scrape a movie's info popup: title, poster, ratings, trailer.

        Returns a dict with 'error' True when the page could not be loaded.
        """
        html = self.load(url)
        movieInfo = {}
        movieInfo['error'] = False
        if not html:
            movieInfo['error'] = True
            return movieInfo

        html = html.encode('utf-8')
        soup = xbmcup.parser.html(self.strip_scripts(html))

        try:
            movieInfo['desc'] = soup.find(
                'div', class_='full-story').get_text().strip()
        except:
            movieInfo['desc'] = ''

        try:
            movieInfo['title'] = soup.find('h1', class_='name').get_text()
        except:
            movieInfo['title'] = ''

        try:
            movieInfo['originaltitle'] = soup.find(
                'div', class_='origin-name').get_text().strip()
        except:
            movieInfo['originaltitle'] = ''

        if (movieInfo['originaltitle'] != ''):
            movieInfo['title'] = '%s / %s' % (movieInfo['title'],
                                              movieInfo['originaltitle'])

        try:
            movieInfo['poster'] = self.format_poster_link(
                soup.find('img', class_='poster poster-tooltip').get('src'))
        except:
            movieInfo['poster'] = ''

        # desc is rebuilt below: info items + ratings + full story
        movieInfo['desc'] = ''
        try:
            infos = soup.find('div', class_='full min').find_all('div',
                                                                 class_="item")
            skip = True
            for div in infos:
                # the first item is skipped — presumably not meta info;
                # TODO confirm against the page markup
                if (skip):
                    skip = False
                    continue
                movieInfo['desc'] += self.format_desc_item(
                    div.get_text().strip()) + "\n"
        except:
            movieInfo['desc'] = traceback.format_exc()

        try:
            div = soup.find('div',
                            class_='full-panel').find('span',
                                                      class_='kinopoisk')
            rvalue = div.find('div', attrs={
                'itemprop': 'ratingValue'
            }).get_text().strip()
            rcount = div.find('div', attrs={
                'itemprop': 'ratingCount'
            }).get_text().strip()
            kp = xbmcup.app.lang[34029] % (self.format_rating(rvalue), rvalue,
                                           rcount)
            movieInfo['desc'] += kp + "\n"
        except:
            pass

        try:
            div = soup.find('div', class_='full-panel').find(
                'span', class_='imdb').find_all('div')
            rvalue = div[0].get_text().strip()
            rcount = div[1].get_text().strip()
            kp = xbmcup.app.lang[34030] % (self.format_rating(rvalue), rvalue,
                                           rcount)
            movieInfo['desc'] += kp + "\n"
        except:
            pass

        try:
            desc = soup.find('div', class_='full-story').get_text().strip()
            movieInfo['desc'] = '\n[COLOR blue]%s[/COLOR]\n%s' % (
                xbmcup.app.lang[34027], desc) + '\n' + movieInfo['desc']
        except:
            movieInfo['desc'] = traceback.format_exc()

        try:
            movieInfo['trailer'] = soup.find('li',
                                             attrs={
                                                 'data-id': "trailers"
                                             }).find('a').get('href')
        except:
            movieInfo['trailer'] = False

        return movieInfo

    def my_int(self, str):
        """int() that maps the empty string to 0."""
        return 0 if str == '' else int(str)

    def get_trailer(self, url):
        """Resolve the trailer page at *url* to a direct video link.

        Shows a progress dialog while loading; returns False when the page
        cannot be fetched.
        """
        progress = xbmcgui.DialogProgress()
        progress.create(xbmcup.app.addon['name'])
        progress.update(0)
        html = self.load(url)
        movieInfo = {}
        movieInfo['error'] = False
        if not html:
            xbmcup.gui.message(xbmcup.app.lang[34031].encode('utf8'))
            progress.update(0)
            progress.close()
            return False

        progress.update(50)
        html = html.encode('utf-8')
        soup = xbmcup.parser.html(self.strip_scripts(html))

        link = self.decode_direct_media_url(
            soup.find('input', id='video5-link').get('value'))
        # pick the highest numeric quality offered for this trailer
        avail_quality = max(map(self.my_int, self.get_qualitys(link)))
        progress.update(100)
        progress.close()
        return self.format_direct_link(link, str(avail_quality))

    def format_desc_item(self, text):
        """Collapse runs of whitespace and paint the leading 'Label:' blue."""
        collapsed = re.sub(r'\s+', ' ', text)
        return re.compile(r'^([^:]+:)', re.S).sub('[COLOR blue]\\1[/COLOR] ',
                                                  collapsed)

    def strip_scripts(self, html):
        """Empty the <head> and drop every <script> block from *html*.

        Keeps the HTML parser away from inline JS it cannot handle.
        """
        no_head = re.compile(r'<head[^>]*>(.*?)</head>',
                             re.S).sub('<head></head>', html)
        return re.compile(r'<script[^>]*>(.*?)</script>',
                          re.S).sub('', no_head)

    def format_rating(self, rating):
        """Map a numeric rating to its display colour (white/green/amber/red)."""
        value = float(rating)
        if value == 0:
            return 'white'
        if value > 7:
            return 'ff59C641'
        return 'ffFFB119' if value > 4 else 'ffDE4B64'

    def format_quality(self, quality):
        """Wrap *quality* in a colour tag keyed off its resolution substring."""
        if (quality == ''): return ''

        colors = {
            'HD': 'ff3BADEE',
            'HQ': 'ff59C641',
            'SQ': 'ffFFB119',
            'LQ': 'ffDE4B64'
        }

        if quality.find('1080') != -1:
            grade = 'HD'
        elif quality.find('720') != -1:
            grade = 'HQ'
        elif quality.find('480') != -1:
            grade = 'SQ'
        else:
            grade = 'LQ'

        if grade in colors:
            return "[COLOR %s][%s][/COLOR]" % (colors[grade], quality)
        # unreachable fallback, kept from the original implementation
        return ("[COLOR ffDE4B64][%s][/COLOR]" %
                quality if quality != '' else '')

    def get_page(self, soup):
        """Read pagination from a listing page's navigation block.

        Returns {'pagenum': current page, 'maxpage': last known page};
        defaults both to 1 when the page has no navigation at all.
        """
        info = {'pagenum': 0, 'maxpage': 0}
        try:
            wrap = soup.find('div', class_='navigation')
            info['pagenum'] = int(wrap.find('span', class_='').get_text())
            try:
                # a 'next' arrow implies at least one more page
                info['maxpage'] = len(wrap.find('a', class_='next'))
                if (info['maxpage'] > 0):
                    info['maxpage'] = info['pagenum'] + 1
            except:
                info['maxpage'] = info['pagenum']
                print traceback.format_exc()

        except:
            info['pagenum'] = 1
            info['maxpage'] = 1
            print traceback.format_exc()

        return info

    def get_page_search(self, soup):
        """Read pagination from a search-results navigation block.

        Search navigation uses 'click' spans for page numbers; the current
        page is the first plain span that is not a '...' filler.  Returns
        {'pagenum', 'maxpage'}, defaulting both to 1 on any parse failure.
        """
        info = {'pagenum': 0, 'maxpage': 0}
        try:
            wrap = soup.find('div', class_='navigation')
            current_page = wrap.find_all('span', class_='')
            info['pagenum'] = 1
            for cpage in current_page:
                if (cpage.get_text().find('...') == -1):
                    info['pagenum'] = int(cpage.get_text())
                    break

            try:
                clicks = wrap.find_all('span', class_='click')
                pages = []
                for page in clicks:
                    pages.append(int(page.get_text()))

                info['maxpage'] = max(pages)
            except:
                info['maxpage'] = info['pagenum']
                print traceback.format_exc()

        except:
            info['pagenum'] = 1
            info['maxpage'] = 1
            print traceback.format_exc()

        return info

    def get_movie_id(self, url):
        """Extract the numeric id from a '/12345-slug' URL; 0 when absent."""
        found = re.findall(r'\/([\d]+)\-', url)

        try:
            return int(found[0])
        except:
            return 0
Пример #30
0
class OpenFileDialog(QDialog):
	"""Dialog that loads a Gerber or bitmap file and previews the rendering."""

	def __init__(self, filename, parent = None):
		"""Build the dialog and start loading *filename*.

		'.gbr' files go through GerberLoader; '.png'/'.bmp'/'.jpg' through
		ImageLoader. UI controls irrelevant to the chosen loader are hidden.
		"""
		QDialog.__init__(self, parent)
		
		self.ui = Ui_Dialog()
		self.ui.setupUi(self)

		# Re-render the preview whenever the threshold/inversion controls change.
		self.ui.thresholdSlider.valueChanged.connect(self.redraw)
		self.ui.invertedCheckBox.toggled.connect(self.redraw)
		
		self.ui.progress.setEnabled(False)

		fileExt = filename.split('.')[-1]
		
		if fileExt == 'gbr':
			self.ui.imageBox.setVisible(False)
			self._loader = GerberLoader(self)
		elif fileExt in ['png', 'bmp', 'jpg']:
			self.ui.gerberBox.setVisible(False)
			self._loader = ImageLoader(self)
		# NOTE(review): any other extension leaves self._loader unset and the
		# next line raises AttributeError -- confirm callers pre-filter files.

		if not self._loader.load(filename):
			self.close() #FIXME

		# Signals are connected after load(); presumably load() only parses
		# and run() does the actual work -- TODO confirm no progress is missed.
		self._loader.progress.connect(self.ui.progress.setValue)
		self._loader.loaded.connect(self._loaded)

		if fileExt == 'gbr':
			self.redraw()
		else:
			# Setting the slider fires valueChanged -> redraw() for images.
			self.ui.thresholdSlider.setValue(127)

	def redraw(self):
		"""Restart the loader with the current UI parameters."""
		self._loader.stop()

		self.ui.progress.setValue(0)
		self.ui.progress.setEnabled(True)

		# Wall-clock start; elapsed time is printed in _loaded().
		self.t = time()
		if type(self._loader) == GerberLoader:
			self._loader.run(0.05)
		else:
			threshold = self.ui.thresholdSlider.value()
			inverted = self.ui.invertedCheckBox.isChecked()
			self._loader.run((threshold, inverted))
		
	def _loaded(self, image):
		"""Slot invoked when the loader finishes; display the rendered image."""
		print time() - self.t
		self.ui.progress.setValue(0)
		self.ui.progress.setEnabled(False)

		self._image = image
		self.ui.view.setFixedSize(image.size())
		self.ui.view.setPixmap(QPixmap.fromImage(image))

		
	def image(self):
		"""Return the most recently loaded image."""
		return self._image
	
	def closeEvent(self, event): #FIXME
		# NOTE(review): debug leftover; no cleanup (loader stop) happens here.
		print 'qqqq'
Пример #31
0
def index():
    """Index an image dataset using SIFT bag-of-visual-words histograms.

    Command-line arguments:
        -d/--dataset  directory containing the images to index (required)
        -i/--index    path where the computed index (CSV) will be stored
        -t/--training when given, only train and persist the k-means model

    In training mode the SIFT descriptors of every image are clustered with
    mini-batch k-means and the model is dumped to 'train_k_means.joblib'.
    Otherwise the saved model is loaded and one histogram per image is
    written to the index file.
    """
    # Parse arguments
    argParser = argparse.ArgumentParser()
    argParser.add_argument(
        "-d",
        "--dataset",
        required=True,
        help="Path to directory that contains the images to be indexed")
    argParser.add_argument(
        "-i",
        "--index",
        help="Path to where the computed index will be stored")
    argParser.add_argument(
        "-t",
        "--training",
        action="store_const",
        const="train",
        help=
        "If you want to just train the knn model no csv file will be created")
    args = vars(argParser.parse_args())

    # Image dataset
    dataset_folder_path = args["dataset"]

    n_images = len([
        name for name in os.listdir(dataset_folder_path)
        if os.path.isfile(os.path.join(dataset_folder_path, name))
    ])

    imageLoader = ImageLoader()
    siftDescriptor = SiftDescriptor()

    descriptor_list = []
    descriptor_array = []

    if args["training"] == "train":

        # Reading images and getting their descriptors
        for i, imageID in enumerate(os.listdir(dataset_folder_path), start=1):
            # os.path.join fixes the original bare concatenation, which broke
            # whenever --dataset lacked a trailing separator.
            full_path_to_image = os.path.join(dataset_folder_path, imageID)
            image = cv2.imread(full_path_to_image)
            # NOTE(review): the original computed a grayscale copy but never
            # used it; describe() receives the BGR image -- confirm intended.
            descriptor = siftDescriptor.describe(image)
            descriptor_list.extend(descriptor)
            descriptor_array.append(descriptor)
            print(i, "out of", n_images)

        # Getting clusters from the descriptor_list
        myKMeans = MyKMeans()
        print("Running k_means")
        clusters = myKMeans.k_means_batch(300, descriptor_list, 100)
        print("Finished k_means")

        dump(clusters, "train_k_means.joblib")
    else:

        clusters = load("train_k_means.joblib")
        histogramBuilder = HistogramBuilder()

        # 'with' guarantees the index file is closed even if describing or
        # writing a histogram raises.
        with open(args["index"], "w") as indexFile:
            i = 1
            print("Building histograms")
            for imageID in os.listdir(dataset_folder_path):
                full_path_to_image = os.path.join(dataset_folder_path, imageID)
                image = cv2.imread(full_path_to_image)

                descriptor = siftDescriptor.describe(image)

                histogram = histogramBuilder.build_histogram_from_clusters(
                    descriptor, clusters)

                print("Writing to index file")
                write_to_index(histogram, imageID, indexFile)
                print("Finished writing to index file")

                print(i, "out of", n_images)
                i += 1
        print("Finished")
Пример #32
0
from image_loader import ImageLoader
import matplotlib.pyplot as plt

# Fetch one batch from the small CelebA folder and save a plotted grid.
loader = ImageLoader("../img_small/", 64)
sample_batch = loader.get_new_batch()
loader.plotimage(sample_batch)
plt.savefig('celeba_original.png', bbox_inches='tight')
Пример #33
0
import numpy as np
from network_loader import get_conv_seq, reconstruct, Network, torchimg2pyimg, pyimg2torchimg
from image_loader import ImageLoader
import torch
import numpy as np  # NOTE(review): duplicate of the numpy import above

# Demo: load a validation image, punch random holes into it, and prepare the
# masked image as patch sequences for the inpainting network.
network = Network("exp14_50_net_G.t7")
img_loader = ImageLoader(root_dir='/data/data/ILSVRC2010_images/val_small')
# Load image 0; presumably -1, -1 means "keep native size" -- TODO confirm.
origin = img_loader.get_image_nparray_by_idx(0, -1, -1)
# NOTE(review): numpy image arrays are (rows, cols, channels); these names
# may be swapped (height/width) -- verify against get_random_mask's contract.
(width, height, nc) = origin.shape
mask = img_loader.get_random_mask(width, height)
img = img_loader.make_holes(origin, mask)
print "pyimg shape", img.shape

# Convert numpy HxWxC layout to the channel-first layout torch expects.
img = pyimg2torchimg(img)
print "torchimg shape", img.shape
fineSize = 128

# Map pixel values into [-1, 1] (assumes img is in [0, 1] -- TODO confirm).
img = (img * 2) - 1
img_seq, img_pos = get_conv_seq(img, fineSize, fineSize)
print "img_pos", img_pos

img_seq = torch.Tensor(img_seq)
# Give the mask a leading channel axis before slicing it into patches.
mask_seq, _ = get_conv_seq(mask[np.newaxis, :], fineSize, fineSize)
mask_seq = torch.Tensor(mask_seq)
print "network.forward() ", mask_seq.size(), img_seq.size()

xx = img_seq.numpy()
#output_seq = network.net.forward([img_seq, mask_seq]).numpy()
#output_seq = (output_seq + 1) * 0.5