Example #1
    def runSimsForCollection(self, collection, collConf, simulatorBaseDir):
        seleniumConfig = self.getConfigSetting("selenium")
        simulatorDirectory = os.path.join(simulatorBaseDir, "trunk", "tool", "selenium", "simulation", "demobrowser", "demo")
        #print "running simulations for demos: " + repr(collConf)
        
        #testReportFile = self.prepareTestReport(self.getConfigSetting("base/reportDirectory", ""), collection)
        
        scanDir = collConf["scanDir"]
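        # util.locate presumably globs scanDir recursively for the demo pages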
        appList = util.locate(scanDir, "*.html")
        
        for appDir in appList:
            dirName, fileName = os.path.split(appDir)
            category = os.path.basename(dirName)
            appName, ext = os.path.splitext(fileName)
            #print category + " - " + appName
            
            scriptFile = os.path.join(simulatorDirectory, category, appName + ".js")
            if os.path.isfile(scriptFile):
                #print "got a test script for " + category + "-" + appName

                self.log.info("Running simulations for %s" %category + "." + appName)
                
                for browser in collConf["browsers"]:
                    seleniumServer = SeleniumServer(seleniumConfig, logger = self.log)
                    seleniumServer.start()
                    simConf = self.getSimulationConfig(collection, "collections", browser)
                    simConf["autPath"] = appDir
                    simConf["simulationScript"] = scriptFile
                    #print repr(simConf)            
                    sim = Simulation(simConf)
                    sim.run()
                    seleniumServer.stop()
Example #2
def main():
    root = Tk()
    root.title('Rescaling')

    W, H = 700, 500
    PAD = 50
    canvas = Canvas(root, width=W, height=H, background='black')
    canvas.grid(row=0, column=1)

    #triangle = make_random_triangle(W, H)
    triangle = [
        Point(W / 2, PAD),
        Point(PAD, H - PAD),
        Point(W - 3 * PAD, H - 4 * PAD)
    ]
    scale_factor = 0.8

    for i in range(37):
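        # each pass draws the current triangle, then rotates it 10 degrees
        # about its center; 37 passes sweep just past a full turn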
        color1 = random_color()
        color2 = random_color()
        center = locate(triangle)['center']
        t_list = [point.as_list() for point in triangle]
        print(t_list)
        canvas.create_polygon(t_list, fill=color1, outline=color2, width=2)
        triangle = rotate_shape(triangle, center, degrees=10)
        for point in triangle:
            print(point)

    root.mainloop()
Example #3
File: rules.py Project: la-mar/prodstats
    @classmethod
    def from_list(cls, criteria: List[Dict], **kwargs) -> "ParserRule":
        """Initialize a rule from a list of criteria specifications.

        Example criteria spec:
            criteria = [
                {
                    "name": "parse_integers",
                    "type": "RegexCriterion",
                    "value": r"^[-+]?[0-9]+$",
                },
            ]
        """
        criteriaObjs: List[Criterion] = []
        for c in criteria:
            # resolve each criterion class by name from the parsing.criteria
            # module, then instantiate it with the spec's value and name
            CriteriaType = util.locate(c["type"], "parsing.criteria")
            criteriaObjs.append(CriteriaType(c["value"], c["name"]))
        return cls(criteriaObjs, **kwargs)
Example #4
File: client.py Project: la-mar/prodstats
    def credentials(self, credentials: Union[HTTPAuth, Dict, str]):
        """ Set the auth credentials using a pydantic credential model or the dotted
            import path to such a model """

        if credentials:
            if isinstance(credentials, str):
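                # a dotted import path is resolved to the credential object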
                credentials = util.locate(credentials)

            if inspect.isclass(credentials):
                credentials = credentials()

            if not isinstance(credentials, dict):
                if isinstance(credentials, HTTPAuth):
                    credentials = credentials.dict(reveal=True)

            self._credentials = credentials
Example #5
def main():
    root = Tk()
    root.title('Bounding Boxes')

    W, H = 700, 500
    canvas = Canvas(root, width=W, height=H, background='black')
    canvas.grid(row=0, column=1)

    N_TRIANGLE = 10
    BOUND_COLOR = '#ff0000'

    triangles = [make_triangle(W, H) for i in range(N_TRIANGLE)]
    for triangle in triangles:
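        # locate() reports the shape's bounding box under the 'bb' key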
        bb = locate(triangle)['bb']
        bb = [point.as_list() for point in bb]
        coords = [point.as_list() for point in triangle]
        canvas.create_polygon(coords, fill=random_color())
        # fill='' is tkinter's explicit "no fill"; None-valued options are
        # dropped by tkinter and fall back to the item default anyway
        canvas.create_rectangle(bb, fill='', outline=BOUND_COLOR)

    root.mainloop()
Example #6
def main():
    # initialize parameters
    parser = init()
    os.environ["CUDA_VISIBLE_DEVICES"] = parser.gpu
    categorical_cardinality = parser.categorical_cardinality
    data_path = parser.data_path
    styles = parser.styles
    image_size = parser.image_size
    force_grayscale = parser.force_grayscale
    channel_size = 1 if force_grayscale else 3
    seed = parser.seed
    lr = parser.lr
    batch_size = parser.batch_size
    epochs = parser.epochs
    kernel = parser.kernel
    stride = parser.stride
    class_dim = parser.class_dim
    reconstruct_coef = parser.reconstruct_coef
    generator_coef = parser.generator_coef
    discriminator_coef = parser.discriminator_coef

    # load data
    imageName, imageDict = locate(data_path,
                                  styles=styles,
                                  max_label=categorical_cardinality)
    _, imageTrue = locate(data_path, max_label=categorical_cardinality)
    imageNum = len(imageName)

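    # image1 holds the styled inputs; image2 their ground-truth counterparts,
    # paired below via find_truth against imageTrue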
    image1 = tf.placeholder(tf.float32,
                            [None, image_size, image_size, channel_size],
                            name="image1")
    image2 = tf.placeholder(tf.float32,
                            [None, image_size, image_size, channel_size],
                            name="image2")
    is_training = tf.placeholder(tf.bool, [], name="is_training")

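    # build the cycle-consistent VAE + GAN graph; it returns the component
    # losses plus the reconstruction tensor used for plotting below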
    forward_loss, reconstruct_loss, generator_loss, discriminator_loss, image1_forward_reconstruct, _ = cycle_consistent_vae_with_gan(
        image1, image2, kernel, stride, class_dim, is_training,
        reconstruct_coef, generator_coef, discriminator_coef,
        'cycle-consistent-vae-with-gan')

    encoder_variables = scope_variables(
        "cycle-consistent-vae-with-gan/encoder")
    decoder_variables = scope_variables(
        'cycle-consistent-vae-with-gan/decoder')
    discriminator_variables = scope_variables(
        'cycle-consistent-vae-with-gan/discriminator')

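    # one Adam optimizer per objective, each restricted to the variables it
    # should update (encoder + decoder, decoder only, discriminator only)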
    forward_solver = tf.train.AdamOptimizer(learning_rate=lr, beta1=0.5)
    generator_solver = tf.train.AdamOptimizer(learning_rate=lr, beta1=0.5)
    discriminator_solver = tf.train.AdamOptimizer(learning_rate=lr, beta1=0.5)
    forward_train = forward_solver.minimize(forward_loss,
                                            var_list=encoder_variables +
                                            decoder_variables)
    generator_train = generator_solver.minimize(generator_loss,
                                                var_list=decoder_variables)
    discriminator_train = discriminator_solver.minimize(
        discriminator_loss, var_list=discriminator_variables)

    idxes_1 = np.arange(imageNum, dtype=np.int32)
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = parser.gpu_fraction
    saver = tf.train.Saver()
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(epochs):
            np.random.shuffle(idxes_1)
            forward_losses = []
            generator_losses = []
            discriminator_losses = []

            for idx in range(0, imageNum, batch_size):
                image1_batch = loader(imageName[idxes_1[idx:idx + batch_size]],
                                      desired_height=image_size,
                                      desired_width=image_size,
                                      value_range=(0.0, 1.0),
                                      force_grayscale=force_grayscale)
                image2_batch = loader(find_truth(
                    imageName[idxes_1[idx:idx + batch_size]], imageTrue),
                                      desired_height=image_size,
                                      desired_width=image_size,
                                      value_range=(0.0, 1.0),
                                      force_grayscale=force_grayscale)

                feed_dict_training = {
                    image1: image1_batch,
                    image2: image2_batch,
                    is_training: True
                }

                # forward
                _, _forward_loss = sess.run([forward_train, forward_loss],
                                            feed_dict=feed_dict_training)
                forward_losses.append(_forward_loss)

                # generator
                _, _generator_loss = sess.run(
                    [generator_train, generator_loss],
                    feed_dict=feed_dict_training)
                generator_losses.append(_generator_loss)

                # discriminator
                _, _discriminator_loss = sess.run(
                    [discriminator_train, discriminator_loss],
                    feed_dict=feed_dict_training)
                discriminator_losses.append(_discriminator_loss)

            print(
                'epoch: %d\nforward_loss: %f, generator_loss: %f, discriminator_loss: %f\n'
                % (epoch, get_mean(forward_losses), get_mean(generator_losses),
                   get_mean(discriminator_losses)))

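            # at the end of each epoch, reconstruct ten sample images and plot
            # them alongside the inputs and ground truth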
            image1_plot = loader(imageName[idxes_1[0:10]],
                                 desired_height=image_size,
                                 desired_width=image_size,
                                 value_range=(0.0, 1.0),
                                 force_grayscale=force_grayscale)
            image2_plot = loader(find_truth(imageName[idxes_1[0:10]],
                                            imageTrue),
                                 desired_height=image_size,
                                 desired_width=image_size,
                                 value_range=(0.0, 1.0),
                                 force_grayscale=force_grayscale)
            feed_dict_not_training = {
                image1: image1_plot,
                image2: image2_plot,
                is_training: False
            }
            image1_reconstruct = sess.run(image1_forward_reconstruct,
                                          feed_dict=feed_dict_not_training)
            plot(image1_plot, image1_reconstruct, image2_plot, epoch)
        saver.save(sess, 'ckpt/model')
Example #7
def main():
    # initialize parameters
    parser = init()
    os.environ["CUDA_VISIBLE_DEVICES"] = parser.gpu
    categorical_cardinality = parser.categorical_cardinality
    fraction = parser.fraction
    data_path = parser.data_path
    style_1 = parser.style_1
    style_2 = parser.style_2
    image_size = parser.image_size
    force_grayscale = parser.force_grayscale
    channel_size = 1 if force_grayscale else 3
    seed = parser.seed
    lr = parser.lr
    loss_type = parser.loss_type
    batch_size = parser.batch_size
    epochs = parser.epochs
    kernel = parser.kernel
    stride = parser.stride
    class_dim = parser.class_dim
    style_dim = parser.style_dim
    reconstruct_coef_1 = parser.reconstruct_coef_1
    reconstruct_coef_2 = parser.reconstruct_coef_2
    reconstruct_coef_3 = parser.reconstruct_coef_3
    generator_coef = parser.generator_coef
    discriminator_coef = parser.discriminator_coef

    # load data
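    # hold out a random fraction of the character labels; locate() presumably
    # splits each style's file list into train/test sets along that partition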
    partition = np.arange(categorical_cardinality, dtype=np.int32)
    np.random.shuffle(partition)
    partition = partition[:int(categorical_cardinality * (1 - fraction))]
    print('partition:\n', partition)
    imageNameTrain1, imageDictTrain1, imageNameTest1, imageDictTest1 = locate(
        data_path,
        styles=['std/' + style_1 + '/cut'],
        max_label=categorical_cardinality,
        partition=partition)
    imageNameTrain2, imageDictTrain2, imageNameTest2, imageDictTest2 = locate(
        data_path,
        styles=['std/' + style_2 + '/cut'],
        max_label=categorical_cardinality,
        partition=partition)
    imageNameTrain3, imageDictTrain3, imageNameTest3, imageDictTest3 = locate(
        data_path,
        styles=['std/0/cut'],
        max_label=categorical_cardinality,
        partition=partition)
    imageNum = len(imageNameTrain1)

    image1 = tf.placeholder(tf.float32,
                            [None, image_size, image_size, channel_size],
                            name="image1")
    image2 = tf.placeholder(tf.float32,
                            [None, image_size, image_size, channel_size],
                            name="image2")
    image3 = tf.placeholder(tf.float32,
                            [None, image_size, image_size, channel_size],
                            name="image3")
    image4 = tf.placeholder(tf.float32,
                            [None, image_size, image_size, channel_size],
                            name="image4")
    image5 = tf.placeholder(tf.float32,
                            [None, image_size, image_size, channel_size],
                            name="image5")
    image6 = tf.placeholder(tf.float32,
                            [None, image_size, image_size, channel_size],
                            name="image6")
    is_training = tf.placeholder(tf.bool, [], name="is_training")

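    # build the autoencoder + GAN graph over all six image streams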
    (forward_loss, reconstruct_loss_1, reconstruct_loss_2, reconstruct_loss_3,
     generator_loss, discriminator_loss,
     image1_forward_reconstruct, image2_forward_reconstruct,
     image3_forward_reconstruct, image4_forward_reconstruct,
     class_vector_1, style_vector_1,
     image1_style_reconstruct, image3_style_reconstruct) = ae_with_gan(
         image1, image2, image3, image4, image5, image6,
         kernel, stride, class_dim, style_dim, is_training, loss_type,
         reconstruct_coef_1, reconstruct_coef_2, reconstruct_coef_3,
         generator_coef, discriminator_coef,
         'ae-with-gan')

    encoder_variables = scope_variables("ae-with-gan/encoder")
    decoder_variables = scope_variables('ae-with-gan/decoder')
    discriminator_variables_1 = scope_variables('ae-with-gan/discriminator_1')
    discriminator_variables_2 = scope_variables('ae-with-gan/discriminator_2')
    discriminator_variables_3 = scope_variables('ae-with-gan/discriminator_3')
    #all_variables = scope_variables('ae-with-gan')
    #print([n.name for n in tf.get_default_graph().as_graph_def().node])
    #print([n.name for n in all_variables])

    forward_solver = tf.train.AdamOptimizer(learning_rate=lr, beta1=0.5)
    generator_solver = tf.train.AdamOptimizer(learning_rate=lr, beta1=0.5)
    discriminator_solver = tf.train.AdamOptimizer(learning_rate=lr, beta1=0.5)
    forward_train = forward_solver.minimize(forward_loss,
                                            var_list=encoder_variables +
                                            decoder_variables)
    generator_train = generator_solver.minimize(generator_loss,
                                                var_list=decoder_variables)
    discriminator_train = discriminator_solver.minimize(
        discriminator_loss,
        var_list=discriminator_variables_1 + discriminator_variables_2 +
        discriminator_variables_3)

    idxes_1 = np.arange(imageNum, dtype=np.int32)
    idxes_2 = np.arange(imageNum, dtype=np.int32)
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = parser.gpu_fraction
    saver = tf.train.Saver()
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(epochs):
            np.random.shuffle(idxes_1)
            np.random.shuffle(idxes_2)
            forward_losses = []
            reconstruct_losses_1 = []
            reconstruct_losses_2 = []
            reconstruct_losses_3 = []
            generator_losses = []
            discriminator_losses = []

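            # each batch pairs styled inputs (1/3) with their 'std/0' ground
            # truth (2/4) and, presumably via find_truth, the other style's
            # rendering of the same characters (5/6)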
            for idx in range(0, imageNum, batch_size):
                image1_batch = loader(imageNameTrain1[idxes_1[idx:idx +
                                                              batch_size]],
                                      desired_height=image_size,
                                      desired_width=image_size,
                                      value_range=(0.0, 1.0),
                                      force_grayscale=force_grayscale)
                image2_batch = loader(find_truth(
                    imageNameTrain1[idxes_1[idx:idx + batch_size]],
                    imageDictTrain3),
                                      desired_height=image_size,
                                      desired_width=image_size,
                                      value_range=(0.0, 1.0),
                                      force_grayscale=force_grayscale)
                image3_batch = loader(imageNameTrain2[idxes_2[idx:idx +
                                                              batch_size]],
                                      desired_height=image_size,
                                      desired_width=image_size,
                                      value_range=(0.0, 1.0),
                                      force_grayscale=force_grayscale)
                image4_batch = loader(find_truth(
                    imageNameTrain2[idxes_2[idx:idx + batch_size]],
                    imageDictTrain3),
                                      desired_height=image_size,
                                      desired_width=image_size,
                                      value_range=(0.0, 1.0),
                                      force_grayscale=force_grayscale)
                image5_batch = loader(find_truth(
                    imageNameTrain1[idxes_1[idx:idx + batch_size]],
                    imageDictTrain2),
                                      desired_height=image_size,
                                      desired_width=image_size,
                                      value_range=(0.0, 1.0),
                                      force_grayscale=force_grayscale)
                image6_batch = loader(find_truth(
                    imageNameTrain2[idxes_2[idx:idx + batch_size]],
                    imageDictTrain1),
                                      desired_height=image_size,
                                      desired_width=image_size,
                                      value_range=(0.0, 1.0),
                                      force_grayscale=force_grayscale)
                feed_dict_training = {
                    image1: image1_batch,
                    image2: image2_batch,
                    image3: image3_batch,
                    image4: image4_batch,
                    image5: image5_batch,
                    image6: image6_batch,
                    is_training: True
                }

                # forward
                _, _forward_loss, _reconstruct_loss_1, _reconstruct_loss_2, _reconstruct_loss_3 = sess.run(
                    [
                        forward_train, forward_loss, reconstruct_loss_1,
                        reconstruct_loss_2, reconstruct_loss_3
                    ],
                    feed_dict=feed_dict_training)
                forward_losses.append(_forward_loss)
                reconstruct_losses_1.append(_reconstruct_loss_1)
                reconstruct_losses_2.append(_reconstruct_loss_2)
                reconstruct_losses_3.append(_reconstruct_loss_3)

                # generator
                _, _generator_loss = sess.run(
                    [generator_train, generator_loss],
                    feed_dict=feed_dict_training)
                generator_losses.append(_generator_loss)

                # discriminator
                _, _discriminator_loss = sess.run(
                    [discriminator_train, discriminator_loss],
                    feed_dict=feed_dict_training)
                discriminator_losses.append(_discriminator_loss)

            print('epoch: %d\n'
                  'forward_loss: %f\n'
                  'self_reconstruct_loss: %f\n'
                  'truth_reconstruct_loss: %f\n'
                  'transfer_reconstruct_loss: %f\n'
                  'generator_loss: %f\n'
                  'discriminator_loss: %f\n' %
                  (epoch, get_mean(forward_losses),
                   get_mean(reconstruct_losses_1),
                   get_mean(reconstruct_losses_2),
                   get_mean(reconstruct_losses_3),
                   get_mean(generator_losses),
                   get_mean(discriminator_losses)))

            # test
            image1_plot = loader(imageNameTrain1[idxes_1[0:10]],
                                 desired_height=image_size,
                                 desired_width=image_size,
                                 value_range=(0.0, 1.0),
                                 force_grayscale=force_grayscale)
            image2_plot = loader(find_truth(imageNameTrain1[idxes_1[0:10]],
                                            imageDictTrain3),
                                 desired_height=image_size,
                                 desired_width=image_size,
                                 value_range=(0.0, 1.0),
                                 force_grayscale=force_grayscale)
            image3_plot = loader(imageNameTrain2[idxes_2[0:10]],
                                 desired_height=image_size,
                                 desired_width=image_size,
                                 value_range=(0.0, 1.0),
                                 force_grayscale=force_grayscale)
            image4_plot = loader(find_truth(imageNameTrain2[idxes_2[0:10]],
                                            imageDictTrain3),
                                 desired_height=image_size,
                                 desired_width=image_size,
                                 value_range=(0.0, 1.0),
                                 force_grayscale=force_grayscale)
            image5_plot = loader(find_truth(imageNameTrain1[idxes_1[0:10]],
                                            imageDictTrain2),
                                 desired_height=image_size,
                                 desired_width=image_size,
                                 value_range=(0.0, 1.0),
                                 force_grayscale=force_grayscale)
            image6_plot = loader(find_truth(imageNameTrain2[idxes_2[0:10]],
                                            imageDictTrain1),
                                 desired_height=image_size,
                                 desired_width=image_size,
                                 value_range=(0.0, 1.0),
                                 force_grayscale=force_grayscale)
            feed_dict_not_training = {
                image1: image1_plot,
                image2: image2_plot,
                image3: image3_plot,
                image4: image4_plot,
                image5: image5_plot,
                image6: image6_plot,
                is_training: False
            }
            _image1_forward_reconstruct, _image2_forward_reconstruct, _image3_forward_reconstruct, _image4_forward_reconstruct, _image1_style_reconstruct, _image3_style_reconstruct = sess.run(
                [
                    image1_forward_reconstruct, image2_forward_reconstruct,
                    image3_forward_reconstruct, image4_forward_reconstruct,
                    image1_style_reconstruct, image3_style_reconstruct
                ],
                feed_dict=feed_dict_not_training)
            images = [
                image1_plot, image2_plot, image3_plot, image4_plot,
                image5_plot, image6_plot, _image1_forward_reconstruct,
                _image2_forward_reconstruct, _image3_forward_reconstruct,
                _image4_forward_reconstruct, _image1_style_reconstruct,
                _image3_style_reconstruct
            ]
            coefs = [
                loss_type, lr, reconstruct_coef_1, reconstruct_coef_2,
                reconstruct_coef_3, generator_coef, discriminator_coef
            ]
            plot_batch(images, 'train', epoch, coefs)

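            # repeat the reconstruction/plotting pass on the held-out test split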
            image1_plot = loader(imageNameTest1,
                                 desired_height=image_size,
                                 desired_width=image_size,
                                 value_range=(0.0, 1.0),
                                 force_grayscale=force_grayscale)
            image2_plot = loader(find_truth(imageNameTest1, imageDictTest3),
                                 desired_height=image_size,
                                 desired_width=image_size,
                                 value_range=(0.0, 1.0),
                                 force_grayscale=force_grayscale)
            image3_plot = loader(imageNameTest2,
                                 desired_height=image_size,
                                 desired_width=image_size,
                                 value_range=(0.0, 1.0),
                                 force_grayscale=force_grayscale)
            image4_plot = loader(find_truth(imageNameTest2, imageDictTest3),
                                 desired_height=image_size,
                                 desired_width=image_size,
                                 value_range=(0.0, 1.0),
                                 force_grayscale=force_grayscale)
            image5_plot = loader(find_truth(imageNameTest1, imageDictTest2),
                                 desired_height=image_size,
                                 desired_width=image_size,
                                 value_range=(0.0, 1.0),
                                 force_grayscale=force_grayscale)
            image6_plot = loader(find_truth(imageNameTest2, imageDictTest1),
                                 desired_height=image_size,
                                 desired_width=image_size,
                                 value_range=(0.0, 1.0),
                                 force_grayscale=force_grayscale)
            feed_dict_not_training = {
                image1: image1_plot,
                image2: image2_plot,
                image3: image3_plot,
                image4: image4_plot,
                image5: image5_plot,
                image6: image6_plot,
                is_training: False
            }
            _image1_forward_reconstruct, _image2_forward_reconstruct, _image3_forward_reconstruct, _image4_forward_reconstruct, _image1_style_reconstruct, _image3_style_reconstruct = sess.run(
                [
                    image1_forward_reconstruct, image2_forward_reconstruct,
                    image3_forward_reconstruct, image4_forward_reconstruct,
                    image1_style_reconstruct, image3_style_reconstruct
                ],
                feed_dict=feed_dict_not_training)
            images = [
                image1_plot, image2_plot, image3_plot, image4_plot,
                image5_plot, image6_plot, _image1_forward_reconstruct,
                _image2_forward_reconstruct, _image3_forward_reconstruct,
                _image4_forward_reconstruct, _image1_style_reconstruct,
                _image3_style_reconstruct
            ]
            coefs = [
                loss_type, lr, reconstruct_coef_1, reconstruct_coef_2,
                reconstruct_coef_3, generator_coef, discriminator_coef
            ]
            plot_batch(images, 'test', epoch, coefs)

        # encode the hyperparameters into the checkpoint directory name
        coefs = [
            loss_type, lr, reconstruct_coef_1, reconstruct_coef_2,
            reconstruct_coef_3, generator_coef, discriminator_coef
        ]
        suffix = '-'.join(str(coef) for coef in coefs)
        saver.save(sess, os.path.join('ckpt', suffix, 'model'))
Example #8
def test_with_graph_manually_set_up():
    parser = init()
    os.environ["CUDA_VISIBLE_DEVICES"] = parser.gpu
    categorical_cardinality = parser.categorical_cardinality
    data_path = parser.data_path
    styles = parser.styles
    image_size = parser.image_size
    force_grayscale = parser.force_grayscale
    channel_size = 1 if force_grayscale else 3
    seed = parser.seed
    lr = parser.lr
    batch_size = parser.batch_size
    epochs = parser.epochs
    kernel = parser.kernel
    stride = parser.stride
    class_dim = parser.class_dim
    reconstruct_coef = parser.reconstruct_coef
    generator_coef = parser.generator_coef
    discriminator_coef = parser.discriminator_coef

    imageName, imageDict = locate(data_path,
                                  styles=styles,
                                  max_label=categorical_cardinality)
    _, imageTrue = locate(data_path, max_label=categorical_cardinality)
    imageNum = len(imageName)

    image1 = tf.placeholder(tf.float32,
                            [None, image_size, image_size, channel_size],
                            name="image1")
    image2 = tf.placeholder(tf.float32,
                            [None, image_size, image_size, channel_size],
                            name="image2")
    is_training = tf.placeholder(tf.bool, [], name="is_training")

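    # rebuild the graph under the same scope name used in training so the
    # saver can map checkpoint variables onto it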
    forward_loss, reconstruct_loss, generator_loss, discriminator_loss, image1_forward_reconstruct, vector = cycle_consistent_vae_with_gan(
        image1, image2, kernel, stride, class_dim, is_training,
        reconstruct_coef, generator_coef, discriminator_coef,
        'cycle-consistent-vae-with-gan')

    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = parser.gpu_fraction
    saver = tf.train.Saver()
    idxes_1 = np.arange(imageNum, dtype=np.int32)
    np.random.shuffle(idxes_1)
    with tf.Session(config=config) as sess:
        saver.restore(sess, tf.train.latest_checkpoint('ckpt/'))
        image1_test = loader(imageName[idxes_1[0:10]],
                             desired_height=image_size,
                             desired_width=image_size,
                             value_range=(0.0, 1.0),
                             force_grayscale=force_grayscale)
        image2_test = loader(find_truth(imageName[idxes_1[0:10]], imageTrue),
                             desired_height=image_size,
                             desired_width=image_size,
                             value_range=(0.0, 1.0),
                             force_grayscale=force_grayscale)
        feed_dict_not_training = {
            image1: image1_test,
            image2: image2_test,
            is_training: False
        }
        image_reconstruct, latent_vector = sess.run(
            [image1_forward_reconstruct, vector],
            feed_dict=feed_dict_not_training)
        print(latent_vector.shape)
        print(latent_vector)
        plot(image1_test, image_reconstruct, image2_test, 0)
Example #9
def test_with_graph_automatically_loaded():
    parser = init()
    os.environ["CUDA_VISIBLE_DEVICES"] = parser.gpu
    categorical_cardinality = parser.categorical_cardinality
    data_path = parser.data_path
    styles = parser.styles
    image_size = parser.image_size
    force_grayscale = parser.force_grayscale
    channel_size = 1 if force_grayscale else 3
    seed = parser.seed
    lr = parser.lr
    batch_size = parser.batch_size
    epochs = parser.epochs
    kernel = parser.kernel
    stride = parser.stride
    class_dim = parser.class_dim
    reconstruct_coef = parser.reconstruct_coef
    generator_coef = parser.generator_coef
    discriminator_coef = parser.discriminator_coef

    imageName, imageDict = locate(data_path,
                                  styles=styles,
                                  max_label=categorical_cardinality)
    _, imageTrue = locate(data_path, max_label=categorical_cardinality)
    imageNum = len(imageName)

    #image1 = tf.placeholder(tf.float32,[None, image_size, image_size, channel_size],name="image1")
    #image2 = tf.placeholder(tf.float32,[None, image_size, image_size, channel_size],name="image2")
    #is_training = tf.placeholder(tf.bool,[],name="is_training")

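    # import the saved meta-graph instead of rebuilding it; placeholders and
    # outputs are recovered by name below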
    saver = tf.train.import_meta_graph('ckpt/server-2/model.meta')
    graph = tf.get_default_graph()
    with graph.as_default():
        variable_names = [v.name for v in tf.global_variables()]
        print(variable_names)
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = parser.gpu_fraction
        idxes_1 = np.arange(imageNum, dtype=np.int32)
        np.random.shuffle(idxes_1)
        with tf.Session(config=config) as sess:
            saver.restore(sess, tf.train.latest_checkpoint('ckpt/server-2/'))
            image1_test = loader(imageName[idxes_1[0:10]],
                                 desired_height=image_size,
                                 desired_width=image_size,
                                 value_range=(0.0, 1.0),
                                 force_grayscale=force_grayscale)
            image2_test = loader(find_truth(imageName[idxes_1[0:10]],
                                            imageTrue),
                                 desired_height=image_size,
                                 desired_width=image_size,
                                 value_range=(0.0, 1.0),
                                 force_grayscale=force_grayscale)
            image1 = graph.get_tensor_by_name('image1:0')
            image2 = graph.get_tensor_by_name('image2:0')
            is_training = graph.get_tensor_by_name('is_training:0')
            feed_dict_not_training = {
                image1: image1_test,
                image2: image2_test,
                is_training: False
            }
            # fetch tensors (with the ':0' output index), not operations;
            # sess.run on an Operation returns None, which would break the
            # shape check and plot below
            class_vector = graph.get_tensor_by_name(
                'cycle-consistent-vae-with-gan/encoder/class_vector/fully_connected/Maximum:0'
            )
            image_reconstruct = graph.get_tensor_by_name(
                'cycle-consistent-vae-with-gan/decoder/conv4/Conv2d_transpose/Sigmoid:0'
            )
            _class_vector, _image_reconstruct = sess.run(
                [class_vector, image_reconstruct],
                feed_dict=feed_dict_not_training)
            print(_class_vector.shape)
            plot(image1_test, _image_reconstruct, image2_test, 0)
Example #10
    def test_locate_with_context_dict(self):
        assert issubclass(locate("ProdStat", globals()), Model)
Example #11
def transfer_style():
    parser = init()
    os.environ["CUDA_VISIBLE_DEVICES"] = parser.gpu
    categorical_cardinality = parser.categorical_cardinality
    data_path = parser.data_path
    styles = parser.styles
    image_size = parser.image_size
    force_grayscale = parser.force_grayscale
    channel_size = 1 if force_grayscale else 3
    seed = parser.seed
    lr = parser.lr
    batch_size = parser.batch_size
    epochs = parser.epochs
    kernel = parser.kernel
    stride = parser.stride
    class_dim = parser.class_dim
    style_dim = parser.style_dim
    reconstruct_coef_1 = parser.reconstruct_coef_1
    reconstruct_coef_2 = parser.reconstruct_coef_2
    generator_coef = parser.generator_coef
    discriminator_coef = parser.discriminator_coef

    imageName, imageDict = locate(data_path,
                                  styles=styles,
                                  max_label=categorical_cardinality)
    _, imageTrue = locate(data_path, max_label=categorical_cardinality)
    imageNum = len(imageName)

    image1 = tf.placeholder(tf.float32,
                            [None, image_size, image_size, channel_size],
                            name="image1")
    image2 = tf.placeholder(tf.float32,
                            [None, image_size, image_size, channel_size],
                            name="image2")
    is_training = tf.placeholder(tf.bool, [], name="is_training")

    image1_forward_reconstruct, image2_forward_reconstruct = transfer(
        image1, image2, kernel, stride, class_dim, style_dim, is_training,
        'cycle-consistent-vae-with-gan')

    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = parser.gpu_fraction
    saver = tf.train.Saver()
    idxes_1 = np.arange(imageNum, dtype=np.int32)
    np.random.shuffle(idxes_1)
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(
            sess, tf.train.latest_checkpoint('ckpt/server-14/1.0-1.0-5e-05-'))
        for idx in range(0, imageNum, batch_size):
            image_batch = loader(imageName[idxes_1[idx:idx + batch_size]],
                                 desired_height=image_size,
                                 desired_width=image_size,
                                 value_range=(0.0, 1.0),
                                 force_grayscale=force_grayscale)
            feed_dict_not_training = {
                image1: image_batch[:batch_size // 2, :],
                image2: image_batch[batch_size // 2:, :],
                is_training: False
            }
            _image1_forward_reconstruct, _image2_forward_reconstruct = sess.run(
                [image1_forward_reconstruct, image2_forward_reconstruct],
                feed_dict=feed_dict_not_training)
            print(_image1_forward_reconstruct.shape,
                  _image2_forward_reconstruct.shape)
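            # exit(0) below stops after the first batch, so this run only
            # checks the reconstruction shapes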
            exit(0)
Example #12
    def test_locate_with_dotted_path(self):
        assert issubclass(locate("db.models.ProdStat"), Model)
Example #13
    def test_locate_local_name_with_empty_context(self):
        with pytest.raises(ValueError):
            locate("ProdStat", {})
Example #14
    def test_locate_with_context_str(self):
        assert issubclass(locate("ProdStat", "db.models"), Model)
Example #15
def main():
    # initialize parameters
    parser = init()
    os.environ["CUDA_VISIBLE_DEVICES"] = parser.gpu
    categorical_cardinality = parser.categorical_cardinality
    fraction = parser.fraction
    data_path = parser.data_path
    styles = parser.styles
    image_size = parser.image_size
    force_grayscale = parser.force_grayscale
    augment = parser.augment
    channel_size = 1 if force_grayscale else 3
    seed = parser.seed
    lr = parser.lr
    loss_type = parser.loss_type
    batch_size = parser.batch_size
    epochs = parser.epochs
    kernel = parser.kernel
    stride = parser.stride
    class_dim = parser.class_dim
    style_dim = parser.style_dim
    reconstruct_coef_1 = parser.reconstruct_coef_1
    reconstruct_coef_3 = parser.reconstruct_coef_3
    generator_coef = parser.generator_coef
    discriminator_coef = parser.discriminator_coef

    # load data
    partition = make_partition(300, categorical_cardinality, fraction)
    imageNameTrain, imageNameTest = locate(data_path,
                                           styles=styles,
                                           max_label=categorical_cardinality,
                                           partition=partition)
    styleNum, charNum = imageNameTrain.shape[0], imageNameTrain.shape[1]
    imageNum = styleNum * charNum

    image1 = tf.placeholder(tf.float32,
                            [None, image_size, image_size, channel_size],
                            name="image1")
    image1_binary = binary(image1, image_size, channel_size, 0.7)
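    # the quoted block below appears to be an earlier inline version of
    # binary(): pixels below the 0.7 threshold keep their value and the rest
    # are pushed to white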
    '''
    image1_averge_org = tf.reduce_mean(image1,[1,2,3])
    image1_average = tf.reshape(tf.reduce_mean(image1,[1,2,3]),[-1,1,1,1])
    #image1_average = tf.tile(image1_average,[1, image_size, image_size, channel_size])
    image1_average = tf.cast(tf.ones_like(image1),tf.float32)*0.7
    image1_mask = tf.cast(tf.less(image1,image1_average),tf.float32)
    image1_binary = image1*image1_mask+(1-image1_mask)
    '''

    idxes_1 = np.arange(imageNum, dtype=np.int32)
    idxes_2 = np.arange(imageNum, dtype=np.int32)
    np.random.shuffle(idxes_1)
    np.random.shuffle(idxes_2)
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = parser.gpu_fraction
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        image1_plot, _, _, _, _, _, _ = loader(imageNameTrain,
                                               idxes_1[0:10],
                                               idxes_2[0:10],
                                               styleNum,
                                               charNum,
                                               desired_height=image_size,
                                               desired_width=image_size,
                                               value_range=(0.0, 1.0),
                                               augment=augment,
                                               force_grayscale=force_grayscale)

        feed_dict = {image1: image1_plot}
        _image1_binary = sess.run(image1_binary, feed_dict=feed_dict)

        images = [image1_plot, _image1_binary]
        coefs = [
            loss_type, lr, reconstruct_coef_1, reconstruct_coef_3,
            generator_coef, discriminator_coef
        ]
        plot_batch(images, 'train', 0, coefs)
        exit(0)
Example #16
def main():
    # initialize parameters
    parser = init()
    os.environ["CUDA_VISIBLE_DEVICES"] = parser.gpu
    categorical_cardinality = parser.categorical_cardinality
    data_path = parser.data_path
    styles = parser.styles
    image_size = parser.image_size
    force_grayscale = parser.force_grayscale
    channel_size = 1 if force_grayscale else 3
    seed = parser.seed
    lr = parser.lr
    batch_size = parser.batch_size
    epochs = parser.epochs
    kernel = parser.kernel
    stride = parser.stride
    class_dim = parser.class_dim
    style_dim = parser.style_dim
    continuous_dim = parser.continuous_dim
    reconstruct_coef = parser.reconstruct_coef
    continuous_coef = parser.continuous_coef
    generator_coef = parser.generator_coef
    discriminator_coef = parser.discriminator_coef

    # load data
    imageName, imageDict = locate(data_path,
                                  styles=styles,
                                  max_label=categorical_cardinality)
    imageNum = len(imageName)

    image1 = tf.placeholder(tf.float32,
                            [None, image_size, image_size, channel_size],
                            name="image1")
    is_training = tf.placeholder(tf.bool, [], name="is_training")
    zc_vector = tf.placeholder(tf.float32,
                               [None, class_dim + style_dim + continuous_dim],
                               name="zc_vector")

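    # zc_vector packs the class, style, and continuous latent codes
    # (class_dim + style_dim + continuous_dim wide) fed to the infogan graph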
    reconstruct_loss, continuous_loss, generator_loss, discriminator_loss, image_reconstruct = infogan(
        zc_vector, image1, kernel, stride, class_dim, style_dim,
        continuous_dim, is_training, reconstruct_coef, continuous_coef,
        generator_coef, discriminator_coef, 'infogan')

    decoder_variables = scope_variables('infogan/decoder')
    discriminator_variables = scope_variables('infogan/discriminator')
    mutual_variables = scope_variables('infogan/mutual_fc')

    reconstruct_solver = tf.train.AdamOptimizer(learning_rate=lr, beta1=0.5)
    continuous_solver = tf.train.AdamOptimizer(learning_rate=lr, beta1=0.5)
    generator_solver = tf.train.AdamOptimizer(learning_rate=lr, beta1=0.5)
    discriminator_solver = tf.train.AdamOptimizer(learning_rate=lr, beta1=0.5)
    reconstruct_train = reconstruct_solver.minimize(reconstruct_loss,
                                                    var_list=decoder_variables)
    continuous_train = continuous_solver.minimize(continuous_loss,
                                                  var_list=decoder_variables +
                                                  discriminator_variables +
                                                  mutual_variables)
    generator_train = generator_solver.minimize(generator_loss,
                                                var_list=decoder_variables)
    discriminator_train = discriminator_solver.minimize(
        discriminator_loss, var_list=discriminator_variables)

    idxes_1 = np.arange(imageNum, dtype=np.int32)
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = parser.gpu_fraction
    saver = tf.train.Saver()
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(epochs):
            np.random.shuffle(idxes_1)
            reconstruct_losses = []
            continuous_losses = []
            generator_losses = []
            discriminator_losses = []

            for idx in range(0, imageNum, batch_size):
                image1_batch = loader(imageName[idxes_1[idx:idx + batch_size]],
                                      desired_height=image_size,
                                      desired_width=image_size,
                                      value_range=(0.0, 1.0),
                                      force_grayscale=force_grayscale)
                vector = get_vector(imageName[idxes_1[idx:idx + batch_size]],
                                    style_dim, continuous_dim)
                feed_dict_training = {
                    image1: image1_batch,
                    zc_vector: vector,
                    is_training: True
                }

                # decoder
                _, _, _, _reconstruct_loss, _continuous_loss, _generator_loss = sess.run(
                    [
                        reconstruct_train, continuous_train, generator_train,
                        reconstruct_loss, continuous_loss, generator_loss
                    ],
                    feed_dict=feed_dict_training)
                reconstruct_losses.append(_reconstruct_loss)
                continuous_losses.append(_continuous_loss)
                generator_losses.append(_generator_loss)

                # discriminator
                _, _discriminator_loss = sess.run(
                    [discriminator_train, discriminator_loss],
                    feed_dict=feed_dict_training)
                discriminator_losses.append(_discriminator_loss)

            print(
                'epoch: %d\nreconstruct_loss: %f, continuous_loss: %f\ngenerator_loss: %f, discriminator_loss: %f\n'
                % (epoch, get_mean(reconstruct_losses),
                   get_mean(continuous_losses), get_mean(generator_losses),
                   get_mean(discriminator_losses)))

            image1_plot = loader(imageName[idxes_1[0:10]],
                                 desired_height=image_size,
                                 desired_width=image_size,
                                 value_range=(0.0, 1.0),
                                 force_grayscale=force_grayscale)
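            # note: this reuses the zc vector from the last training batch,
            # whose batch size may not match the ten plotted images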
            feed_dict_not_training = {
                image1: image1_plot,
                zc_vector: vector,
                is_training: False
            }
            _image_reconstruct = sess.run(image_reconstruct,
                                          feed_dict=feed_dict_not_training)
            plot(image1_plot, _image_reconstruct, epoch)
        saver.save(sess, 'ckpt/infogan/model')