Example #1
    def setup(cls):
        def multipleOf4Criterion(number):
            return number % 4 == 0

        def multipleOf8Criterion(number):
            return number % 8 == 0

        cls.generatorA = Generator(65, 16807, criterion=multipleOf4Criterion)
        cls.generatorB = Generator(8921, 48271, criterion=multipleOf8Criterion)
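The Generator class itself is not shown in this example. The seeds 65 and 8921 together with the factors 16807 and 48271 match the classic "dueling generators" puzzle (Advent of Code 2017, day 15), so a minimal compatible sketch might look like the following; the class body and the next() method name are assumptions, not the original implementation.

class Generator:
    """Hypothetical sketch of a multiply-and-modulo generator with an
    optional acceptance criterion; the real class is not shown above."""

    MODULUS = 2147483647  # 2**31 - 1, the modulus used by both generators

    def __init__(self, value, factor, criterion=None):
        self.value = value
        self.factor = factor
        self.criterion = criterion

    def next(self):
        # Advance the sequence until a value passes the criterion (if any).
        while True:
            self.value = (self.value * self.factor) % self.MODULUS
            if self.criterion is None or self.criterion(self.value):
                return self.value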
Example #2
 def generateNewImage(self):
     if (self.dataset.get() == 1):
         path = "models/faces/celeba" + str(self.num.get() * 10) + ".model"
         firstModel = torch.load(path)
         netG = Generator(ngpu).to(self.device)
         netG.load_state_dict(firstModel["generator_state_dict"])
         netG.eval()
         fake1 = netG(self.noise).detach().cpu()
         array = np.transpose(
             vutils.make_grid(fake1, padding=2, normalize=True),
             (1, 2, 0)).numpy()
         im = np.array(
             Image.fromarray((array * 255).astype(np.uint8)).convert('RGB'))
         self.img = ImageTk.PhotoImage(image=Image.fromarray(im))
         self.imageMatrix.configure(image=self.img)
         self.imageMatrix.image = self.img
         self.imageMatrix.pack()
     elif (self.dataset.get() == 2):
         path = "models/landscapes/scenery" + str(self.num.get()) + ".model"
         firstModel = torch.load(path)
         netG = Generator(ngpu).to(self.device)
         netG.load_state_dict(firstModel["generator_state_dict"])
         netG.eval()
         fake1 = netG(self.noise).detach().cpu()
         array = np.transpose(
             vutils.make_grid(fake1, padding=2, normalize=True),
             (1, 2, 0)).numpy()
         im = np.array(
             Image.fromarray((array * 255).astype(np.uint8)).convert('RGB'))
         self.img = ImageTk.PhotoImage(image=Image.fromarray(im))
         self.imageMatrix.configure(image=self.img)
         self.imageMatrix.image = self.img
         self.imageMatrix.pack()
     elif (self.dataset.get() == 3):
         path = "models/art/art" + str(self.num.get() * 100) + ".model"
         firstModel = torch.load(path)
         netG = Generator(ngpu).to(self.device)
         netG.load_state_dict(firstModel["generator_state_dict"])
         netG.eval()
         fake1 = netG(self.noise).detach().cpu()
         array = np.transpose(
             vutils.make_grid(fake1, padding=2, normalize=True),
             (1, 2, 0)).numpy()
         im = np.array(
             Image.fromarray((array * 255).astype(np.uint8)).convert('RGB'))
         self.img = ImageTk.PhotoImage(image=Image.fromarray(im))
         self.imageMatrix.configure(image=self.img)
         self.imageMatrix.image = self.img
         self.imageMatrix.pack()
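The three branches above differ only in how the checkpoint path is built; everything after torch.load is identical. A sketch of the same logic with the duplication factored out (all calls copied from above; the paths dictionary is an illustrative refactor, not part of the original code):

 def generateNewImage(self):
     # Map each dataset choice to its checkpoint path (paths copied from above).
     paths = {
         1: "models/faces/celeba" + str(self.num.get() * 10) + ".model",
         2: "models/landscapes/scenery" + str(self.num.get()) + ".model",
         3: "models/art/art" + str(self.num.get() * 100) + ".model",
     }
     path = paths.get(self.dataset.get())
     if path is None:
         return
     checkpoint = torch.load(path)
     netG = Generator(ngpu).to(self.device)
     netG.load_state_dict(checkpoint["generator_state_dict"])
     netG.eval()
     fake = netG(self.noise).detach().cpu()
     array = np.transpose(
         vutils.make_grid(fake, padding=2, normalize=True), (1, 2, 0)).numpy()
     im = np.array(
         Image.fromarray((array * 255).astype(np.uint8)).convert('RGB'))
     self.img = ImageTk.PhotoImage(image=Image.fromarray(im))
     self.imageMatrix.configure(image=self.img)
     self.imageMatrix.image = self.img
     self.imageMatrix.pack()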
Example #3
def lanzar_dados(dado1, jugador, dado2, total, n):
    h = 0
    n = n
    for i in range(n):
        dado1.append(
            Generator(m=i * 5 + 2, low=1, high=6, rounded=True).throw())
        dado2.append(Generator(m=9 + i, low=1, high=6, rounded=True).throw())
        if i > 0:
            while dado1[i] + dado2[i] in total:
                h += 1
                dado1[i] = Generator(a=h, low=1, high=6, rounded=True).throw()
                dado2[i] = Generator(c=h + 4, low=1, high=6,
                                     rounded=True).throw()
        total.append(dado1[i] + dado2[i])
    return total, dado1, dado2
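The Generator used here takes a, c and m keyword seeds plus a low/high range and a rounded flag, which suggests a seeded pseudo-random number generator mapped onto [low, high]; its implementation is not shown. A rough stand-in sketch (the parameter names and throw() come from the calls above, the internals are guessed and simply delegate to Python's random module):

import random


class Generator:
    """Hypothetical stand-in for the Generator used above; the real class
    is not shown and its internals may differ."""

    def __init__(self, a=0, c=0, m=0, low=1, high=6, rounded=True):
        # Derive a seed from whichever of a/c/m were supplied (guessed semantics).
        self.rng = random.Random(hash((a, c, m)))
        self.low, self.high, self.rounded = low, high, rounded

    def throw(self):
        # Draw a value in [low, high], rounding to an integer if requested.
        value = self.rng.uniform(self.low, self.high)
        return round(value) if self.rounded else value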
Example #4
 def showImages(path):
     device = torch.device("cuda:0" if (
         torch.cuda.is_available() and ngpu > 0) else "cpu")
     fixed_noise = torch.randn(64, nz, 1, 1, device=device)
     #total = torch.load(path)
     firstModel = torch.load(".art0.model")
     print(firstModel["generator_state_dict"])
     print("______________________________________________________________")
     netG = Generator(ngpu).to(device)
     netG.load_state_dict(firstModel["generator_state_dict"])
     netG.eval()
     fake1 = netG(fixed_noise).detach().cpu()
     firstModel = torch.load(".art1.model")
     print(firstModel["generator_state_dict"])
     netG.load_state_dict(firstModel["generator_state_dict"])
     netG.eval()
     fake2 = netG(fixed_noise).detach().cpu()
     plt.figure(figsize=(15, 15))
     plt.subplot(1, 2, 2)
     plt.axis("off")
     plt.title("Fake 1")
     plt.imshow(
         np.transpose(vutils.make_grid(fake1, padding=2, normalize=True),
                      (1, 2, 0)))
     plt.subplot(1, 2, 1)
     plt.axis("off")
     plt.title("Fake2")
     plt.imshow(
         np.transpose(vutils.make_grid(fake2, padding=2, normalize=True),
                      (1, 2, 0)))
     plt.show()
Example #5
    def __init__(self, input_size, word_vec_dim, hidden_size, output_size, 
                n_layers=4, dropout_p=.2):
        self.input_size = input_size
        self.word_vec_dim = word_vec_dim
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        self.dropout_p = dropout_p
        
        super(Seq2Seq, self).__init__()

        self.emb_src = nn.Embedding(input_size, word_vec_dim)
        self.emb_dec = nn.Embedding(output_size, word_vec_dim)

        self.encoder = Encoder(word_vec_dim, hidden_size,
                               n_layers=n_layers,
                               dropout_p=dropout_p)
        
        self.decoder = Decoder(word_vec_dim, hidden_size,
                               n_layers=n_layers,
                               dropout_p=dropout_p)
        
        self.attn = Attention(hidden_size)

        self.concat = nn.Linear(hidden_size*2, hidden_size)
        self.tanh = nn.Tanh()
        self.generator = Generator(hidden_size, output_size)
Example #6
    def __init__(self, scene):
        self.scene = scene
        self.scene.onKeyboard = self.onKeyBoard

        self.background1 = Background("Images/background/background.png", 0)
        self.background1.locate(self.scene, self.background1.x, 0)
        self.background1.show()
        self.background2 = Background("Images/background/background.png", 1280)
        self.background2.locate(self.scene, self.background2.x, 0)
        self.background2.show()

        self.backgroundTimer = BackgroundTimer(0.01, self.scene,
                                               self.background1,
                                               self.background2)
        self.backgroundTimer.start()

        self.user = User("Images/character/user/run/user_1.png", self.scene,
                         0.9, self)
        self.user.locate(self.scene, self.user.x, self.user.y)
        self.user.show()

        def up_background_velocity():
            self.backgroundTimer.velocity += 1

        def down_background_velocity():
            self.backgroundTimer.velocity = 5

        self.user.up_background_velocity = up_background_velocity
        self.user.down_background_velocity = down_background_velocity

        self.generator = Generator(self.scene, self.user)
Example #7
def generate(outputDir, db, kernels, libxsmmGenerator, architecture, prefix=''):
  Expr.analyseKernels(db, kernels)

  for matrixInfo in db.itervalues():
    matrixInfo.generateMemoryLayout(architecture, alignStartrow=matrixInfo.leftMultiplication)    
  for prototype in kernels:
    prototype.kernel.generateMemoryLayout(architecture, alignStartrow=True)

  print('\nKernels')
  print('-------')
  for prototype in kernels:
    print(u'{}: {}'.format(prototype.name, prototype.kernel.symbol))

  print('\nMemory layout')
  print('-------------')
  keys = db.keys()
  keys.sort(key=lambda s: s.lower())
  for key in keys:
    name = db[key].name
    for block in db[key].blocks:
      print('{:16} {}'.format(name, block))
      name = ''

  generator = Generator.Generator(db, libxsmmGenerator, architecture, prefix)
  generator.generateKernels(outputDir, kernels)
  generator.generateInitializer(outputDir)
  generator.generateUnitTests(outputDir, kernels)
Example #8
def init_static_dialog_agent(args) :
    print "reading in Ontology"
    ont = Ontology.Ontology(sys.argv[1])
    print "predicates: " + str(ont.preds)
    print "types: " + str(ont.types)
    print "entries: " + str(ont.entries)

    print "reading in Lexicon"
    lex = Lexicon.Lexicon(ont, sys.argv[2])
    print "surface forms: " + str(lex.surface_forms)
    print "categories: " + str(lex.categories)
    print "semantic forms: " + str(lex.semantic_forms)
    print "entries: " + str(lex.entries)

    print "instantiating Feature Extractor"
    f_extractor = FeatureExtractor.FeatureExtractor(ont, lex)

    print "instantiating Linear Learner"
    learner = LinearLearner.LinearLearner(ont, lex, f_extractor)

    print "instantiating KBGrounder"
    grounder = KBGrounder.KBGrounder(ont)

    load_parser_from_file = False
    if len(args) > 4 :
        if args[4].lower() == 'true' :
            load_parser_from_file = True
            
    if load_parser_from_file :
        parser = load_model('static_parser')
        grounder.parser = parser
        grounder.ontology = parser.ontology
    else :
        print "instantiating Parser"
        parser = Parser.Parser(ont, lex, learner, grounder, beam_width=10, safety=True)

    print "instantiating Generator"
    generator = Generator.Generator(ont, lex, learner, parser, beam_width=sys.maxint, safety=True)

    print "instantiating DialogAgent"
    static_policy = StaticDialogPolicy.StaticDialogPolicy()
    A = StaticDialogAgent(parser, generator, grounder, static_policy, None, None)

    if not load_parser_from_file :
        print "reading in training data"
        D = A.read_in_utterance_action_pairs(args[3])

        if len(args) > 4 and args[4] == "both":
            print "training parser and generator jointly from actions"
            converged = A.jointly_train_parser_and_generator_from_utterance_action_pairs(
                D, epochs=10, parse_beam=30, generator_beam=10)
        else:
            print "training parser from actions"
            converged = A.train_parser_from_utterance_action_pairs(
                D, epochs=10, parse_beam=30)

        print "theta: "+str(parser.learner.theta)
        save_model(parser, 'static_parser')
    
    return A
Example #9
    def __init__(self, dimensions=(100, 100)):
        self.dimensions = dimensions
        self.layers = {}
        self.generator = Generator.Generator()

        for layer_name in self.generator.generation_order:
            self.add_layer(layer_name)
Example #10
def gird_model(name,
               day,
               timeDs,
               engine,
               priceRatio=0.2,
               geneRatio=0.2,
               loadRatio=0.1):
    #pd.set_option('display.max_columns', None)
    #pd.set_option('display.max_rows', None)
    res = []
    generator = Generator(name, day, engine, frash_ratio=geneRatio)
    load = Load(name, day, engine, frash_ratio=loadRatio)
    storage = Storage(name, engine)
    pricetable = Para_PriceTable(day, engine, frash_ratio=priceRatio)
    dayplan = Para_DayAheadPlan(day, engine)
    timenow = datetime(int(day[0:4]), int(day[5:7]), int(day[8:10]))
    timeD = timedelta(minutes=timeDs)

    while timenow < datetime(int(day[0:4]), int(day[5:7]), int(
            day[8:10])) + timedelta(days=1):
        print('Do:', timenow, '-------------------------------------------')
        policy = gird_model_dynamic(timenow, storage, load, generator,
                                    pricetable, dayplan)
        log = Do_policy(timenow, timeD, policy,
                        pricetable.get_PP_bytime_buy(timenow).price,
                        pricetable.get_PP_bytime_sale(timenow).price, storage,
                        load, generator, dayplan)
        res.append(log.tolist())
        timenow += timeD
        generator.refrash(timenow)
        load.refrash(timenow)
        pricetable.refrash(timenow)

    return pd.DataFrame(res, columns=Para_DoLog.get_names())
Example #11
def generate_noisy_dataset():
    Gen = Generator()
    Gen.load_params(generator_weights)
    contexts = pickle.load(open("contexts",'r'))

    for c in contexts.iterkeys():
        contexts[c] = Gen.res_gen([c])
    return contexts
Example #12
def norepalgo():
    contador1 = 0
    contador1S = 0
    contador2 = 0
    contador2S = 0
    contador3 = 0
    contador3S = 0
    contador4 = 0
    contador4S = 0
    contador5 = 0
    contador5S = 0
    contador6 = 0
    contadorG = 0

    rarrayt = []
    i = 0
    j = 3

    for n in range(19):

        while contadorG != 19:
            i += 1
            j += 1
            r = Generator(a=i, c=j / 2, low=1, high=6, rounded=True).throw()
            if r == 1 and contador1 != 4:
                contador1 += 1
                rarrayt.append(r + contador1S)
                contador1S += 10

            elif r == 2 and contador2 != 4:
                contador2 += 1
                rarrayt.append(r + contador2S)
                contador2S += 10

            elif r == 3 and contador3 != 4:
                contador3 += 1
                rarrayt.append(r + contador3S)
                contador3S += 10

            elif r == 4 and contador4 != 3:
                contador4 += 1
                rarrayt.append(r + contador4S)
                contador4S += 10

            elif r == 5 and contador5 != 3:
                contador5 += 1
                rarrayt.append(r + contador5S)
                contador5S += 10

            elif r == 6 and contador6 != 1:
                contador6 += 1
                rarrayt.append(r)

            contadorG = contador1 + contador2 + contador3 + contador4 + contador5 + contador6

    print("Arreglo global de los terrenos del tablero")
    return (rarrayt)
    print("")
Example #13
 def __init__(self,
              input_shape,
              depth_layers_discriminator=[64, 128, 256, 512],
              depth_layers_generator=[1024, 512, 256, 128],
              dim_noise=100,
              model="simple",
              data="MNIST",
              flip_discri_labels=False,
              final_generator_activation="tanh",
              test_name='_1'):
     """DCGAN model (for each parameter, the default value is the value used in the DCGAN paper)
     :param input_shape: format [height, width, depth]
     :param depth_layers_discriminator: the depth of the different layers used by the discriminator, only for the
     dcgan models
     :param depth_layers_generator: the depth of the different layers used by the generator, only for the
     dcgan models
     :param dim_noise: size of the input noise used by the generator
     :param model: type of model to use (simple, intermediate, dcgan_custom, dcgan_vanilla)
     :param data: dataset used
     :param flip_discri_labels: flip labels in the computation of the discrimination loss (10% flip)
     :param final_generator_activation: activation function for the output layer of the generator
     :param test_name: to give a name to the current execution"""
     #Saving param
     self.test_name = test_name
     # Global model
     self.model = model
     self.data = data
     # Dimension of data
     self.output_height = input_shape[0]
     self.output_width = input_shape[1]
     self.output_depth = input_shape[2]
     self.dim_noise = dim_noise
     # Useful variables
     self.real_images_probabilities = 0
     self.fake_images_probabilities = 0
     # Build input variables
     #self.X_batch = tf.placeholder(dtype=tf.float32, shape=[None, self.output_depth*self.output_height*self.output_width], name='X')
     self.X_batch = tf.placeholder(dtype=tf.float32,
                                   shape=[
                                       None, self.output_height,
                                       self.output_width, self.output_depth
                                   ],
                                   name='real_images')
     self.noise_batch = tf.placeholder(dtype=tf.float32,
                                       shape=[None, self.dim_noise],
                                       name='noise')
     # Build both components
     self.final_generator_activation = final_generator_activation
     self.discriminator = Discriminator(input_shape,
                                        depth_layers_discriminator,
                                        model=model)
     self.generator = Generator(input_shape,
                                depth_layers=depth_layers_generator,
                                model=model,
                                data=data,
                                final_activation=final_generator_activation)
     # Construct the graph
     self.build_graph(flip_discri_labels=flip_discri_labels)
Example #14
 def _generate_cpp(self , header):
     filename = self.filename + ".cpp"
     cppfile  = Generator.Generator(filename)
     self._write_author( self.cpp , cppfile )
     cppfile.Write('#include "{0}"\n'.format(header))
     self._write_using_namespace(cppfile)
     self._write_constructor(cppfile)
     self._write_deconstructor(cppfile)
     cppfile.Destory()
Example #15
def start():
    global dataset
    # THIS IS THE DATA THAT IS RETRIEVED IN DATASET
    dataset = DataR.DatasetRetrieval()
    dataset = dataset.retrieveImages() # first half is orig images, 2nd half is pixelated images
    print('Loaded ', dataset[0].shape, dataset[0].shape[1:], " Image sizes")
    # Image shape is 96 x 96 x 3 in this dataset
    image_shape = dataset[0].shape[1:]

    # define descriminator model
    descrim_model = Discriminator.Discriminator(image_shape)
    descrim_model= descrim_model.define_discriminator()

    # Define generator model
    gen_model = Generator.Generator((32,32,3))
    gen_model= gen_model.define_gen()

    # GAN MODEL IMPLEMENTS BOTH GENERATOR AND DESCRIMINATOR INSIDE
    gan_model = define_GAN(gen_model, descrim_model,image_shape)
    n_patch = descrim_model.get_output_shape_at(1)[1] # size 1

    n_batches= dataset[0].shape[0]
    # unpack dataset
    train_hr, train_lr = dataset
    # num of batches per epoch
    ####################################
    #
    # Train Discriminator...
    #
    #####################################
    bat_per_epo = int(len(train_hr) / 1)
    # Calculates total iterations needed based on epochs (100 epochs)
    n_steps = bat_per_epo * 1000 #100,000 iterations...
    # iterate through each epoch through steps
    for i in range(n_steps):
        # retrieve real samples
        X_real_hr, X_real_lr, real_y1 = generate_real_samples(n_batches, n_patch)
        # generate fake images
        X_fakeB, fake_y = Generator.generateFakeSamples(gen_model, X_real_lr, n_patch)

        #X_real_hr = (X_real_hr + 1) / 2.0
        #X_real_lr = (X_real_lr + 1) / 2.0
        #X_fakeB = (X_fakeB + 1) / 2.0
        # Loss function of first set of real images
        _, d_loss_real = descrim_model.train_on_batch(X_real_hr, real_y1)

        # Loss function for fake images
        _, d_loss_fake = descrim_model.train_on_batch(X_fakeB, fake_y)
        d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)  # d_loss[0] --> shape(2,)
        _, g_loss, _ = gan_model.train_on_batch(X_real_lr, [X_real_hr, real_y1])  # in_src, [gen_out, dis_out] model

        # Loss functions printed out
        print('>%d, dreal[%.4f], dfake[%.4f], g[%.4f]' % (i + 1, d_loss_real, d_loss_fake,g_loss))
        # save data after epoch
        if (i + 1) % (200) == 0:
            summarize_performance(i, gen_model)
Example #16
class Tester():
    minSup = 2
    delta = 5
    upperDelta = 20
    size = 20
    nEvents = 6
    debug = False

    gen = Generator(42)

    eventSeq = gen.generate(size, nEvents)
    print('Event sequence:')
    print(eventSeq)

    bf = BruteForce(eventSeq, minSup, delta, upperDelta, debug)
    t1 = time.time()
    F1 = bf.apply()
    t2 = time.time()
    print('F1')
    print(F1)

    m = Meselo(eventSeq, minSup, delta, upperDelta, debug)
    t3 = time.time()
    F2 = m.apply()
    t4 = time.time()
    print('F2')
    print(F2)
    print("Comparison:" + str(F1 == F2))

    btime = (t2 - t1)
    mtime = (t4 - t3)
    print('btime:' + str(btime) + "VS" + 'mtime' + str(mtime))

    print('Other small custom test...')
    customSeq = gen.generateCustom()
    print('CustomSequence:')
    print(customSeq)
    bf2 = BruteForce(customSeq, 1, 3, 3, debug)
    m2 = Meselo(customSeq, 1, 3, 3, debug)
    t5 = time.time()
    F3 = bf2.apply()
    t6 = time.time()
    print('F3')
    print(F3)
    t7 = time.time()
    F4 = m2.apply()
    t8 = time.time()
    print('F4')
    print(F4)
    btime2 = (t6 - t5)
    mtime2 = (t8 - t7)
    print('btime:' + str(btime2) + "VS" + 'mtime' + str(mtime2))
    print('Comparison:' + str(F3 == F4))

    #    print(eventSeq)
    print('end........')
Example #17
def one_shot(name):
    global dims,n_layers
    r_net = net.FFNN(n_layers)
    g_net = net.FFNN(n_layers)
    b_net = net.FFNN(n_layers)

    
    r_build = gen.Generator(dims,r_net)
    g_build = gen.Generator(dims,g_net)
    b_build = gen.Generator(dims,b_net)
    
    colours = [r_build,g_build,b_build]
    mapping = []
    for x in colours:
        x.genEnv(0)
        mapping.append(x.getEnv())
        
    main = scene.Draw(dims,mapping)
    main.save_image(f"Images//PerlinNoise{name}","PNG",main.res)
Example #18
 def _generate_cpp(self , header):
     filename = self.filename + ".cpp"
     cppfile  = Generator.Generator(filename)
     self._write_author( self.cpp , cppfile )
     cppfile.Write('#include "gtest/gtest.h"\n'.format(header))
     cppfile.Write('#include "{0}.h"\n'.format(self.cpp.name))
     cppfile.Write('#include "{0}.h"\n'.format(header))
     self._write_using_namespace(cppfile)
     self._write_gtest(cppfile)
     cppfile.Destory()
Example #19
def Adveraisl_training(G, g_op, D, d_op, d_loader):

    G_beta = Generator(embed_size,
                       hidden_size,
                       vocab_size,
                       num_layers,
                       use_cuda=cuda)
    restore(G_beta, G_path)
    log = open('save/log.txt', 'w')
    print('start Adveraisl_training!')

    for i in range(epochs):
        #train G
        start = time.time()
        buffer = 'start epoch{}'.format(i + 1)
        print(buffer)
        log.write(buffer)
        for _ in range(g_step):
            g_op.zero_grad()
            #First generate a whole sequence
            samples, preds = G.generate_sample(batch_size, seq_len)
            #Then get reward step by step
            rewards = get_rewards(G_beta, D, samples, rollout_num)
            loss = G.PG_loss(samples, preds, rewards)
            buffer = 'reward = {:.3f} PGloss={:.3f}'.format(
                (rewards.mean()).data.item(), (loss).data.item())
            print(buffer)
            log.write(buffer)
            loss.backward()
            clip_grad_norm_(G.parameters(), max_norm=max_norm)
            g_op.step()

        #update G_beta
        updataG_beta(G_beta, G, update_rate)

        #train D
        for _ in range(d_step):
            G.get_negsample(neg_path, neg_num, seq_len)
            d_loader.create_batch(pos_num, batch_size)
            for r in range(3):
                d_loader.reset_pointer()
                losses = 0
                for j in range(d_loader.batch_num):
                    d_op.zero_grad()
                    sentence, labels = d_loader.next_batch()
                    loss = D.loss_fn(sentence, labels)
                    losses += loss
                    loss.backward()
                    d_op.step()
        loss = test(G, g_loader)
        buffer = 'test loss={:.3f}'.format((loss))
        log.write(buffer)
        print('epoch{}Done! cost{:.2f}s'.format(i + 1, time.time() - start))
        save(D, D_path)
        save(G, G_path)
Example #20
def main():
    with tf.Graph().as_default():
        with tf.device("/gpu:1"):
            session_conf = tf.ConfigProto(
                allow_soft_placement=FLAGS.allow_soft_placement,
                log_device_placement=FLAGS.log_device_placement)
            sess = tf.Session(config=session_conf)

            with sess.as_default(), open(log_precision, "w") as log, open(
                    loss_precision, "w") as loss_log:

                DIS_MODEL_FILE = "model/irgan_eval_pre-trained.model"  # overfitted DNS
                param = pickle.load(open(DIS_MODEL_FILE, "rb"))  #add

                loss_type = "pair"

                with tf.name_scope('discriminator_setting') as scope:
                    discriminator = Discriminator.Discriminator(
                        sequence_length=FLAGS.max_sequence_length,
                        batch_size=FLAGS.batch_size,
                        vocab_size=len(vocab),
                        embedding_size=FLAGS.embedding_dim,
                        filter_sizes=list(
                            map(int, FLAGS.filter_sizes.split(","))),
                        num_filters=FLAGS.num_filters,
                        learning_rate=FLAGS.learning_rate,
                        l2_reg_lambda=FLAGS.l2_reg_lambda,
                        # embeddings=embeddings,
                        embeddings=None,
                        paras=param,
                        loss=loss_type)

                with tf.name_scope('generator_setting') as scope:
                    generator = Generator.Generator(
                        sequence_length=FLAGS.max_sequence_length,
                        batch_size=FLAGS.batch_size,
                        vocab_size=len(vocab),
                        embedding_size=FLAGS.embedding_dim,
                        filter_sizes=list(
                            map(int, FLAGS.filter_sizes.split(","))),
                        num_filters=FLAGS.num_filters,
                        learning_rate=FLAGS.learning_rate * 0.1,
                        l2_reg_lambda=FLAGS.l2_reg_lambda,
                        # embeddings=embeddings,
                        embeddings=None,
                        paras=param,
                        loss=loss_type)

                sess.run(tf.global_variables_initializer())
                tw = tf.summary.FileWriter("log_dir", graph=sess.graph)  #add
                evaluation(sess, discriminator, log, 0)
                '''
Example #21
 def __init__(self, master, controller):
     # parameters that you want to send through the Frame class.
     Frame.__init__(self, master)
     self.controller = controller
     #reference to the master widget, which is the tk window
     self.master = master
     self.device = torch.device("cuda:0" if (
         torch.cuda.is_available() and ngpu > 0) else "cpu")
     self.noise = torch.randn(64, nz, 1, 1, device=self.device)
     self.model = Generator(ngpu).to(self.device)
     self.dataset = IntVar()
     self.num = IntVar()
     self.init_window()
Example #22
 def __init__(self,item,customer):
     
     self.item = item
     self.customer = customer
     self.seller = Seller(item,customer)
     self.customer_sequence = []
     self.generator = Generator(customer)
     self.revenue = {}
     #self.revenue_upper_bound = {}
     #only need to fetch the last day's upper_bound
     self.revenue_upper_bound=0
     
     self.time_length = 0
     self.IB_function_type = ''
     self.IB_function_type_sequence = []
     self.customer_choose_mode = ''
     
     self.log = {}
Example #23
def main():
    ''' Iterate over all .txt files in the directory.  This is useful for
	making a script that periodically scans all feeds for updates, without
	opening a new browser for each feed '''
    for feed in os.listdir("."):

        if feed.endswith(".txt"):

            input_file = open(feed, "r")
            information = input_file.readlines()
            ''' Strip newline from every line of input '''
            i = 0
            info_len = len(information)
            while i < info_len:
                information[i] = information[i].rstrip('\n')
                i += 1
            ''' Grab the information to send to the generator '''
            title = information[0]
            link = information[1]
            description = information[2]

            browser.get(link)

            title_selector = information[3]
            link_selector = information[4]
            description_selector = information[5]

            generator = Generator(browser, title, link, description)
            generator.find_new_articles(title_selector, link_selector,
                                        description_selector)

            if generator.error_no_new_articles() is True:
                continue

            if os.path.isfile(generator.get_filename()) is False:
                generator.generate_new_feed()
            else:
                generator.append_feed()

            generator.archive_new_feeds()

            input_file.close()

    browser.quit()
Example #24
def uppercase():
    url = request.GET.get('s', default=None)

    if url is not None:
        #url = "http://www.apple.com/legal/internet-services/itunes/us/terms.html"
        sc = Scrapper.Scrapper()
        obj = sc.scrap(url)
        #print obj
        #obj = json.dumps({'privacy':{'p':[{'name':'first para in privacy'},{'name':'second para in privacy'}] },
        # 'taxes':{'p':[{'name':'first para in taxes'},{'name':'second para in taxes'}] }
        #})
        obj = Analysis.Analysis().analysis(obj)

        #obj = json.dumps({'privacy':{'classify':'computer software','p':[{'name':'first para in privacy','summarizer':'example of summarized paragraph','tags':['tag1','tag2','tag3']},{'name':'second para in privacy','summarizer':'example of summarized paragraph','tags':['tag1','tag2','tag3']}] },
        # 'taxes':{'classify':'computer software','p':[{'name':'first para in taxes','summarizer':'example of summarized paragraph','tags':['tag1','tag2','tag3']},{'name':'second para in taxes','summarizer':'example of summarized paragraph','tags':['tag1','tag2','tag3']}] }
        #})

        obj = Generator.Generator().generate(obj)

        return json.dumps(obj, indent=4)
Example #25
 def _genator_header(self):
     filename = self.filename + ".h"
     hfile = Generator.Generator(filename)
     self._write_author(self.cpp, hfile)
     defineHeader = self.filename.upper()
     hfile.Write("#ifndef __{0}_H__\n".format(defineHeader))
     hfile.Write("#define __{0}_H__\n".format(defineHeader))
     self._write_include(hfile)
     self._write_namespace_begin(hfile)
     hfile.Write("{0}class {1} {2}\n".format(self.format, self.filename,
                                             self.extend))
     hfile.Write("{0}{1}\n".format(self.format, '{'))
     hfile.Write("{0}public:\n".format(self.format))
     self._write_constuctor(hfile)
     self._write_deconstructor(hfile)
     hfile.Write("{0}{1};\n".format(self.format, '}'))
     self._write_namespace_end(hfile)
     hfile.Write("#endif\n".format())
     hfile.Destory()
     return filename
Example #26
    def open_map(self):
        # Clear all map tiles
        for i in range(15, 20):
            self.image_tiles[i]["tilesImages"] = []

        # Decides whether to generate a new map
        new = raw_input("Generate New map (Y/N): ")

        if new == "Y" or new == "y":
            g = Generator.Generator(self.TILE_SIZE, self.image_tiles)
            name = g.generate_maps()
            self.parse_data_file(name)
        else:
            while True:
                name = raw_input("map Name? ")
                if os.path.exists(name):
                    break
                else:
                    print "map doesn't exist"

            self.parse_data_file(name)

        self.build_tiles()
Example #27
 def setup(cls):
     cls.generatorA = Generator(65, 16807)
     cls.generatorB = Generator(8921, 48271)
Example #28
def PatternTheory(inputFile, semanticBondPath, topK, outFile):
    BondID = count(0)
    genID = count(0)
    localSwapSpace = {}
    topKLabel = 0
    priorScale = 10  #changed from 100 to 10 on 3/28/2020 7:29 PM

    #Load Feature labels and create feature generators
    equivalence = {}
    feature_generators = []
    with open(inputFile) as f:
        for featFile in f:
            feat = str.split(featFile.replace('\n', ''))
            feat = str.split(featFile.replace('/home/saakur/Desktop/', './'))
            equivalence = {}
            with open(feat[1]) as fl:
                for line in fl:
                    l = str.split(line.replace('\n', ''))
                    equivalence[l[0]] = float(l[1])
            if topKLabel > 0:
                sorted_equiv = sorted(equivalence.items(),
                                      key=operator.itemgetter(1),
                                      reverse=True)
                equivalence = {}
                for k, v in sorted_equiv[:topKLabel]:
                    equivalence[k] = v * supBondWeight

            G = gen.Generator(next(genID), feat[0], feat[0], "Feature",
                              feat[0])
            supBG = sup_b.SupportBond(next(BondID), "SupportBond", "OUT",
                                      G.generatorID)
            supBG.compatible = deepcopy(equivalence)
            G.addOutBond(supBG)
            feature_generators.append(G)
            localSwapSpace[G.feature] = []

    filelist = [f for f in os.listdir(semanticBondPath) if f.endswith(".txt")]

    semBondDict = {}

    #Load semantic bonds
    for semanticBondFile in filelist:
        semBondName = str.split(
            str.split(semanticBondFile.replace('\n', ''), '_').pop(), '.')[0]
        if semBondName not in semBondDict.keys():
            semBondDict[semBondName] = {}

        equivalence = {}
        with open(semanticBondPath + semanticBondFile) as fl:
            for line in fl:
                semBondDict_Concept = {}
                l = str.split(line.replace('\n', ''), ',')
                label = l.pop(0)
                label = str.split(label, '-')[0]

                for l1 in l:
                    l2 = str.split(l1.replace('\n', ''), ':')

                    if (semBondName != "Similarity"):
                        semBondDict_Concept[l2[0]] = float(
                            l2[1]) * priorScale * semBondWeight
                    else:
                        semBondDict_Concept[l2[0]] = float(
                            l2[1]) * semBondWeight * priorScale
                equivalence[label] = dict(semBondDict_Concept.copy())
        semBondDict[semBondName] = dict((equivalence.copy()))

    #For each of the feature generators, create generators for each of the label possibilities
    # and bonds for each of the generators
    for g in feature_generators:
        #Determine the label category to create generators for them
        genType = g.generatorName

        #For each of the outbound bonds of the feature generators, create the generators for each of the label candidates
        for bondID in g.outBonds.keys():
            b = g.outBonds[bondID]
            #For each of the label candidates, create the generators and bonds
            for label in b.compatible:
                #Create the generator
                G = gen.Generator(next(genID), label, label, genType,
                                  g.generatorName)
                #Create and add complementary support bond
                supBG_IN = sup_b.SupportBond(next(BondID), "SupportBond", "IN",
                                             G.generatorID)
                G.addInBond(supBG_IN)
                for semBondName in semBondDict.keys():
                    if label in semBondDict[semBondName].keys():
                        #Create a semantic bond
                        semBG_OUT = sem_b.SemanticBond(next(BondID),
                                                       semBondName, "OUT",
                                                       G.generatorID)
                        #Add the equivalence table for the semantic bond
                        semBG_OUT.compatible = deepcopy(
                            semBondDict[semBondName][label])
                        #Create complementary semantic bond
                        semBG_IN = sem_b.SemanticBond(next(BondID),
                                                      semBondName, "IN",
                                                      G.generatorID)
                        #Add created bonds to the generator
                        G.addOutBond(semBG_OUT)
                        G.addInBond(semBG_IN)

                #Add created generator to list with generators of equivalent modality
                localSwapSpace[G.feature].append(G)

    globalSwapSpace = {}
    for fg in feature_generators:
        globalSwapSpace[fg.generatorID] = fg

    debugFIle = "./debugFile.txt"
    # globalProposalChance helps avoid local minima; it needs to be less than 1 when using MCMC candidate proposal
    globalProposalChance = 1.0
    PS = inf.Inference(localSwapSpace, globalSwapSpace, False, debugFIle, topK,
                       globalProposalChance)
    topKConfig = PS.run_inference()

    topVal = 1
    ftFile = open(outFile, 'w')
    ftFile.write("Top %s results:\n" % topK)
    ftFile.close()
    for key in sorted(topKConfig):
        topKConfig[key].printConfig(outFile, localSwapSpace.keys())
        topVal += 1

    # Clean up for Anneal
    filelist = [f for f in os.listdir(".") if f.endswith(".state")]
    for f in filelist:
        os.remove(f)
Example #29
def process(fold_index=0):
    with tf.Graph().as_default():
        #with tf.device('/gpu:0'):
        session_conf = tf.ConfigProto(
            allow_soft_placement=FLAGS.allow_soft_placement,
            log_device_placement=FLAGS.log_device_placement)
        sess = tf.Session(config=session_conf)
        with sess.as_default(), open(precision_log + str(fold_index),
                                     'w') as log:
            if len(sys.argv) > 1:
                paramG = cPickle.load(open(pre_trained_path + sys.argv[1],
                                           'r'))
                paramD = cPickle.load(open(pre_trained_path + sys.argv[2],
                                           'r'))
            else:
                paramG = None
                paramD = None
            eb_samples, pro_samples = cPickle.load(
                open(path + 'train_samples' + str(fold_index), 'r'))
            query_prof_dim = len(pro_samples[0][0])
            response_prof_dim = len(pro_samples[0][-1])
            batch_size = FLAGS.batch_size
            num_batches = int(math.ceil(len(eb_samples) / batch_size))
            print np.shape(eb_samples), np.shape(
                pro_samples), query_prof_dim, response_prof_dim
            dis = Discriminator.Discriminator(
                FLAGS.max_sequence_len, FLAGS.batch_size, len(vocab),
                FLAGS.embedding_dim,
                list(map(int, FLAGS.filter_sizes.split(","))),
                FLAGS.num_filters, query_prof_dim, response_prof_dim,
                FLAGS.dropout, FLAGS.l2_reg, FLAGS.learning_rate, paramD, None,
                FLAGS.loss, True, FLAGS.score_type)
            gen = Generator.Generator(
                FLAGS.max_sequence_len, FLAGS.batch_size, len(vocab),
                FLAGS.embedding_dim,
                list(map(int, FLAGS.filter_sizes.split(","))),
                FLAGS.num_filters, query_prof_dim, response_prof_dim,
                FLAGS.dropout, FLAGS.l2_reg, FLAGS.learning_rate / 50, paramG,
                None, FLAGS.loss, True, FLAGS.score_type)
            sess.run(tf.global_variables_initializer())

            for i in range(FLAGS.num_epochs):
                #g_batch_size = len(eb_samples)
                for g_epoch in range(FLAGS.g_epochs_num):
                    #if i == 0:
                    #    break
                    step, current_loss, positive,negative, pos_score, neg_score = 0, 0.0, 0.0, 0.0, [],[]
                    for _index, row in enumerate(eb_samples):
                        #candidate_index = range(len(eb_samples))
                        #sampled_index = list(np.random.choice(candidate_index, size=[batch_size],replace=False))
                        #if _index not in sampled_index:
                        #    sampled_index[-1] = _index
                        #sampled_index = list(candidate_index)
                        for ib in range(1):
                            ib = _index / batch_size
                            end_index = min((ib + 1) * batch_size,
                                            len(eb_samples))
                            eb_sample_pools = eb_samples[end_index -
                                                         batch_size:end_index]
                            pro_sample_pools = pro_samples[
                                end_index - batch_size:end_index]
                            feed_dict = {
                                gen.input_x_1: [row[0]] * batch_size,
                                gen.input_x_2: [row[1]] * batch_size,
                                gen.input_x_3: eb_sample_pools[:, 2],
                                gen.prof_1:
                                [pro_samples[_index][0]] * batch_size,
                                gen.prof_2:
                                [pro_samples[_index][1]] * batch_size,
                                gen.prof_3: list(pro_sample_pools[:, 2])
                            }
                            predicted = sess.run(gen.gan_score, feed_dict)
                            exp_rating = np.exp(np.array(predicted) * 10)
                            prob = exp_rating / np.sum(exp_rating)
                            neg_index = np.random.choice(range(batch_size),
                                                         size=[FLAGS.gan_k],
                                                         p=prob,
                                                         replace=False)
                            feed_dict = {
                                dis.input_x_1: [row[0]] * FLAGS.gan_k,
                                dis.input_x_2: [row[1]] * FLAGS.gan_k,
                                dis.input_x_3: eb_sample_pools[neg_index][:,
                                                                          2],
                                dis.prof_1:
                                [pro_samples[_index][0]] * FLAGS.gan_k,
                                dis.prof_2:
                                [pro_samples[_index][1]] * FLAGS.gan_k,
                                dis.prof_3:
                                list(pro_sample_pools[neg_index][:, 2])
                            }
                            reward = sess.run(dis.reward, feed_dict)
                            feed_dict = {
                                gen.input_x_1: eb_sample_pools[:, 0],
                                gen.input_x_2: eb_sample_pools[:, 1],
                                gen.input_x_3: eb_sample_pools[:, 2],
                                gen.prof_1: list(pro_sample_pools[:, 0]),
                                gen.prof_2: list(pro_sample_pools[:, 1]),
                                gen.prof_3: list(pro_sample_pools[:, 2]),
                                gen.neg_index: neg_index,
                                gen.reward: reward
                            }
                            _, step, current_loss, gan_score, pos, neg, pos_score, neg_score = sess.run(
                                [
                                    gen.gan_updates, gen.global_step,
                                    gen.gan_loss, gen.gans, gen.positive,
                                    gen.negative, gen.pos_score, gen.neg_score
                                ], feed_dict)
                            #print pos_score[:1], neg_score[:1],current_loss, step, _index, len(eb_samples)
                    line = (
                        "%s: GEN step %d %d, loss %f  gan score %f ,pos score %f, neg score %f, total step: %d "
                        % (timestamp(), step, i, current_loss, gan_score, pos,
                           neg, FLAGS.num_epochs * FLAGS.g_epochs_num *
                           len(eb_samples)))
                    print line
                    log.write(line + "\n")
                d_eb_samples, d_pro_samples = gan_samples(
                    eb_samples, pro_samples, sess, gen)
                for d_epoch in range(FLAGS.d_epochs_num):
                    step, current_loss, accuracy = 0, 0.0, 0.0
                    for ib in range(num_batches):
                        end_index = min((ib + 1) * batch_size,
                                        len(d_eb_samples))
                        eb_batch = d_eb_samples[end_index -
                                                batch_size:end_index]
                        pro_batch = d_pro_samples[end_index -
                                                  batch_size:end_index]
                        feed_dict = {
                            dis.input_x_1: eb_batch[:, 0],
                            dis.input_x_2: eb_batch[:, 1],
                            dis.input_x_3: eb_batch[:, 2],
                            dis.prof_1: list(pro_batch[:, 0]),
                            dis.prof_2: list(pro_batch[:, 1]),
                            dis.prof_3: list(pro_batch[:, 2])
                        }
                        _, step, current_loss, accuracy, pos_score, neg_score = sess.run(
                            [
                                dis.updates, dis.global_step, dis.loss,
                                dis.accuracy, dis.positive, dis.negative
                            ], feed_dict)

                    line = (
                        "%s: Dis step %d %d, loss %f with acc %f, pos score %f, neg score %f, total step: %d "
                        % (timestamp(), step, i, current_loss, accuracy,
                           pos_score, neg_score, FLAGS.num_epochs *
                           FLAGS.d_epochs_num * num_batches))
                    print line
                    log.write(line + '\n')
                if i != FLAGS.num_epochs - 1:
                    evaluation(sess, gen, log, batch_size,
                               path + 'test_samples' + str(fold_index))
                    evaluation(sess, dis, log, batch_size,
                               path + 'test_samples' + str(fold_index))
            evaluation(sess, gen, log, batch_size,
                       path + 'test_samples' + str(fold_index), True)
            evaluation(sess, dis, log, batch_size,
                       path + 'test_samples' + str(fold_index), True)
Example #30
import chainer
from chainer import serializers
from nn.DCGAN import Generator
from utils import config_parse as cp
from computations.trainer import generate_training_video

MODEL_PATH = './models/generator.npz'
N_GENERATIONS = 100

conf_parser = cp.Config('setup.ini')
params = conf_parser.get_params()
generator = Generator(params.ch, params.latent_dim)
serializers.load_npz(MODEL_PATH, generator)

for i in range(N_GENERATIONS):
    print(f'Generator video {i} / {N_GENERATIONS}', end='\r')
    generate_training_video(generator, filename=f'./gifs/gif_{i}.gif')