Пример #1
0
    async def process_message(self, msg):
        """Hand the first forwarded message (if any) over to the bot handler."""
        forwarded = traverse(await msg.get_full_forwarded())

        # traverse() yields forwarded messages; schedule only the first one.
        first = next(forwarded, None)
        if first is not None:
            ensure_future(self.bot.handler.process(first))
Пример #2
0
def find_next_lower(track: Track, this: Point) -> (Point, Point):
    """Scan forward from *this* for the first point whose max velocity is
    at or below this point's, tracking the slowest point seen on the way.

    Returns ``(match, slowest_so_far)``; when no point qualifies, the
    final point of the track is returned in place of a match.
    """
    slowest = this.next
    for candidate in traverse(this):
        if candidate.max_velocity <= this.max_velocity:
            return candidate, slowest
        if candidate.max_velocity < slowest.max_velocity:
            slowest = candidate
    return track.points[-1], slowest
Пример #3
0
def plot_tf_idf_post(dictionary_tf_idf, title):
    """Bar-plot the tf-idf weights of one post, highest weight first.

    Term keys are passed through ``utils.traverse`` before plotting
    (presumably display normalisation -- confirm against utils).
    """
    weights = dict(dictionary_tf_idf[title])
    transformed = {utils.traverse(term): weight
                   for term, weight in weights.items()}
    frame = pd.DataFrame.from_dict(transformed,
                                   orient='index').sort_values(by=0,
                                                               ascending=False)
    axes = frame.plot(kind='bar',
                      figsize=(15, 7),
                      fontsize=8,
                      legend=False,
                      title=utils.traverse(title))
    # Label each bar with its height.
    for patch in axes.patches:
        axes.annotate(str(patch.get_height()),
                      (patch.get_x() * 0.98, patch.get_height() * 1.001),
                      fontsize=14)
    plt.show()
Пример #4
0
 def __iter__(self):
     """Yield one dict per blame line for every file in the repository."""
     for path in utils.traverse(self.repo.name):
         records = []
         for blame_line in utils.get_blame(path):
             record = utils.line_to_dict(blame_line)
             self.expand_dict(record, path)
             records.append(record)
         if self.merged:
             records = self.merge(records)
         yield from records
Пример #5
0
 def __iter__(self):
     """Produce blame-line dicts file by file, optionally merged."""
     paths = utils.traverse(self.repo.name)
     for current in paths:
         collected = []
         for raw_line in utils.get_blame(current):
             parsed = utils.line_to_dict(raw_line)
             self.expand_dict(parsed, current)
             collected.append(parsed)
         result = self.merge(collected) if self.merged else collected
         for item in result:
             yield item
Пример #6
0
def remove_SimpleName(root):
    """Collapse ``(SimpleName)`` nodes into leaves and rename identifier/value labels in place."""
    for n in traverse(root):
        label = n.label
        if "(SimpleName)" in label and "=" not in label:
            child_label = n.children[0].label
            # A SimpleName node must carry exactly one identifier child.
            if not child_label.startswith("identifier="):
                raise Exception("ERROR!")
            n.label = "SimpleName_" + child_label[len("identifier="):]
            n.children = []
        elif label.startswith("identifier="):
            n.label = "SimpleName_" + label[len("identifier="):]
        elif label.startswith("value="):
            n.label = "Value_" + label[len("value="):]

    return root
Пример #7
0
def create_word_cloud(no_topics, lda, feature_names):
    """Render and save one word cloud image per LDA topic.

    :param no_topics: number of topics to render
    :param lda: fitted LDA model exposing ``components_``
    :param feature_names: vocabulary terms aligned with ``lda.components_`` columns
    """
    for i in range(no_topics):
        # Pair each (traversed) term with its weight in topic i.
        freqs = dict(zip(utils.traverse(feature_names), lda.components_[i]))
        wc = wordcloud.WordCloud(background_color='white',
                                 max_words=50,
                                 stopwords=utils.get_stop_words())
        image = wc.generate_from_frequencies(freqs)
        # BUG FIX: the original mixed a pathlib `/` join with `+` on a raw
        # Windows fragment (r'\Topic'); `Path + str` raises TypeError and
        # the backslash is not portable.  Build the full path with `/`.
        # (Assumes WHERE_OUTPUTS is a pathlib.Path -- the `/` in the
        # original implies it; confirm.)
        image.to_file(str(WHERE_OUTPUTS / 'outputs' / f'Topic{i + 1}.png'))
        plt.figure()
        plt.imshow(wc, interpolation='bilinear')
        plt.axis("off")
        plt.show()
Пример #8
0
def plot_part_of_day(dictionary_time, title):
    """Bar-plot counts per part of day, sorted in descending order."""
    frame = (pd.DataFrame
             .from_dict(dictionary_time, orient='index')
             .sort_values(by=0, ascending=False))
    axes = frame.plot(kind='bar',
                      figsize=(15, 7),
                      fontsize=8,
                      legend=False,
                      title=utils.traverse(title))
    # Annotate every bar with its height.
    for patch in axes.patches:
        axes.annotate(str(patch.get_height()),
                      (patch.get_x() * 0.98, patch.get_height() * 1.001),
                      fontsize=14)
    plt.show()
Пример #9
0
    async def process_message(self, msg):
        """Kick a user from the chat or (re-)invite them, per parsed command.

        Only usable inside multichats; optionally restricted to admins.
        The target user id is taken from a forwarded message first, then
        from the command arguments.
        """
        if not msg.is_multichat:
            return await msg.answer(
                "Данную команду можно использовать только в беседах!")

        if self.admins_only and msg.user_id not in self.admins:
            return await msg.answer("Вы не являетесь администратором.")

        command, text = self.parse_message(msg)

        # "inv" (invite) mode: commands in self.unkick re-add a user
        # instead of removing them.
        if command in self.unkick:
            inv = True
        else:
            inv = False

        parts = text.split(" ")
        kick_time = 300  # default exile duration, seconds
        puid = None  # target user id

        # Prefer the author of a forwarded message as the target.
        for m in traverse(await msg.get_full_forwarded()):
            if m.user_id and m.true_user_id != msg.user_id:
                puid = m.true_user_id
                break

        # First numeric token: kick duration when the target came from a
        # forward, otherwise the target user id itself.
        if len(parts) > 0 and parts[0].isdigit():
            if puid is not None:
                kick_time = int(parts[0])

            else:
                puid = int(parts[0])

        if not puid:
            return await msg.answer(
                f"Введите ID пользователя, которого хотите "
                f"{'вернуть или добавить' if inv else 'выкинуть'}.")

        if inv:
            # Re-invite: lift any pending exile, then add the user back.
            if puid in self.exiled:
                del self.exiled[puid]

            return await self.api.messages.addChatUser(chat_id=msg.chat_id,
                                                       user_id=puid)

        # Remaining tokens may spell out an explicit kick duration.
        if len(parts) > 1 and " ".join(parts[1:]).isdigit():
            kick_time = int(" ".join(parts[1:]))

        # Record when the exile expires, then remove the user from the chat.
        self.exiled[puid] = time.time() + kick_time

        await self.api.messages.removeChatUser(chat_id=msg.chat_id,
                                               user_id=puid)
Пример #10
0
    def extract_action(self, posted):
        """
        Retrieve the action name for the actions taken by the user (i.e. submitted
        through an http post request).
        """
        matched = [candidate for candidate in utils.traverse(self.actions)
                   if candidate.html_id in posted]

        # Guard clauses: nothing posted, or an ambiguous submission.
        if not matched:
            return None
        if len(matched) > 1:
            raise ValueError('Cannot process more than one action.')
        return matched[0]
Пример #11
0
def process_file(input_file):
	"""Read *input_file*, split it into lines and NP-chunk each sentence.

	NOTE(review): ``content_file.read().decode('utf-8')`` only works on
	Python 2 (where read() returns a byte string); on Python 3 this raises
	AttributeError -- confirm the target interpreter.
	"""
	file_text=''
	with open(input_file, 'r') as content_file:
	  file_text = content_file.read().decode('utf-8')

	#sentences= nltk.sent_tokenize(file_text)
	# Sentences are taken to be the physical lines of the file.
	sentences=file_text.split('\n')
	noun_phrases=[] 
	# Train and score a bigram chunker on the CoNLL-2000 NP chunks.
	train_sents = conll2000.chunked_sents('train.txt', chunk_types=['NP'])
	test_sents = conll2000.chunked_sents('test.txt', chunk_types=['NP']) 
	chunker = BigramChunker(train_sents)
	print (chunker.evaluate(test_sents))
	for sent in sentences:
	  if not sent:
	    continue
	  tokens = nltk.word_tokenize(sent)
	  if len(tokens)>0:
	    tagged = nltk.pos_tag(tokens)
	    chunked = chunk_np(tagged)
	    #chunked = chunker.parse(tagged)
	    #chunked.draw()
	    # Walk the chunk tree; result is discarded here (side effects only).
	    utils.traverse(chunked)
	  
	  """
Пример #12
0
def modifier(root, dic):
    """Replace out-of-vocabulary node labels with <UNK>/<NUM>/<STR> placeholders in place."""
    for n in traverse(root):
        label = n.label
        if is_SimpleName(label):
            if label not in dic:
                n.label = "SimpleName_<UNK>"
        elif is_value(label):
            if label not in dic:
                n.label = "Value_<NUM>" if isnum(label) else "Value_<STR>"
        else:
            n.label = get_bracket(label)
        # Every final label must be known to the vocabulary.
        if n.label not in dic:
            raise Exception("Unknown word", n.label)

    return root
Пример #13
0
    async def process_message(self, msg):
        """Build and send a quote image from a forwarded author's messages."""
        command, otext = self.parse_message(msg)

        i, url, name, last_name, timestamp = None, None, None, None, None

        # Collect consecutive forwarded messages from a single author.
        for m in traverse(await msg.get_full_forwarded()):
            if m.full_text:
                if i == m.true_user_id:
                    # Same author again -- append to the quote text.
                    # NOTE(review): `text` is only assigned further below;
                    # if the author's profile fetch was skipped via
                    # `continue`, this raises UnboundLocalError -- confirm.
                    text += "\n" + m.full_text
                    continue
                elif i is not None:
                    # A second author appeared -- stop collecting.
                    break

                i = m.true_user_id
                timestamp = m.timestamp

                u = await self.api.users.get(user_ids=i, fields="photo_max")
                if not u:
                    continue

                u = u[0]

                url = u["photo_max"]
                name = u["first_name"]
                last_name = u["last_name"]

                text = m.full_text
        else:
            # Loop finished without break -- ensure someone was quoted.
            if i is None:
                return await msg.answer("Нечего цитировать!")

        # Download the author's avatar and shrink it for the quote card.
        async with aiohttp.ClientSession() as sess:
            async with sess.get(url) as response:
                img = Image.open(io.BytesIO(await response.read()))
                img = img.resize((200, 200), Image.NEAREST)

        result = await self.run_in_executor(self.make_image, img, text, name,
                                            last_name, timestamp, otext)

        # make_image returns a string on failure (error text for the user).
        if isinstance(result, str):
            return await msg.answer(result)

        attachment = await upload_photo(self.api, result, msg.user_id)

        return await msg.answer(attachment=str(attachment))
Пример #14
0
    def render(self, request, compress=True):
        """Render the configured resource, or traverse *path* to find one.

        Returns an empty unicode string when traversal yields nothing.

        NOTE(review): ``except Exception, err`` is Python 2 syntax -- this
        block targets a Python 2 codebase.
        """
        request = self._get_request(request)

        # A directly-bound resource short-circuits traversal entirely.
        if self.resource:
            return self.resource(request).GET()

        path = self.path

        resource = traverse(path, request)
        if resource is None:
            return u''

        # Adapt to IResource; the None default avoids an adaptation error.
        gresource = IResource(resource, None)
        if gresource is not None:
            try:
                return gresource.render(request)
            except Exception, err:
                # Log, then re-raise so callers still see the failure.
                log_exc(str(err))
                raise
Пример #15
0
    def validate(self, action):
        """
        Validates ESGF issue template against predefined JSON schema

        :param str action: The issue action/command
        :raises Error: If the template has an invalid JSON schema
        :raises Error: If the project option does not exist in esg.ini
        :raises Error: If the description is already published on GitHub
        :raises Error: If the landing page or materials urls cannot be reached
        :raises Error: If dataset ids are malformed

        """
        logging.info('Validating of issue...')
        # Load JSON schema for issue template
        with open(__JSON_SCHEMA_PATHS__[action]) as f:
            schema = load(f)
        # Validate issue attributes against JSON issue schema
        try:
            validate(self.json, schema)
        except ValidationError as ve:
            # NOTE(review): the trailing ``6`` is passed as a %-format arg,
            # but the message has no placeholder -- it looks intended as an
            # exit code; confirm.
            logging.exception('Validation has encountered an issue, error stack {}'.format(ve.message), 6)
            logging.exception('The value that has caused this behavior is {0}, picked up by the validator {1}'.format(
                ve.validator_value, ve.validator))
            sys.exit(6)
        except ValueError as e:
            # NOTE(review): ``e.message`` is Python 2 only.
            logging.exception(e.message)
        except Exception as e:
            logging.exception(repr(e.message))
            logging.exception('Validation Result: FAILED // {0} has an invalid JSON schema, error code: {1}'.
                              format(self.issue_path, 1))
            sys.exit(1)
        # Test landing page and materials URLs
        # Flatten the url/materials entries and drop missing ones.
        urls = filter(None, traverse(map(self.json.get, ['url', 'materials'])))
        if not all(map(test_url, urls)):
            # NOTE(review): message advertises error code 2 but exits with 1.
            logging.error('Validation Result: FAILED // URLs cannot be reached, error code {}'.format(2))
            sys.exit(1)
        # Validate the datasets list against the dataset id pattern
        if not all(map(test_pattern, self.json['datasets'])):
            logging.error('Validation Result: FAILED // Dataset IDs have invalid format, error code: {}'.format(3))
            sys.exit(1)
        logging.info('Validation Result: SUCCESSFUL')
Пример #16
0
    async def process_message(self, msg):
        """Moderation command dispatcher: ban lists, admin and moderator lists.

        Command indices in ``self.commands`` (inferred from the branches
        below -- confirm against the command definitions): 0 ban, 1 unban,
        2 add admin, 3 remove admin, 4 add moderator, 5 remove moderator,
        6 list banned users, 7 list admins/moderators.
        """
        # Strip the first matching prefix; bail out when none matches.
        text = msg.text
        for p in self.prefixes:
            if msg.text.startswith(p):
                text = text.replace(p, "", 1)
                break

        else:
            return

        # List banned users, resolving names in batches of up to 100 ids.
        if text == self.commands[6]:
            users = ""

            temp = ""
            temp_amount = 0

            for bu in self.banset:
                # NOTE(review): the ban branch below appends *int* ids to
                # self.banset, but this concatenation needs strings --
                # confirm what banset actually holds.
                temp += bu + ","
                temp_amount += 1

                if temp_amount > 99:
                    for u in (await self.api.users.get(user_ids=temp) or []):
                        users += u["first_name"] + " " + u["last_name"] + f" ({u['id']}), "

                    temp = ""
                    temp_amount = 0

            # Flush the final partial batch.
            if temp_amount:
                for u in (await self.api.users.get(user_ids=temp) or []):
                    users += u["first_name"] + " " + u["last_name"] + f" ({u['id']}), "

            return await msg.answer("Заблокированные пользователи:\n" + (users[:-2] if users[:-2] else "Нет"))

        # List administrators and moderators.
        if text == self.commands[7]:
            a_users = ""
            for u in (await self.api.users.get(user_ids=",".join(str(u) for u in self.admins)) or []):
                a_users += u["first_name"] + " " + u["last_name"] + f" ({u['id']}), "

            m_users = ""
            for u in (await self.api.users.get(user_ids=",".join(str(u) for u in self.moders)) or []):
                m_users += u["first_name"] + " " + u["last_name"] + f" ({u['id']}), "

            return await msg.answer("Администраторы:\n" + (a_users[:-2] if a_users[:-2] else "Нет") + "\n"
                                    "Модераторы:\n" + (m_users[:-2] if m_users[:-2] else "Нет"))

        # Everything below mutates state and requires admin rights.
        if not msg.data["is_admin"]:
            return await msg.answer("Вы не администратор!")

        puid = ""

        # Target id: prefer the author of a forwarded message.
        for m in traverse(await msg.get_full_forwarded()):
            if m.user_id and m.true_user_id != msg.user_id:
                puid = m.true_user_id
                break

        # Fall back to a numeric id given as the last word of the command.
        if not puid:
            puid = text.split(" ")[-1]

            if puid.isdigit():
                puid = int(puid)
            else:
                puid = ""

        if not puid:
            return await msg.answer(f"Ошибка при определении id пользователя!")

        # Ban.
        if text.startswith(self.commands[0]):
            if puid in self.banset:
                return await msg.answer("Уже забанен!")

            if puid in self.admins:
                return await msg.answer("Нельзя забанить администратора!")

            self.banset.append(puid)
            return await msg.answer(f"Успешно забанен: {puid}!")

        # Unban.
        if text.startswith(self.commands[1]):
            if puid not in self.banset:
                return await msg.answer(f"Пользователь не забанен: {puid}!")
            else:
                self.banset.remove(puid)
                return await msg.answer(f"Пользователь разбанен: {puid}!")

        # Promote to admin (capped at 100).
        if text.startswith(self.commands[2]):
            if len(self.admins) > 99:
                return await msg.answer(f"Уже максимум администраторов!")

            if puid in self.admins:
                return await msg.answer("Уже администратор!")

            if puid in self.banset:
                return await msg.answer("Этот пользователь забанен!")

            self.admins.append(puid)
            return await msg.answer(f"Успешно сделан администратором: {puid}!")

        # Demote from admin.
        if text.startswith(self.commands[3]):
            if puid not in self.admins:
                return await msg.answer(f"Пользователь не администратор: {puid}!")
            else:
                self.admins.remove(puid)
                return await msg.answer(f"Пользователь разжалован из администраторов: {puid}!")

        # Promote to moderator (capped at 100).
        if text.startswith(self.commands[4]):
            if len(self.moders) > 99:
                return await msg.answer(f"Уже максимум модераторов!")

            if puid in self.moders:
                return await msg.answer("Уже модератор!")

            self.moders.append(puid)
            return await msg.answer(f"Успешно сделан модератором: {puid}!")

        # Demote from moderator.
        if text.startswith(self.commands[5]):
            if puid not in self.moders:
                return await msg.answer(f"Пользователь не модератор: {puid}!")
            else:
                self.moders.remove(puid)
                return await msg.answer(f"Пользователь разжалован из модераторов: {puid}!")
Пример #17
0
 def prepare(elms):
     """Prefix every element with every entry of ``bslist`` and flatten to a tuple."""
     combos = [[prefix + " " + elem for elem in elms] for prefix in bslist]
     return tuple(traverse(combos))
Пример #18
0
    async def process_message(self, msg):
        """Transcribe attached voice/audio documents via the Yandex ASR HTTP API.

        Also handles per-peer on/off commands that control whether the
        plugin listens continuously (state 1) or for one message (state 2).
        """
        command, text = self.parse_message(msg)

        # Enable continuous listening for this peer.
        if command in self.commands_turn_on:
            if self.configurations.get(msg.peer_id, 0) in (0, 2):
                self.configurations[msg.peer_id] = 1
                return await msg.answer("Буду всё слушать и записывать :D")

            return await msg.answer("Я и так всё слушаю и записываю \_C:_/")

        # Disable listening for this peer.
        if command in self.commands_turn_off:
            del self.configurations[msg.peer_id]
            return await msg.answer("Как хочешь...")

        sound, exten = None, None

        async def check(ats):
            """Return (bytes, extension) of the first supported audio doc in *ats*."""
            if not ats:
                return None, None

            for at in ats:
                if at.type == "doc" and at.raw.get("ext") in SUPPORTED:
                    async with aiohttp.ClientSession() as sess:
                        async with sess.get(at.url) as resp:
                            return await resp.read(), at.raw.get("ext")

            return None, None

        # Look for audio in direct attachments first, then in forwards.
        if msg.brief_attaches:
            sound, exten = await check(await msg.get_full_attaches())

        if sound is None and msg.brief_forwarded:
            for m in traverse(await msg.get_full_forwarded()):
                sound, exten = await check(await m.get_full_attaches())

                if sound is not None:
                    break

        # No audio yet: arm one-shot mode if requested.
        if sound is None and command in self.commands_once:
            if self.configurations.get(msg.peer_id, 0) in (0, 2):
                self.configurations[msg.peer_id] = 2
                return await msg.answer("Переведу следующее в текст ;)")

            return await msg.answer("Я и так всё перевожу \_C:_/")

        if sound is None:
            if msg.is_out or self.configurations.get(msg.peer_id, 0) == 1:
                return False

            return await msg.answer("Мне нечего перевести в текст :(")

        # Stream the audio to the API in CHUNK_SIZE pieces.
        # NOTE(review): aiohttp.streamer was removed in aiohttp 4 -- this
        # code presumably targets aiohttp 3.x; confirm.
        @aiohttp.streamer
        def sound_sender(writer):
            chunki = 0
            chunk = sound[chunki * CHUNK_SIZE:(chunki + 1) * CHUNK_SIZE]

            while chunk:
                yield from writer.write(chunk)

                chunki += 1
                chunk = sound[chunki * CHUNK_SIZE:(chunki + 1) * CHUNK_SIZE]

        url = 'http://asr.yandex.net/asr_xml' + \
            '?uuid=%s&key=%s&topic=%s&lang=%s' % (uuid.uuid4().hex, \
                self.key, 'notes', 'ru-RU')

        async with aiohttp.ClientSession() as sess:
            async with sess.post(url,
                                 data=sound_sender(),
                                 headers={"Content-Type":
                                          SUPPORTED[exten]}) as resp:
                response = await resp.text()

                if resp.status != 200:
                    return await msg.answer(
                        "Мне не получилось ничего разобрать или я больше не работаю!"
                    )

        # The ASR service answers with XML; success flag lives in the root.
        root = ET.fromstring(response)

        if root.attrib['success'] not in ("1", 1):
            return await msg.answer("Мда. Нет.")

        return await msg.answer("\"" + str(root[0].text) + "\"")
Пример #19
0
    def train(self):
        """Run the FactorVAE training loop: alternate VAE and discriminator steps.

        Each iteration optimises the VAE (reconstruction + KL + gamma *
        total-correlation) on one batch, then the discriminator on a second
        batch with permuted latents. Periodically logs losses and saves
        latent-traversal / ground-truth plots.
        """

        self.net_mode(train=True)

        # Constant label tensors for the discriminator's cross-entropy.
        ones = torch.ones(self.batch_size,
                          dtype=torch.long,
                          device=self.device)
        zeros = torch.zeros(self.batch_size,
                            dtype=torch.long,
                            device=self.device)

        # NOTE(review): np.ceil is applied to self.steps alone -- probably
        # intended np.ceil(self.steps / len(self.dataloader)); confirm.
        epochs = int(np.ceil(self.steps) / len(self.dataloader))
        print("number of epochs {}".format(epochs))

        step = 0

        for e in range(epochs):
            #for e in range(1):

            for x_true1, x_true2 in self.dataloader:

                #if step == 50: break

                step += 1

                # VAE
                x_true1 = x_true1.unsqueeze(1).to(self.device)
                #print("x_true1 size {}".format(x_true1.size()))

                x_recon, mu, logvar, z = self.VAE(x_true1)

                #print("x_recon size {}".format(x_recon.size()))
                #print("mu size {}".format(mu.size()))
                #print("logvar size {}".format(logvar.size()))
                #print("z size {}".format(z.size()))

                # Reconstruction and KL
                vae_recon_loss = recon_loss(x_true1, x_recon)
                #print("vae recon loss {}".format(vae_recon_loss))
                vae_kl = kl_div(mu, logvar)
                #print("vae kl loss {}".format(vae_kl))

                # Total Correlation
                D_z = self.D(z)
                #print("D_z size {}".format(D_z.size()))
                tc_loss = (D_z[:, :1] - D_z[:, 1:]).mean()
                #print("tc loss {}".format(tc_loss))

                # VAE loss
                vae_loss = vae_recon_loss + vae_kl + self.gamma * tc_loss
                #print("Total VAE loss {}".format(vae_loss))

                # Optimise VAE
                self.optim_VAE.zero_grad()  #zero gradients the buffer
                # retain_graph=True keeps the graph alive for the
                # discriminator's backward pass on D_z below.
                vae_loss.backward(retain_graph=True)
                self.optim_VAE.step()  #Does the step

                # Discriminator
                x_true2 = x_true2.unsqueeze(1).to(self.device)
                z_prime = self.VAE(x_true2, decode=False)[3]
                z_perm = permute_dims(z_prime).detach(
                )  ## detaches the output from the graph. no gradient will be backproped along this variable.
                D_z_perm = self.D(z_perm)

                # Discriminator loss
                d_loss = 0.5 * (F.cross_entropy(D_z, zeros) +
                                F.cross_entropy(D_z_perm, ones))
                #print("d_loss {}".format(d_loss))

                # Optimise Discriminator
                self.optim_D.zero_grad()
                d_loss.backward()
                self.optim_D.step()

                # Logging
                if step % self.args.log_interval == 0:

                    print("Step {}".format(step))
                    print("Recons. Loss = " + "{:.4f}".format(vae_recon_loss))
                    print("KL Loss = " + "{:.4f}".format(vae_kl))
                    print("TC Loss = " + "{:.4f}".format(tc_loss))
                    print("Factor VAE Loss = " + "{:.4f}".format(vae_loss))
                    print("D loss = " + "{:.4f}".format(d_loss))

                # Saving
                if not step % self.args.save_interval:
                    filename = 'traversal_' + str(step) + '.png'
                    filepath = os.path.join(self.args.output_dir, filename)
                    # Save a latent-traversal image of the current model.
                    traverse(self.net_mode, self.VAE, self.test_imgs, filepath)

                # Saving plot gt vs predicted
                if not step % self.args.gt_interval:
                    filename = 'gt_' + str(step) + '.png'
                    filepath = os.path.join(self.args.output_dir, filename)
                    plot_gt_shapes(self.net_mode, self.VAE, self.dataloader_gt,
                                   filepath)
Пример #20
0
    def train(self):

        self.net_mode(train=True)

        ones = torch.ones(self.batch_size,
                          dtype=torch.long,
                          device=self.device)
        zeros = torch.zeros(self.batch_size,
                            dtype=torch.long,
                            device=self.device)

        epochs = int(np.ceil(self.steps) / len(self.dataloader))
        print("number of epochs {}".format(epochs))

        step = 0
        # dict of init opt weights
        #dict_init = {a: defaultdict(list) for a in range(10)}
        # dict of VAE opt weights
        #dict_VAE = {a:defaultdict(list) for a in range(10)}

        weights_names = [
            'encoder.2.weight', 'encoder.10.weight', 'decoder.0.weight',
            'decoder.7.weight', 'net.4.weight'
        ]

        dict_VAE = defaultdict(list)
        dict_weight = {a: [] for a in weights_names}

        for e in range(epochs):
            #for e in range():

            for x_true1, x_true2 in self.dataloader:

                #if step == 1: break

                step += 1
                """

                # TRACKING OF GRADS
                print("GRADS")
                for name, params in self.VAE.named_parameters():

                    if name == 'encoder.2.weight':
                        #size : 32,32,4,4
                        print("Grads: Before VAE optim step {}".format(step))
                        #if params.grad != None:
                        if step != 1:
                            if np.array_equal(dict_VAE[name], params.grad.numpy()) == False :
                            #if dict_VAE[name] != tuple(params.grad.numpy()):
                                print("Change in gradients {}".format(name))
                                #dict_init[step][name] = params.grad.numpy()
                                dict_VAE[name] = params.grad.numpy().copy()
                            else:
                                print("No change in gradients {}".format(name))
                            #print("name {}, params grad {}".format(name, params.grad[0, 0, :, :]))

                        else:
                            print("name {}, params grad {}".format(name, params.grad))
                            #dict_init[step][name] = None
                            dict_VAE[name] = None

                    if name == 'encoder.10.weight':
                        #size : 32,32,4,4
                        #print("Before VAE optim  encoder step {}".format(step))
                        #if params.grad != None:
                        if step != 1:
                            if np.array_equal(dict_VAE[name], params.grad.numpy()) == False :
                            #if dict_VAE[name] != tuple(params.grad.numpy()):
                                print("Change in gradients {}".format(name))
                                #dict_init[step][name] = params.grad.numpy()
                                dict_VAE[name] = params.grad.numpy().copy()
                            else:
                                print("No change in gradients {}".format(name))
                            #print("name {}, params grad {}".format(name, params.grad[0, 0, :, :]))

                        else:
                            print("name {}, params grad {}".format(name, params.grad))
                            #dict_init[step][name] = None
                            dict_VAE[name] = None

                    if name == 'decoder.0.weight':

                        #print("Before VAE optim  decoder step {}".format(step))
                        #if params.grad != None:
                        if step != 1:

                            if np.array_equal(dict_VAE[name], params.grad.numpy()) == False:
                            #if dict_VAE[name] != tuple(params.grad.numpy()):
                                print("Change in gradients {}".format(name))
                                dict_VAE[name] = params.grad.numpy().copy()
                            else:
                                print("No change in gradients {}".format(name))
                            #print("name {}, params grad {}".format(name, params.grad[:5, :2]))
                        else:
                            print("name {}, params grad {}".format(name, params.grad))
                            #dict_init[step][name] = None
                            dict_VAE[name] = None

                    if name == 'decoder.7.weight':

                        #print("Before VAE optim  decoder step {}".format(step))
                        #if params.grad != None:
                        if step != 1:

                            if np.array_equal(dict_VAE[name], params.grad.numpy()) == False:
                            #if dict_VAE[name] != tuple(params.grad.numpy()):
                                print("Change in gradients {}".format(name))
                                dict_VAE[name] = params.grad.numpy().copy()
                            else:
                                print("No change in gradients {}".format(name))
                            #print("name {}, params grad {}".format(name, params.grad[1, 1, :, :]))
                        else:
                            print("name {}, params grad {}".format(name, params.grad))
                            #dict_init[step][name] = None
                            dict_VAE[name] = None

                for name, params in self.D.named_parameters():

                    if name == 'net.4.weight':

                        #print("Before VAE optim  discrim step {}".format(step))
                        #if params.grad != None:
                        if step != 1:

                            if np.array_equal(dict_VAE[name], params.grad.numpy()) == False:
                            #if dict_VAE[name] != tuple(params.grad.numpy()):
                                print("Change in gradients {}".format(name))
                                dict_VAE[name] = params.grad.numpy().copy()
                            else:
                                print("No change in gradients {}".format(name))
                            #print("name {}, params grad {}".format(name, params.grad[1, 1, :, :]))
                        else:
                            print("name {}, params grad {}".format(name, params.grad))
                            #dict_init[step][name] = None
                            dict_VAE[name] = None
                        print()

                """

                # VAE
                x_true1 = x_true1.unsqueeze(1).to(self.device)
                #print("x_true1 size {}".format(x_true1.size()))

                x_recon, mu, logvar, z = self.VAE(x_true1)

                # Reconstruction and KL
                # --- VAE update (reconstruction + KL + total correlation) ---
                # Reconstruction and KL terms of the standard VAE objective.
                vae_recon_loss = recon_loss(x_true1, x_recon)
                vae_kl = kl_div(mu, logvar)

                # Total-correlation term: the discriminator D emits two logits
                # per sample; their mean difference approximates the density
                # ratio used as the TC estimate (FactorVAE-style — confirm
                # against the D training below, which labels real latents 0
                # and permuted latents 1).
                D_z = self.D(z)
                tc_loss = (D_z[:, :1] - D_z[:, 1:]).mean()

                # Full VAE loss with gamma-weighted total correlation.
                vae_loss = vae_recon_loss + vae_kl + self.gamma * tc_loss

                # Optimise the VAE on the combined loss. retain_graph=True is
                # required because the same graph (mu, logvar, z) is reused
                # below for the synergy term and the discriminator loss.
                self.optim_VAE.zero_grad()
                vae_loss.backward(retain_graph=True)
                self.optim_VAE.step()

                ##################
                # Synergy Max

                # Step 1: greedily select the latent dimension subset that
                # maximises the discounted synergy score (argmax over the
                # per-dimension KLs, discounted by omega).
                best_ai = greedy_policy_Smax_discount(self.z_dim,
                                                      mu,
                                                      logvar,
                                                      alpha=self.omega)

                # Step 2: compute I_max, the KL restricted to the selected
                # dimensions. A single selected dimension yields a 1-D tensor
                # and needs the univariate KL helper.
                mu_syn = mu[:, best_ai]
                logvar_syn = logvar[:, best_ai]

                if len(mu_syn.size()) == 1:
                    I_max = kl_div_uni_dim(mu_syn, logvar_syn).mean()
                else:
                    I_max = kl_div(mu_syn, logvar_syn)

                # Step 3: alpha-weighted synergy loss.
                syn_loss = self.alpha * I_max

                # Step 4: second VAE update, on the synergy term alone.
                # retain_graph=True keeps the graph alive for the
                # discriminator loss computed next.
                self.optim_VAE.zero_grad()
                syn_loss.backward(retain_graph=True)
                self.optim_VAE.step()
                # --- Discriminator update ---
                # Train D to separate real latents (D_z, computed above) from
                # dimension-wise permuted latents built from a second batch.
                x_true2 = x_true2.unsqueeze(1).to(self.device)
                z_prime = self.VAE(x_true2, decode=False)[3]
                # detach(): cut the graph so no gradient from d_loss flows
                # back into the VAE encoder.
                z_perm = permute_dims(z_prime).detach()
                D_z_perm = self.D(z_perm)

                # Cross-entropy with real latents labelled `zeros` and
                # permuted latents labelled `ones`.
                d_loss = 0.5 * (F.cross_entropy(D_z, zeros) +
                                F.cross_entropy(D_z_perm, ones))

                self.optim_D.zero_grad()
                d_loss.backward()
                self.optim_D.step()

                # Logging
                if step % self.args.log_interval == 0:

                    print("Step {}".format(step))
                    print("Recons. Loss = " + "{:.4f}".format(vae_recon_loss))
                    print("KL Loss = " + "{:.4f}".format(vae_kl))
                    print("TC Loss = " + "{:.4f}".format(tc_loss))
                    print("Factor VAE Loss = " + "{:.4f}".format(vae_loss))
                    print("D loss = " + "{:.4f}".format(d_loss))
                    print("best_ai {}".format(best_ai))
                    print("I_max {}".format(I_max))
                    print("Syn loss {:.4f}".format(syn_loss))

                # Saving
                if not step % self.args.save_interval:
                    filename = 'alpha_' + str(
                        self.alpha) + '_traversal_' + str(step) + '.png'
                    filepath = os.path.join(self.args.output_dir, filename)
                    traverse(self.net_mode, self.VAE, self.test_imgs, filepath)
Пример #21
0
    async def process_message(self, msg):
        """Render forwarded messages as a quote image and reply with it.

        Collects the consecutive forwarded messages of the first quoted
        user, fetches that user's profile photo, draws the quoted text
        next to the avatar with manual word wrapping, and answers with
        the rendered picture as a photo attachment.
        """
        command, otext = self.parse_message(msg)

        # Accumulator state: quoted user's id, avatar url, display name
        # parts, and the concatenated quoted text.
        i, url, name, last_name = None, None, None, None
        # Fix: initialize `text` up front — previously it stayed unbound
        # when the user lookup below failed, causing a NameError on the
        # next forwarded message from the same user.
        text = ""

        for m in traverse(await msg.get_full_forwarded()):
            if not m.full_text:
                continue

            if i == m.true_user_id:
                # Another message from the already-quoted user: append.
                text += "\n" + m.full_text
                continue

            if i is not None:
                # A second, different user starts — quote only the first.
                break

            i = m.true_user_id

            u = await self.api.users.get(user_ids=i, fields="photo_max")
            if not u:
                # Profile lookup failed; leave url/name unset.
                continue
            u = u[0]

            url = u["photo_max"]
            name = u["first_name"]
            last_name = u["last_name"]

            text = m.full_text

        # Nothing quotable: either no forwarded text at all, or the
        # profile (and hence the avatar url) could not be resolved.
        # Fix: the url check prevents a crash on `sess.get(None)`.
        if i is None or url is None:
            return await msg.answer("Нечего цитировать!")

        # Download and resize the avatar.
        async with aiohttp.ClientSession() as sess:
            async with sess.get(url) as response:
                img = Image.open(io.BytesIO(await response.read()))
                img = img.resize((200, 200))

        rsize = (700, 400)
        res = Image.new("RGBA", rsize, color=(0, 0, 0))
        res.paste(img, (25, 100))

        # Draw the quote text on a separate layer with manual word wrap.
        tex = Image.new("RGBA", rsize, color=(0, 0, 0))

        draw = ImageDraw.Draw(tex)

        sidth = draw.textsize(" ", font=self.f)[0]               # space width
        seight = int(draw.textsize("I", font=self.f)[1] * 1.05)  # line height

        text = text.splitlines()

        midth = 0   # widest rendered line; used to centre horizontally
        width = 0
        height = 0
        for line in text:
            for word in line.split(" "):
                size = draw.textsize(word, font=self.f)

                # Wrap when the word would overflow the text column.
                if width + size[0] >= rsize[0] - 340:
                    height += seight
                    width = 0

                draw.text((width, height), word, font=self.f)
                width += sidth + size[0]

                if width > midth:
                    midth = width

            height += seight
            width = 0

        # Centre the text block in the area to the right of the avatar.
        y = rsize[1] // 2 - height // 2
        x = 300 + (rsize[0] - 370 - midth) // 2
        res.alpha_composite(tex, (x, y))

        if height < 210:
            height = 210
            y = rsize[1] // 2 - height // 2

        # Decorative opening/closing quotation-mark images.
        res.alpha_composite(self.q, (250, y + 10))
        res.alpha_composite(self.qf,
                            (rsize[0] - 75, y + int(height - seight * 2 + 10)))

        # Attribution line: author name, optional note, and today's date.
        draw = ImageDraw.Draw(res)
        draw.multiline_text(
            (25, 310),
            f"© {name} {last_name}{' - ' + otext if otext else ''}\n"
            f"@ {datetime.date.today()}",
            font=self.fs)

        # Upload the rendered image and answer with it as an attachment.
        f = io.BytesIO()
        res.save(f, format='png')
        f.seek(0)
        attachment = await upload_photo(self.api, f, msg.user_id)
        f.close()

        return await msg.answer('', attachment=str(attachment))
Пример #22
0
            return self.resource(request).GET()

        path = self.path

        resource = traverse(path, request)
        if resource is None:
            return u''

        gresource = IResource(resource, None)
        if gresource is not None:
            try:
                return gresource.render(request)
            except Exception, err:
                log_exc(str(err))
                raise
        else:
            return traverse(path, request).GET()

    def __call__(self, request, package):
        """Return this resource itself for any (request, package) pair.

        The instance acts as its own factory: resolving it always yields
        the same resource object, ignoring both arguments.
        """
        return self


class ResourceFactory(object):
    """Package resource factory that forwards creation to a wrapped callable."""
    interface.implements(IPackageResourceFactory)

    def __init__(self, factory):
        # The callable that produces the actual resource objects.
        self.factory = factory

    def __call__(self, *args, **kw):
        # Delegate construction, passing every argument through untouched.
        produce = self.factory
        return produce(*args, **kw)
Пример #23
0
async def parse_user_id(msg,
                        can_be_argument=True,
                        argument_ind=-1,
                        custom_text=None):
    """Resolve the user id a message refers to.

    Resolution order: forwarded messages from other users, a numeric
    argument, a vk.com link, an inline ``[id...|...]`` mention, a fuzzy
    name match against chat members, and finally a screen-name lookup
    through the API. Returns ``None`` when nothing can be resolved.
    """
    # Forwarded messages from someone other than the sender win outright.
    for fwd in traverse(await msg.get_full_forwarded()):
        if fwd.user_id and fwd.true_user_id != msg.user_id:
            return fwd.true_user_id

    if not can_be_argument:
        return None

    original_text = msg.text if custom_text is None else custom_text

    token = original_text.split(" ")[argument_ind]

    # Plain numeric id.
    if token.isdigit():
        return int(token)

    # Strip a vk.com link down to its screen name.
    if token.startswith("https://vk.com/"):
        token = token[15:]

    # Inline mention of the form "[id123|Name]".
    if token.startswith("[id"):
        candidate = token[3:].split("|")[0]

        if candidate.isdigit() and "]" in token[3:]:
            return int(candidate)

    # Fuzzy match against members of the current chat, if available.
    if "__chat_data" in msg.meta:
        if argument_ind == -1:
            targets = [original_text.split(" ")[-1].strip().lower()]
        else:
            words = original_text.split(" ")[argument_ind:argument_ind + 2]
            targets = [w.strip().lower() for w in words]

        best_score, best_uid = 0, None
        for user in msg.meta["__chat_data"].users:
            # Exact screen-name match short-circuits everything.
            if user.get("screen_name") == token:
                return user["id"]

            score = sum(
                user.get(field, "").strip().lower() in targets
                for field in ("first_name", "last_name", "nickname"))

            if score == 0:
                continue
            if score > best_score:
                best_score, best_uid = score, user["id"]
            elif score == best_score:
                # Ambiguous: two members tie — give up on fuzzy matching.
                best_uid = None
                break

        if best_uid is not None:
            return best_uid

    # Last resort: ask the API to resolve the screen name.
    resolved = await msg.api.utils.resolveScreenName(screen_name=token)

    if resolved and isinstance(resolved, dict):
        return resolved.get("object_id")

    return None
Пример #24
0
    if len(rooms.keys()) != 0:
        print(f"There are already {len(rooms)} visited rooms")

    # Initialize the Game
    response = debounce(game_init, game_state)

    room = Room(response)
    print(f"Current room: {room}")
    player.play(room)
    record_move(rooms, room)

    # Game Mode
    print(f"Please choose running mode:")
    print(f"1 - automatic traversal")
    print(f"2 - manual")
    print(f"3 - work to find and sell items")
    print(f"4 - mine for coins")
    print(f"5 - change name")
    running_mode = int(input())

    if running_mode == 1:
        traverse(rooms, player, game_state)
    elif running_mode == 2:
        repl(rooms, player, game_state)
    elif running_mode == 3:
        work(rooms, player, game_state)
    elif running_mode == 4:
        mine(rooms, player, game_state)
    elif running_mode == 5:
        change_name(rooms, player, game_state)
Пример #25
0
    def train(self):
        """Run Factor-VAE training with a TC and a synergy discriminator.

        Each step: forward the VAE, compute reconstruction, KL,
        total-correlation and synergy losses, update the VAE, then update
        the synergy discriminator and the TC discriminator. Logs every
        ``log_interval`` steps and saves latent traversals every
        ``save_interval`` steps.
        """
        self.net_mode(train=True)

        # Constant label tensors for the TC discriminator cross-entropy.
        ones = torch.ones(self.batch_size,
                          dtype=torch.long,
                          device=self.device)
        zeros = torch.zeros(self.batch_size,
                            dtype=torch.long,
                            device=self.device)

        # Fix: apply ceil to the ratio, not to self.steps alone —
        # the original int(np.ceil(self.steps) / len(...)) silently
        # truncated the epoch count.
        epochs = int(np.ceil(self.steps / len(self.dataloader)))
        print("number of epochs {}".format(epochs))

        step = 0

        for e in range(epochs):

            for x_true1, x_true2 in self.dataloader:

                # NOTE(review): debug leftover — stops after one step.
                # Kept to preserve current behaviour; remove for a real run.
                if step == 1: break

                step += 1

                # VAE forward pass.
                x_true1 = x_true1.unsqueeze(1).to(self.device)
                x_recon, mu, logvar, z = self.VAE(x_true1)

                # Reconstruction and KL.
                vae_recon_loss = recon_loss(x_true1, x_recon)
                vae_kl = kl_div(mu, logvar)

                # Total correlation estimate from the TC discriminator.
                D_z = self.D(z)
                tc_loss = (D_z[:, :1] - D_z[:, 1:]).mean()

                # Synergy term: the synergy discriminator scores dimensions.
                best_ai = self.D_syn(mu, logvar)
                best_ai_labels = torch.bernoulli(best_ai)

                # Fix: mask copies so mu/logvar stay intact for the policy
                # below. The original masked `mu` in place and referenced
                # the undefined names `mu_syn`/`logvar_syn` (NameError);
                # this implements its "TODO Copy to an empty tensor".
                mu_syn = mu.clone()
                logvar_syn = logvar.clone()
                mu_syn[best_ai_labels == 0] = 0
                logvar_syn[best_ai_labels == 0] = 0

                # (Removed a dead loop that computed an unused `mu_syn_s`.)
                if len(mu_syn.size()) == 1:
                    syn_loss = kl_div_uni_dim(mu_syn, logvar_syn).mean()
                else:
                    syn_loss = kl_div(mu_syn, logvar_syn)

                # VAE loss.
                vae_loss = vae_recon_loss + vae_kl + self.gamma * tc_loss + self.alpha * syn_loss

                # Optimise VAE.
                self.optim_VAE.zero_grad()
                vae_loss.backward(retain_graph=True)
                self.optim_VAE.step()

                # Synergy discriminator target from the greedy policy.
                # Fix: call .detach() — the original used `.detach` without
                # parentheses, yielding the bound method, not a tensor.
                real_seq = greedy_policy_Smax_discount(self.z_dim, mu, logvar,
                                                       0.8).detach()
                d_syn_loss = recon_loss(real_seq, best_ai)

                # Optimise the synergy discriminator.
                self.optim_D_syn.zero_grad()
                d_syn_loss.backward(retain_graph=True)
                self.optim_D_syn.step()

                # TC discriminator on permuted latents from a second batch;
                # detach so no gradient flows back through the permutation.
                x_true2 = x_true2.unsqueeze(1).to(self.device)
                z_prime = self.VAE(x_true2, decode=False)[3]
                z_perm = permute_dims(z_prime).detach()
                D_z_perm = self.D(z_perm)

                # TC discriminator loss.
                d_loss = 0.5 * (F.cross_entropy(D_z, zeros) +
                                F.cross_entropy(D_z_perm, ones))

                # Optimise the TC discriminator.
                self.optim_D.zero_grad()
                d_loss.backward()
                self.optim_D.step()

                # Logging
                if step % self.args.log_interval == 0:

                    print("Step {}".format(step))
                    print("Recons. Loss = " + "{:.4f}".format(vae_recon_loss))
                    print("KL Loss = " + "{:.4f}".format(vae_kl))
                    print("TC Loss = " + "{:.4f}".format(tc_loss))
                    print("Syn Loss = " + "{:.4f}".format(syn_loss))
                    print("Factor VAE Loss = " + "{:.4f}".format(vae_loss))
                    print("D loss = " + "{:.4f}".format(d_loss))
                    print("best_ai {}".format(best_ai))
                    print("Syn loss {:.4f}".format(syn_loss))

                # Saving
                if not step % self.args.save_interval:
                    filename = 'traversal_' + str(step) + '.png'
                    filepath = os.path.join(self.args.output_dir, filename)
                    traverse(self.net_mode, self.VAE, self.test_imgs, filepath)