import os

# Note: `cp`, `args`, `optimize_latents` and `main_edit` are defined elsewhere in this module.
def main():
    LANDMARKS_MODEL_URL = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'
    # Download the dlib landmarks model and align the input faces.
    face_list = cp.run(LANDMARKS_MODEL_URL)

    file_list = os.listdir("./pytorch_stylegan_encoder/aligned_images")
    print(file_list)
    name_list = list(file_list)

    # npyfile_list = os.listdir("./npysave/")
    # print(npyfile_list)
    # npy_list = []
    # for name in npyfile_list:
    #     npy_list.append(name)

    # Encode each aligned image into a latent vector, then run the edit step on it.
    for i, name in enumerate(name_list):
        print(i, " ", len(name_list))
        args.image_path = "./pytorch_stylegan_encoder/aligned_images/" + name
        print(args.image_path)
        args.dlatent_path = "./npysave/unknown" + str(i + 1) + ".npy"
        print("args.dlatent_path", args.dlatent_path)
        optimize_latents()
        main_edit(args.dlatent_path, i)
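# A minimal entry point, assuming this file is executed directly
# (the __main__ guard is not part of the original excerpt):
if __name__ == '__main__':
    main()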
import telepot

# Note: `bot`, `PegarPost`, `locapIp`, `BinChecker`, `traduzirTexto` and `run`
# are defined elsewhere in this module.
def handle(msg):
    content_type, chat_type, chat_id = telepot.glance(msg)
    if content_type == 'text':
        text = msg['text'].upper().strip()

        # /start: greeting pointing to the command list.
        if text.startswith('/START'):
            bot.sendMessage(chat_id, '*Digite:*\n /comandos - para ver comandos', parse_mode='Markdown')

        # /comandos: list the available commands.
        if text.startswith('/COMANDOS'):
            comandos = ("🛠*Lista de Comandos*\n\n"
                        "• *FOTOS:* /fotos baixar imagens\n\n"
                        "• *IP:* /find - Consultar ip\n\n"
                        "• *BIN:* /bin - consultar uma bin\n\n"
                        "• *TR:* /tr traduzir um texto")
            bot.sendMessage(chat_id, comandos, parse_mode='Markdown')

        # /meme: send a random image.
        if text.startswith('/MEME'):
            image = PegarPost()
            bot.sendPhoto(chat_id, image)

        # /find: geolocate an IP address or hostname.
        if text.startswith('/FIND'):
            try:
                ip = text[6:]
                if ip and '@DARKMINE' not in text:
                    locap = locapIp(ip)
                    bot.sendMessage(chat_id, locap[0], parse_mode='Markdown')
                    bot.sendPhoto(chat_id, locap[1], '@Darkmine_bot')
                else:
                    data = '*Ip Location* - Localizar IP ou Site\n\nFormato:\n/ip ip ou hostname'
                    bot.sendMessage(chat_id, data, parse_mode='Markdown')
            except Exception as erro:
                print(erro)
                data = '*Endereço de IP inválido!*'
                bot.sendMessage(chat_id, data, parse_mode='Markdown')

        # /bin: look up a card BIN.
        if text.startswith('/BIN'):
            resp = BinChecker(text)
            bot.sendMessage(chat_id, resp, parse_mode='Markdown')

        # /tr: translate the text that follows the command.
        if text.startswith('/TR'):
            texto = text[4:]
            if texto and '@DARKMINE' not in text:
                resp = traduzirTexto(texto)
            else:
                resp = "*Modo de uso:*\n\n /tr *Texto* - Para fazer a tradução de algum texto"
            bot.sendMessage(chat_id, resp, parse_mode='Markdown')

        # /id: echo the sender's name, username and chat id.
        if text.startswith('/ID'):
            nome = msg['from']['first_name']
            user = msg['from']['username']
            resp = 'Nome: {}\nUsuário: @{}\nId: {}'.format(nome, user, chat_id)
            bot.sendMessage(chat_id, resp)

        # /fotos <busca> <quantidade>: download N images for a search query.
        if text.startswith('/FOTOS'):
            if text[7:] and '@DARKMINE' not in text:
                partes = text[7:].split(' ')
                num = int(partes[-1])
                query = '+'.join(partes[:-1])
                lista = run(query, num)
                for line in lista:
                    if line == lista[-1]:
                        bot.sendPhoto(chat_id, line, '@Darkmine_bot')
                    else:
                        bot.sendPhoto(chat_id, line)
            else:
                resp = "*Modo de uso:*\n\n/fotos *busca* *quantidade*\n\nEx: /fotos bolsonaro 5"
                bot.sendMessage(chat_id, resp, parse_mode='Markdown')
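# For reference, a minimal sketch of how this handler is typically wired up with
# telepot's MessageLoop. The token below is a hypothetical placeholder; the
# original module creates `bot` elsewhere.
import time
from telepot.loop import MessageLoop

bot = telepot.Bot('YOUR_BOT_TOKEN')          # hypothetical placeholder token
MessageLoop(bot, handle).run_as_thread()     # route incoming updates to handle()
print('Listening ...')
while True:
    time.sleep(10)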
    # Excerpt: forward pass of the MoNet class (class header, imports and layer
    # definitions sit above this excerpt).
    def forward(self, data):
        data.x = F.elu(self.conv1(data.x, data.edge_index, data.edge_attr))
        weight = normalized_cut_2d(data.edge_index, data.pos)
        cluster = graclus(data.edge_index, weight, data.x.size(0))
        data.edge_attr = None
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data.x = F.elu(self.conv2(data.x, data.edge_index, data.edge_attr))
        weight = normalized_cut_2d(data.edge_index, data.pos)
        cluster = graclus(data.edge_index, weight, data.x.size(0))
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data.x = F.elu(self.conv3(data.x, data.edge_index, data.edge_attr))

        x = global_mean_pool(data.x, data.batch)
        x = F.elu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        return F.log_softmax(self.fc2(x), dim=1)


model = MoNet(args.kernel_size).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.decay_step, gamma=args.lr_decay)
print(model)
run(model, args.epochs, train_loader, test_loader, optimizer, scheduler, device)
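# For reference, a minimal sketch of what the `run()` training helper might look
# like; its actual implementation is not shown in this file, so the loop below is
# an assumption (NLL loss on the log-probabilities returned by forward()).
import torch.nn.functional as F

def run_sketch(model, epochs, train_loader, test_loader, optimizer, scheduler, device):
    for epoch in range(1, epochs + 1):
        model.train()
        for data in train_loader:
            data = data.to(device)
            optimizer.zero_grad()
            F.nll_loss(model(data), data.y).backward()
            optimizer.step()
        scheduler.step()

        model.eval()
        correct = 0
        for data in test_loader:
            data = data.to(device)
            pred = model(data).argmax(dim=1)
            correct += int((pred == data.y).sum())
        print('Epoch {}: test accuracy {:.4f}'.format(epoch, correct / len(test_loader.dataset)))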
from pathlib import Path

import cv2

from image import run

# Run the pipeline on every image in the sibling 'puzzles/' directory,
# downscaling very tall images first (cv2.resize takes (width, height)).
path = Path(__file__).parents[1] / 'puzzles/'
for filename in path.iterdir():
    img = cv2.imread(str(filename.resolve()))
    if img is None:          # skip files cv2 cannot read as images
        continue
    if img.shape[0] > 2000:
        img = cv2.resize(img, (500, 700))
    run(img)
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

import image


# Placeholder stubs (currently unused).
def deconstruct_image(image):
    def reconstruct_image():
        return 0


tf.set_random_seed(1)
np.random.seed(1)

# Per-image colour intensities produced by image.run().
zzz = image.run()
# print(color_intensities)
# Sample output:
# zzz = [ 3.45591175,  4.21306292,  1.39260529,  0.79979209,  2.80560091,  1.07702818,
#         1.13857619,  2.06239613,  1.01650569,  2.74965652,  2.89521513,  1.78206757,
#         3.03480862,  1.155502,    0.84656923,  0.94706706,  1.30763502, 14.22199015,
#         1.01494266,  0.82881535,  1.20278014,  2.6748784,   1.52933522,  0.93917355]

# Experiment notes (settings : final loss):
# gradient descent
#   -1/1  8  0.1     : 0.002077
#   -1/1  8  0.5     : 0.001934, then wavers up and down
#   -1/1  8  0.8     :
#   -1/1  8  1.0     : 12.37966 ...
# AdamOptimizer
#   -1/1      10  0.1     : 0.022
#   -1/1      10  0.001   : 0.001126
#   -1/1      10  0.0005  : 0.0011261
#   -1/1      16  0.0005  : 0.001918
#   -1/1      20  0.001   : 0.001126
#   -1/1      20  0.0005  : 0.000765
#   -1/1      20  0.0001  : 0.0007651
#   -1/1      25  0.00005 : 0.0007651
#    0/1      20  0.0001  : 0.0168
#   -2/1      20  0.0001  : 0.0008762
#   -0.5/0.5  20  0.0001  : 0.00109
#   -2.0/2.0  20  0.0001  : 0.0016359
#   -1/1      25  0.0005  : 0.0016355
#   -1/1      30  0.0005  : 0.0016355

color_intensities = zzz / np.linalg.norm(zzz)
# image -> colour intensity; provided colour intensity -> average of your colour intensities

# fake data
x = np.linspace(-1.0, 1.0, 25)[:, np.newaxis]   # shape (25, 1)
# noise = np.random.normal(0, 0.1, size=x.shape)
y = np.asarray(color_intensities)
y.shape = (25, 1)                               # shape (25, 1)

# plot data
# plt.scatter(x, y)
# plt.show()

tf_x = tf.placeholder(tf.float32, x.shape)      # input x
tf_y = tf.placeholder(tf.float32, y.shape)      # input y

# neural network layers
l1 = tf.layers.dense(tf_x, 20, tf.nn.relu)      # hidden layer
output = tf.layers.dense(l1, 1)                 # output layer

# step = tf.Variable(0, trainable=False)
# rate = tf.train.exponential_decay(0.0005, step, 1, 0.9999)
loss = tf.losses.mean_squared_error(tf_y, output)   # compute cost
optimizer = tf.train.AdamOptimizer(0.0005)
train_op = optimizer.minimize(loss)

sess = tf.Session()                              # control training and others
sess.run(tf.global_variables_initializer())      # initialize variables in graph

plt.ioff()

for step in range(25000):
    # train and fetch the net output
    _, l, pred = sess.run([train_op, loss, output], {tf_x: x, tf_y: y})
    if step % 1000 == 0:
        # plot and show the learning process
        # plt.cla()
        # plt.scatter(x, y)
        # plt.plot(x, pred, 'r-', lw=5)
        # plt.text(0.5, 0, 'Loss=%.4f' % l, fontdict={'size': 20, 'color': 'red'})
        # plt.pause(1.0)
        # plt.show()
        print('Loss=%.12f' % l)

# Print the name of every trainable variable alongside its learned value.
tvars = tf.trainable_variables()
tvars_vals = sess.run(tvars)
for var, val in zip(tvars, tvars_vals):
    print(var.name, val)

# print(output.numpy())
plt.scatter(x, y)
plt.plot(x, pred, 'r-', lw=2)
plt.text(0.5, 0, 'Loss=%.6f' % l, fontdict={'size': 20, 'color': 'red'})
plt.pause(1.0)
plt.show()
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

import image

# LOAD DATA
mnist = input_data.read_data_sets('data/', one_hot=True)
images = image.run()


# INIT WEIGHTS
def init_weights(shape):
    init_random_dist = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(init_random_dist)


# INIT BIAS
def init_bias(shape):
    init_bias_vals = tf.constant(0.1, shape=shape)
    return tf.Variable(init_bias_vals)


# CONV2D
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')


# POOLING: 2x2 max-pool (padding='SAME' assumed to complete the truncated call)
def max_pool_2by2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME')
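# For context, a hypothetical helper showing how the utilities above are usually
# composed into a convolution + ReLU layer; the filter shape is an example, not
# taken from the original file.
def convolutional_layer(input_x, shape):
    W = init_weights(shape)        # e.g. shape = [5, 5, 1, 32]
    b = init_bias([shape[3]])
    return tf.nn.relu(conv2d(input_x, W) + b)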