def DoResolve(url):
    """Resolve a video page URL to a direct media URL via an HTTP POST.

    Returns the unquoted media URL (spaces escaped as %20), or None when
    the page could not be fetched or parsed.
    """
    ret = None
    try:
        theNet = net.Net()
        # The site expects this exact confirmation form payload.
        data = {'fuck_you': '',
                'confirm': 'Click+Here+to+Watch+Free%21%21'}
        url = url.replace(' ', '%20')
        theNet.set_user_agent(
            'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'
        )
        html = theNet.http_POST(url, data).content.replace('\n', '').replace('\t', '')
        try:
            # Primary page layout: file: "<url>", ... image
            match = re.compile('file: "(.+)",.+?image').search(html).group(1)
        except Exception:
            # Fallback layout: file=<url>&provider=http
            match = re.compile('file=(.+?)&provider=http').search(html).group(
                1).split('file=', 1)[-1]
        url = urllib.unquote(match)
        url = url.replace(' ', '%20')
        ret = url
    except Exception:
        # BUG FIX: the original outer `try` had no handler and the function
        # never returned `ret`. Best-effort resolver: failures yield None.
        pass
    return ret
def solve(url, cookie_file='', wait=True):
    """Solve a Cloudflare 'jschl' JavaScript challenge for *url*.

    Scrapes the challenge page, evaluates the obfuscated arithmetic via
    solveEquation(), submits the answer to /cdn-cgi/l/chk_jschl and returns
    the final page content (or False when the challenge could not be
    parsed). Optionally saves the clearance cookies to *cookie_file*.
    """
    # Captures: the challenge variable name parts, the seed expression, and
    # the chain of modifier statements applied to the answer variable.
    solverregex = re.compile(
        'var t,r,a,f, (.+?)={"(.+?)":(.+?)};.+challenge-form\'\);.*?\n.*?;(.*?);a\.value',
        re.DOTALL)
    vcregex = re.compile(
        '<input type="hidden" name="jschl_vc" value="([^"]+)"/>')
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36',
        'Referer': url
    }
    request = requests.get(url, headers=headers).content
    # Hidden "pass" token required by the challenge endpoint.
    passv = re.compile('<input type="hidden" name="pass" value="([^"]+)"/>'
                       ).findall(request)[0]
    res = solverregex.findall(request)
    if len(res) == 0:
        return False
    res = res[0]
    vc = vcregex.findall(request)
    if len(vc) == 0:
        return False
    vc = vc[0]
    print "VC is ", vc
    varname = (res[0], res[1])
    # Seed value of the challenge variable.
    solved = int(solveEquation(res[2].rstrip()))
    print "Initial value: ", res[2], "Solved:", solved
    # Apply each modifier statement (+=, -=, *=, /=) in order.
    for extra in res[3].split(";"):
        extra = extra.rstrip()
        if extra[:len('.'.join(varname))] != '.'.join(varname):
            print "Extra does not start with varname (", extra, ")"
        else:
            extra = extra[len('.'.join(varname)):]
            if extra[:2] == "+=":
                solved += int(solveEquation(extra[2:]))
            elif extra[:2] == "-=":
                solved -= int(solveEquation(extra[2:]))
            elif extra[:2] == "*=":
                solved *= int(solveEquation(extra[2:]))
            elif extra[:2] == "/=":
                solved /= int(solveEquation(extra[2:]))
            else:
                print "Unknown modifier", extra
    http = url.split('//')[0]
    domain1 = url.split('//')[1]
    domain = domain1.split('/')[0]
    # Cloudflare adds the domain length to the final answer.
    solved += len(domain)
    import net
    net = net.Net()
    if wait == True:
        # Cloudflare rejects answers that are submitted too quickly.
        print 'Sleepin'
        xbmc.sleep(6000)
    final = net.http_POST(
        http + "//" + domain +
        "/cdn-cgi/l/chk_jschl?jschl_vc={0}&pass={1}&jschl_answer={2}".format(
            vc, passv, solved), '', headers=headers)
    if not cookie_file == '':
        net.save_cookies(cookie_file)
    return final.content
def main():
    """Entry point: build the dataset, network and optimizers, then train."""
    # Data pipeline: uniform weighted sampling with replacement.
    train_set = datasets.PixelLinkIC15Dataset(opt.train_images_dir,
                                              opt.train_labels_dir)
    weights = [1 / len(train_set)] * len(train_set)
    sampler = WeightedRandomSampler(weights, opt.batch_size, replacement=True)
    loader = DataLoader(train_set, batch_size=opt.batch_size, sampler=sampler)

    model = net.Net()  # construct neural network

    # Device selection: CUDA (optionally multi-GPU) or CPU.
    if opt.gpu:
        device = torch.device("cuda:0")
        model = model.cuda()
        if opt.multi_gpu:
            model = nn.DataParallel(model)
    else:
        device = torch.device("cpu")

    # Initialise weights and set up the two-phase optimizers.
    model.apply(weight_init)
    optimizer = optim.SGD(model.parameters(), lr=opt.learning_rate,
                          momentum=opt.momentum,
                          weight_decay=opt.weight_decay)
    optimizer2 = optim.SGD(model.parameters(), lr=opt.learning_rate2,
                           momentum=opt.momentum,
                           weight_decay=opt.weight_decay)
    iteration = 0
    train(opt.epoch, iteration, loader, model, optimizer, optimizer2, device)
def __init__(self, sanity_check=True):
    """Initialise the net wrapper; conds/events alias places/trans."""
    net.Net.__init__(self, sanity_check)
    # Occurrence-net terminology: conditions are places, events are
    # transitions.
    self.conds = self.places
    self.events = self.trans
    # Node-colouring counters.
    self.nr_black = 0
    self.nr_gray = 0
    # Separate underlying net instance.
    self.net = net.Net(sanity_check)
def __init__(self, position, identity):
    """Create an ant at grid *position* with identifier *identity*.

    Initialises sensory state, neural net controller and the render shape
    (a 10x10 rectangle positioned on a 10px grid).
    """
    self.hunger = 0
    self.proxAnt = 0
    self.memory = 0
    self.proxFood = [0, 0, 0, 0]  # food proximity in 4 directions
    self.proxWall = [0, 0, 0, 0]  # wall proximity in 4 directions
    self.dirOut = 0
    self.direction = 0
    self.pos = position
    # BUG FIX: the original did `self.id = id`, storing the builtin `id`
    # function instead of the `identity` parameter.
    self.id = identity
    #tells if previous action caused this ant to be inactive this time
    self.holding = 0
    self.age = 0
    self.offspring = []
    self.genScore = np.size(self.offspring)
    self.lifeSpan = 4
    #neural net
    self.neuralNet = net.Net()
    #rendering
    self.antShape = sf.RectangleShape((10, 10))
    self.antShape.position = (self.pos[0] * 10, self.pos[1] * 10)
def retrain():
    """Resume PixelLink training from a saved checkpoint."""
    train_set = datasets.PixelLinkIC15Dataset(config.train_images_dir,
                                              config.train_labels_dir)
    # Uniform weighted sampling with replacement.
    sampler = WeightedRandomSampler([1 / len(train_set)] * len(train_set),
                                    config.batch_size, replacement=True)
    loader = DataLoader(train_set, batch_size=config.batch_size,
                        sampler=sampler)

    model = net.Net()
    if config.gpu:
        device = torch.device("cuda:0")
        model = model.cuda()
        if config.multi_gpu:
            model = nn.DataParallel(model)
    else:
        device = torch.device("cpu")

    # Restore the checkpoint selected by config.retrain_model_index.
    model.load_state_dict(
        torch.load(config.saving_model_dir +
                   '%d.mdl' % config.retrain_model_index))

    # NOTE(review): optimizer gets retrain_learning_rate2 and optimizer2
    # gets retrain_learning_rate — the reverse of main()'s pairing; confirm
    # this swap is intended.
    optimizer = optim.SGD(model.parameters(),
                          lr=config.retrain_learning_rate2,
                          momentum=config.momentum,
                          weight_decay=config.weight_decay)
    optimizer2 = optim.SGD(model.parameters(),
                           lr=config.retrain_learning_rate,
                           momentum=config.momentum,
                           weight_decay=config.weight_decay)
    train(config.retrain_epoch, config.retrain_model_index, loader, model,
          optimizer, optimizer2, device)
def DoResolve(url, results):
    """Resolve a page into [url, label] source pairs appended to *results*.

    Tries the JSON-ish `sources: [...]` block first; if nothing was found,
    falls back to `file=...&provider=http` style links. Returns *results*.
    """
    try:
        theNet = net.Net()
        url = utils.URL + url.replace(' ', '%20')[1:]
        theNet.set_user_agent(utils.getUserAgent())
        html = theNet.http_GET(url).content.replace('\n', '').replace('\t', '')
        # BUG FIX: the original passed re.DOTALL as findall()'s second
        # argument, which is the *pos* character offset, not a flag — the
        # flag must go into re.compile().
        sources_block = re.compile('sources:\s*(\[.*?\])',
                                   re.DOTALL).findall(html)[0]
        sources = re.compile('{(.+?)}').findall(sources_block)
        for source in sources:
            try:
                fileUrl = re.compile('file:\s*?"(.+?)"').search(source).group(1)
                label = re.compile('label:\s*?"(.+?)"').search(source)
                # Label is optional per source entry.
                label = label.group(1) if label else ''
                results.append([fileUrl, label])
            except Exception:
                pass
        if len(results) == 0:
            # Fallback: provider-style, URL-quoted links.
            links = re.compile(';file=(.+?)&provider=http\'').findall(html)
            for link in links:
                results.append([urllib.unquote_plus(link), ''])
    except Exception:
        # Best-effort resolver: failures leave *results* unchanged.
        pass
    return results
def load_model(envname, save_model_dir):
    """Build the policy net sized for *envname* and restore its weights.

    Returns the (model, checkpoint) pair produced by nu.load_and_test.
    """
    env = gym.make(envname)
    action_dim = env.action_space.shape[0]
    obs_dim = env.observation_space.shape[0]
    # Two hidden layers of 4000 and 500 units.
    model = mynet.Net(obs_dim, action_dim, [4000, 500])
    model, ckpt = nu.load_and_test(model, save_model_dir)
    return model, ckpt
def train(self):
    """Train the AdaIN decoder: prepare dirs/logging/data, then optimise.

    Only the decoder is trained; the VGG encoder (truncated at relu4_1) is
    fixed. Checkpoints are written every save_model_interval iterations.
    """
    device = torch.device('cuda')

    # Output directories for checkpoints and tensorboard logs.
    save_dir = Path(self.args.save_dir)
    save_dir.mkdir(exist_ok=True, parents=True)
    log_dir = Path(self.args.log_dir)
    log_dir.mkdir(exist_ok=True, parents=True)
    writer = SummaryWriter(log_dir=str(log_dir))

    content_iter, style_iter = load_data(self.args)

    # Encoder: pretrained VGG truncated to the first 31 children (relu4_1).
    vgg = net.vgg
    decoder = net.decoder
    vgg.load_state_dict(torch.load(self.args.vgg))
    vgg = nn.Sequential(*list(vgg.children())[:31])
    network = net.Net(vgg, decoder)
    network.train()
    network.to(device)

    optimizer = torch.optim.Adam(network.decoder.parameters(),
                                 lr=self.args.lr)

    for step in tqdm(range(self.args.max_iter)):
        adjust_learning_rate(self.args, optimizer, iteration_count=step)
        content = next(content_iter).to(device)
        styles = next(style_iter).to(device)

        # Forward pass and loss computation.
        t_adain, g_t_feats, style_feats = network(content, styles)
        loss_c, loss_s, total_loss = calc_total_loss(
            self.args, t_adain, g_t_feats, style_feats)

        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()

        # Per-iteration loss curves.
        writer.add_scalar('loss_content', loss_c.item(), step + 1)
        writer.add_scalar('loss_style', loss_s.item(), step + 1)

        # Periodic checkpoint: move tensors to CPU for portability.
        if (step + 1) % self.args.save_model_interval == 0 or (
                step + 1) == self.args.max_iter:
            state_dict = net.decoder.state_dict()
            for key in state_dict.keys():
                state_dict[key] = state_dict[key].to(torch.device('cpu'))
            torch.save(state_dict,
                       save_dir / 'decoder_iter_{:d}.pth.tar'.format(step + 1))
    writer.close()
def DoResolve(url, results):
    """Resolve a page into [url, label] pairs appended to *results*.

    Tries three page layouts in order: file/label pairs, provider-style
    quoted links, then bare file:"..." entries. Returns *results* (the
    original fell off the end without returning, unlike its sibling
    resolvers).
    """
    try:
        import wco_utils as utils
        theNet = net.Net()
        # The site expects this exact confirmation form payload.
        data = {'fuck_you': '', 'confirm': 'Click+Here+to+Watch+Free%21%21'}
        url = url.replace(' ', '%20')
        theNet.set_user_agent(utils.getUserAgent())
        html = theNet.http_POST(url, data).content.replace('\n', '').replace('\t', '')
        # Layout 1: file/label pairs.
        links = re.compile('file:.+?"(.+?)",.+?label:.+?"(.+?)"').findall(html)
        for link in links:
            results.append([link[0], link[1]])
        if len(links) == 0:
            # Layout 2: provider-style links (URL-quoted).
            links = re.compile(';file=(.+?)&provider=http\'').findall(html)
            for link in links:
                results.append([urllib.unquote_plus(link), ''])
        if len(links) == 0:
            # Layout 3: bare file:"..." entries, whitespace stripped.
            links = re.compile('file:"(.+?)"').findall(html.replace(' ', ''))
            for link in links:
                results.append([link, ''])
    except Exception:
        # BUG FIX: was the Python-2-only `except Exception, e`. Best-effort:
        # failures leave *results* unchanged.
        pass
    return results
def resolve(url):
    """Resolve a file-host download page to a direct stream URL.

    Submits the 'Free Download' form (with captcha answer), re-submits the
    second form via urllib2 and returns the redirect target, or None on
    failure.
    """
    import net
    http_client = net.Net()
    page_url = url
    html = http_client.http_GET(page_url).content

    # Collect all hidden form fields from the first page.
    form = {}
    hidden = re.findall(r'type="hidden"\s+name="(.+?)"\s+value="(.*?)"', html)
    for name, value in hidden:
        form[name] = value

    import captcha_lib
    form['method_free'] = 'Free Download'
    form.update(captcha_lib.do_captcha(html))
    html = http_client.http_POST(page_url, form).content

    # Second page: fresh hidden fields plus a referer.
    form = {}
    hidden = re.findall(r'type="hidden"\s+name="(.+?)"\s+value="(.*?)"', html)
    for name, value in hidden:
        form[name] = value
    form['referer'] = page_url

    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.93 Safari/537.36'
    }
    request = urllib2.Request(page_url,
                              data=urllib.urlencode(form),
                              headers=headers)
    try:
        # The post-redirect URL is the direct stream.
        return urllib2.urlopen(request).geturl()
    except:
        return
def __init__(self, device, decoder_path=None, transform_path=None, vgg_path=None):
    """Load the attention-based style-transfer model for inference.

    Restores decoder, SA-module and VGG weights from the given paths,
    slices the VGG into per-stage encoders and moves everything to
    *device*.
    """
    self.device = device
    self.decoder = net.decoder
    self.vgg = net.vgg
    self.network = net.Net(self.vgg, self.decoder)
    self.sa_module = self.network.sa_module

    # Inference-only: switch every sub-module to eval mode.
    for module in (self.decoder, self.sa_module, self.vgg):
        module.eval()

    # Restore the pretrained weights.
    self.decoder.load_state_dict(torch.load(decoder_path))
    self.sa_module.load_state_dict(torch.load(transform_path))
    self.vgg.load_state_dict(torch.load(vgg_path))

    # Slice VGG into the normalisation layer and per-stage encoders.
    layers = list(self.vgg.children())
    self.norm = nn.Sequential(*layers[:1])
    self.enc_1 = nn.Sequential(*layers[:4])     # input -> relu1_1
    self.enc_2 = nn.Sequential(*layers[4:11])   # relu1_1 -> relu2_1
    self.enc_3 = nn.Sequential(*layers[11:18])  # relu2_1 -> relu3_1
    self.enc_4 = nn.Sequential(*layers[18:31])  # relu3_1 -> relu4_1
    self.enc_5 = nn.Sequential(*layers[31:44])  # relu4_1 -> relu5_1

    for module in (self.norm, self.enc_1, self.enc_2, self.enc_3,
                   self.enc_4, self.enc_5, self.sa_module, self.decoder):
        module.to(device)
    print('model init')
def __init__(self):
    """Initialise pygame, the game window, the font and the network client."""
    pygame.init()
    self.clock = pygame.time.Clock()
    self.window = pygame.display.set_mode((WIDTH, HEIGHT))
    pygame.display.set_caption(TITLE)
    self.font = pygame.font.Font('freesansbold.ttf', 32)
    # Connect to the game server immediately on construction.
    self.server = net.Net()
    self.server.connectToServer()
def main():
    """Train a net on MNIST for one epoch, save it, reload and evaluate."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)
    train_loader, test_loader = data_loader.loader()

    model = net.Net().to(device)
    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)

    epochs = 1
    for epoch in range(1, epochs + 1):
        train.train(model, device, train_loader, optimizer, epoch)

    # Persist weights, then reload into a fresh network to validate the
    # save/load round trip before testing.
    checkpoint_path = './mnist_result.pth'
    torch.save(model.state_dict(), checkpoint_path)
    model = net.Net().to(device)
    model.load_state_dict(torch.load(checkpoint_path))
    test.test(model, device, test_loader)
def __init__(self):
    """Set up the network client, cache and region-dependent menus."""
    self.net = net.Net()
    self.cache = SimpleCache()
    self.region = self.getRegion()
    # Only US content is served unfiltered.
    self.filter = self.region != 'US'
    self.categoryMenu = self.getCategories()
    self.mediaType = self.getMediaTypes()
    log('__init__, region = ' + self.region)
def __init__(self, sysARG):
    """Initialise the plugin: token, cache, cookie-backed HTTP client,
    region and login (the addon exits when login fails)."""
    log('__init__, sysARG = ' + str(sysARG))
    self.token = (TOKEN or '')
    self.sysARG = sysARG
    self.cache = SimpleCache()
    # HTTP client persists cookies to COOKIE_JAR.
    self.net = net.Net(cookie_file=COOKIE_JAR, http_debug=DEBUG)
    self.lat, self.lon = self.setRegion()
    # Deliberate `== False` comparison: only an explicit failure aborts.
    if self.login(USER_EMAIL, PASSWORD) == False:
        sys.exit()
def create_network(vgg, decoder):
    """Assemble the style-transfer network from a truncated VGG encoder and
    the decoder, then wrap it for two-GPU data parallelism.

    NOTE: `device` is read from module scope.
    """
    # Keep only the first 44 VGG children (up to relu5_1).
    encoder = nn.Sequential(*list(vgg.children())[:44])
    with torch.no_grad():
        network = net.Net(encoder, decoder)
        network.train()
        network.to(device)
        network = nn.DataParallel(network, device_ids=[0, 1])
    return network
def create_net():
    """Instantiate the MSG-Net in eval mode, loading local weights when the
    checkpoint file exists next to this module."""
    root = os.path.dirname(__file__)
    model = net.Net()
    checkpoint = os.path.join(root, 'msgnet.pth')
    # Weights are optional: without the file the net keeps its random init.
    if os.path.exists(checkpoint):
        model.load_state_dict(torch.load(checkpoint))
    model.eval()
    return model
def run_demo(eval_args):
    """Run live webcam style transfer with a pretrained MSG-Net (MXNet).

    Loads the style image and generator from *eval_args*, then stylises
    each camera frame and shows the content/stylised pair side by side
    until 'q' is pressed.
    """
    ## load parameters
    #content_image = 'images/content/xjtlu.jpg'
    #style_image = 'images/styles/starry_night.jpg'
    #eval_args = program_args(content_image,content_image,style_image,128,128,0)
    #eval_args = camero_args(style_image)
    # Select CPU or GPU context.
    if eval_args.cuda == 0:
        ctx = mx.cpu()
    else:
        ctx = mx.gpu()
    ## Change the content and style image using Style Loader
    #content_image = utils.tensor_load_rgbimage(eval_args.contentImage, ctx, size=eval_args.size, keep_asp=True)
    style_image = utils.tensor_load_rgbimage(eval_args.styleImage,
                                             ctx,
                                             size=eval_args.size)
    style_image = utils.preprocess_batch(style_image)
    # Load the generator and fix the style target once up front.
    style_model = net.Net(ngf=eval_args.ngf)
    style_model.load_parameters(eval_args.model, ctx=ctx)
    style_model.set_target(style_image)
    cam = cv2.VideoCapture(0)
    while True:
        ## read frame
        ret, frame = cam.read()
        # read content image (cimg)
        #cimg = img.copy()
        #img = np.array(img).transpose(2, 0, 1)
        content_img = load_image(frame, ctx, eval_args.size)
        output = style_model(content_img)
        tensor = output[0]
        #(b, g, r) = F.split(tensor, num_outputs=3, axis=0)
        #tensor = F.concat(r, g, b, dim=0)
        # Clamp to valid pixel range, convert CHW float -> HWC uint8.
        img = F.clip(tensor, 0, 255).asnumpy()
        img = img.transpose(1, 2, 0).astype('uint8')
        img = Image.fromarray(img)
        # Resize the stylised image back to the camera frame size so the
        # two can be stacked horizontally.
        image = np.array(
            img.resize((frame.shape[1], frame.shape[0]), Image.ANTIALIAS))
        #print(frame.shape,image.shape)
        numpy_horizontal = np.hstack((frame, image))
        #cv2.imshow("Content Window",frame)
        #cv2.imshow("Style Window",grey)
        cv2.imshow("Test Window Shape", numpy_horizontal)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cam.release()
    cv2.destroyAllWindows()
def evaluate(hps, X_val, y_val):
    """Continuously evaluate the newest checkpoint on (X_val, y_val).

    Every ~10 seconds: restores the latest checkpoint from FLAGS.log_root,
    computes accuracy over FLAGS.batch_count mini-batches and writes it
    (plus the model's summaries) to FLAGS.eval_dir. Runs forever.
    """
    model = net.Net(hps)
    model.build_graph()
    saver = tf.train.Saver()
    summary_writer = tf.summary.FileWriter(FLAGS.eval_dir)
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    tf.train.start_queue_runners(sess)
    while True:
        # Wait until a checkpoint is available, then restore it.
        try:
            ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root)
        except tf.errors.OutOfRangeError as e:
            tf.logging.error('Cannot restore checkpoint: %s', e)
            continue
        if not (ckpt_state and ckpt_state.model_checkpoint_path):
            tf.logging.info('No model to eval yet at %s', FLAGS.log_root)
            continue
        tf.logging.info('Loading checkpoint %s',
                        ckpt_state.model_checkpoint_path)
        saver.restore(sess, ckpt_state.model_checkpoint_path)
        total_prediction, correct_prediction = 0, 0
        # Evaluate batch by batch; labels/predictions appear one-hot, so
        # argmax recovers class indices.
        for i in xrange(FLAGS.batch_count):
            start = hps.batch_size * i
            end = hps.batch_size * (i + 1)
            xx = X_val[start:end]
            yy = y_val[start:end]
            fd = {model.X: xx, model.y: yy}
            summaries, loss, predictions, truth, train_step = sess.run(
                [model.summaries, model.cost, model.predictions,
                 model.labels, model.global_step],
                feed_dict=fd)
            truth = np.argmax(truth, axis=1)
            predictions = np.argmax(predictions, axis=1)
            correct_prediction += np.sum(truth == predictions)
            total_prediction += predictions.shape[0]
        accuracy = 1.0 * correct_prediction / total_prediction
        # Publish accuracy as a manual summary at the model's global step.
        precision_summ = tf.Summary()
        precision_summ.value.add(tag='Accuracy', simple_value=accuracy)
        summary_writer.add_summary(precision_summ, train_step)
        summary_writer.add_summary(summaries, train_step)
        tf.logging.info('loss: %.3f, precision: %.3f' % (loss, accuracy))
        summary_writer.flush()
        time.sleep(10)
def NetStrings(self, binary):
    """Extract strings from *binary* and append URL/IP/mail presence flags
    to its discrete-feature list."""
    self.runStrings(binary)
    if binary.getStrings() == None:
        #Default is 0
        # NOTE(review): only a single 0 is appended here, while the success
        # path appends three flags — confirm this asymmetry is intended.
        binary.appendDiscreteList(0)
        return
    npy = net.Net()
    url, ip, mail = npy.check_strings(binary.getStrings())
    # One binary presence flag per indicator type, in url/ip/mail order.
    for count in (url, ip, mail):
        binary.appendDiscreteList(1 if count > 0 else 0)
def __init__(self, sess, model_name, phase, train_list, val_list, train_batch_size, val_batch_size, learning_rate, beta1,epoch, \
        model_path = None, input_height = 160, input_width = 80, resize_height = 160, \
        resize_width = 80, crop = False, grayscale = False, sp = ',', img_root = './', \
        checkpoint_dir = './check_point',model_prefix='./tfmodel/', transfer = True, debug = False):
    """Configure a GAN trainer session.

    Stores hyper-parameters, prepares the checkpoint directory, builds the
    image IO helper, loads the train/val file lists, samples one image pair
    to record the input shape, instantiates the D/G networks (optionally
    from a transfer-learning checkpoint) and finally builds the TF graph.
    """
    self.sess = sess
    self.model_name = model_name
    self.phase = phase
    self.train_batch_size = train_batch_size
    self.val_batch_size = val_batch_size
    self.learning_rate = learning_rate
    self.beta1 = beta1
    self.epoch = epoch
    self.model_path = model_path
    self.input_height = input_height
    self.input_width = input_width
    self.resize_height = resize_height
    self.resize_width = resize_width
    self.crop = crop
    self.grayscale = grayscale
    self.sp = sp
    self.img_root = img_root
    self.checkpoint_dir = checkpoint_dir
    self.model_prefix = model_prefix
    self.DEBUG =debug
    if not os.path.exists(self.checkpoint_dir):
        os.makedirs(self.checkpoint_dir)
    print self.input_width
    # Image reader: loads, resizes and (optionally) crops image pairs
    # listed in `sp`-separated list files under img_root.
    self.ior = mio.IO(self.input_height, self.input_width, \
        resize_height = self.resize_height, resize_width = self.resize_width, \
        crop = self.crop, grayscale = self.grayscale, sp = self.sp, img_root = self.img_root)
    self.train_data = self.ior.read_file(train_list)
    if self.DEBUG:
        print len(self.train_data)
    self.train_batch_idxs = len(self.train_data) // self.train_batch_size
    self.val_data = self.ior.read_file(val_list)
    self.val_batch_idxs = len(self.val_data) // self.val_batch_size
    # Probe one sample pair to discover the image dimensions the graph
    # will be built with.
    if len(self.train_data) > 0:
        [image1,image2,label] = self.ior.get_image_pair(random.choice(self.train_data))
    elif len(self.val_data) > 0:
        [image1,image2,label] = self.ior.get_image_pair(random.choice(self.val_data))
    self.image_dims = list(image1.shape[:3])
    print self.image_dims
    self.transfer = transfer
    # Build discriminator and generator (variable prefixes 'd_' / 'g_').
    self.net_ = net.Net(self.model_path, self.transfer)
    self.DNet_ = self.net_.load_DNet(prefix='d_')
    self.GNet_ = self.net_.load_GNet(prefix='g_')
    self.build_model()
def __init__(self, input_dim):
    """Build the style-transfer network from pretrained VGG and decoder
    weights and move it to the GPU.

    NOTE: vgg_path / decoder_path come from module scope.
    """
    self.input_dimension = input_dim
    decoder_model = net.decoder
    encoder = net.vgg
    self.generate_model_dict(encoder, self.input_dimension)
    # Restore pretrained encoder weights, then truncate at relu4_1
    # (first 31 children).
    encoder.load_state_dict(torch.load(vgg_path))
    encoder = nn.Sequential(*list(encoder.children())[:31])
    decoder_model.load_state_dict(torch.load(decoder_path))
    self.net = net.Net(encoder, decoder_model)
    self.net.cuda()
def test_on_train_dataset(vis_per_img=10): dataset = datasets.PixelLinkIC15Dataset(config.train_images_dir, config.train_labels_dir, train=False) # dataloader = DataLoader(dataset, batch_size=config.batch_size, shuffle=False) my_net = net.Net() if config.gpu: device = torch.device("cuda:0") my_net = my_net.cuda() if config.multi_gpu: my_net = nn.DataParallel(my_net) else: device = torch.device("cpu") my_net.load_state_dict( torch.load(config.saving_model_dir + '%d.mdl' % config.test_model_index)) true_pos, true_neg, false_pos, false_neg = [0] * 4 for i in range(len(dataset)): sample = dataset[i] image = sample['image'].to(device) image = image.unsqueeze(0) my_labels = cal_label_on_batch(my_net, image)[0] # print("my labels num: %d" % len(my_labels)) res = comp_gt_and_output(my_labels, sample["label"], 0.5) if i % vis_per_img == 0: image = image.squeeze(0).cpu().numpy() image = ImgFormat.ImgOrderFormat(image, from_order="CHW", to_order="HWC") image = ImgTransform.UnzeroMeanImage(image, config.r_mean, config.g_mean, config.b_mean) image = ImgFormat.ImgColorFormat(image, from_color="RGB", to_color="BGR") image = visualize_label(image, my_labels, color=(0, 255, 0)) image = visualize_label(image, sample["label"]["coor"], color=(255, 0, 0)) cv2.imwrite("test_output/img_%d.jpg" % i, image) true_pos += res[0] false_pos += res[1] false_neg += res[2] if (true_pos + false_pos) > 0: precision = true_pos / (true_pos + false_pos) else: precision = 0 if (true_pos + false_neg) > 0: recall = true_pos / (true_pos + false_neg) else: recall = 0 print("i: %d, TP: %d, FP: %d, FN: %d, precision: %f, recall: %f" % (i, true_pos, false_pos, false_neg, precision, recall))
def test_output():
    """Smoke test: push a random batch through Vgg16 and print the shape of
    each returned feature map."""
    style_model = net.Net()
    ctx = mx.cpu(0)
    # Batch of 20 random 224x224 RGB "images".
    batch = mx.ndarray.random.normal(shape=(20, 3, 224, 224), ctx=ctx)
    vgg = net.Vgg16()
    print(vgg)
    vgg.initialize()
    features = vgg.forward(batch)
    for item in features:
        print(item.shape)
def evaluate(self):
    """Continuously evaluate the latest checkpoint.

    Builds the eval-split inference graph, restores the moving-average
    shadow variables and runs eval_once in a loop: top-1 always, and
    additionally top-5 when the dataset is ImageNet. Stops after one pass
    when self.run_once is set.
    """
    network = net.Net(self.arch, self.task_config)
    """Eval a network for a number of steps."""
    with tf.Graph().as_default() as grph:
        # Get images and labels for CIFAR-10.
        eval_data = True
        images, labels = network.inputs(eval_data=eval_data)
        arch = self.arch
        init_cell = self.init_cell
        classification_cell = self.classification_cell
        log_stats = self.log_stats
        scope = "Nacnet"
        is_training = False  # eval graph: disable training-only behaviour
        logits = network.inference(images, arch, init_cell,
                                   classification_cell, log_stats,
                                   is_training, scope)
        # Calculate predictions.
        # if imagenet is running then run precision@1,5
        top_k_op = tf.nn.in_top_k(logits, labels, 1)
        if self.dataset == "imagenet":
            # Quick dirty fixes to incorporate changes brought by
            # imagenet
            self.num_examples = 50000
            top_5_op = tf.nn.in_top_k(logits, labels, 5)
        # Restore the moving average version of the learned variables for
        # eval.
        variable_averages = tf.train.ExponentialMovingAverage(
            net.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)
        # Build the summary operation based on the TF collection of
        # Summaries.
        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(self.eval_dir, grph)
        while True:
            self.eval_once(saver, summary_writer, top_k_op, summary_op)
            if self.dataset == "imagenet":
                self.eval_once(saver, summary_writer, top_5_op, summary_op,
                               k=5)
            if self.run_once:
                break
def simple_test(self):
    """End-to-end check: parse a sample PGN file, fit the net on it and
    verify the prediction shape."""
    pgn_text = open('data/test/sample_eval.pgn').read()
    x, y = parse.pgn_file_to_array(pgn_text)
    num_states, cols1, cols2, layers = x.shape
    # Each board state must be NUM_COLUMNS x NUM_COLUMNS x NUM_DIMENSIONS.
    self.assertEqual(
        (cols1, cols2, layers),
        (parse.NUM_COLUMNS, parse.NUM_COLUMNS, parse.NUM_DIMENSIONS))
    model = net_module.Net()
    model.fit(x, y)
    predictions = model.predict(x)
    # One probability vector over all possible moves per state.
    self.assertEqual(predictions.shape, (num_states, helpers.EXPECTED_MOVES))
def test_fake(self):
    """Fit the net on random data and check the prediction output shape."""
    model = net_module.Net()
    NUM_OBS = 1000
    shape_input = (NUM_OBS, parse.NUM_COLUMNS, parse.NUM_COLUMNS,
                   parse.NUM_DIMENSIONS)
    shape_output = (NUM_OBS, helpers.EXPECTED_MOVES)
    x_fake = np.random.random(shape_input)
    # Integer class labels in [0, EXPECTED_MOVES).
    y_fake = np.random.randint(0, helpers.EXPECTED_MOVES, size=NUM_OBS)
    model.fit(x_fake, y_fake)
    y_hat = model.predict(x_fake)
    # BUG FIX: removed `self.assertTrue(1, 0)` — that is assertTrue(1,
    # msg=0), which always passes and asserted nothing.
    self.assertEqual(y_hat.shape, shape_output)
def bayes_func(repeat_time, learning_rate, batch_size):
    """Objective for Bayesian hyper-parameter search: train once with the
    sampled hyper-parameters and return the negated validation loss."""
    # Write the sampled hyper-parameters into the global config; model
    # saving is disabled during the search.
    utils.TRAIN_EPOCH_REPEAT_NUM = int(repeat_time)
    utils.BASE_LEARNING_RATE = learning_rate
    utils.BATCH_SIZE = int(batch_size)
    utils.SAVE_MODEL = False

    model_num = 0
    _net = net.Net(model_num)
    _net.load_model(model_num)
    _net.logger.info(
        'repeat_time: {}, learning_rate: {}, batch_size: {}'.format(
            repeat_time, learning_rate, batch_size))
    net.train(model_num, write_summary=False)
    acc, loss = net.verificate(model_num)
    # The optimiser maximises its objective, so minimise loss by negation.
    return -loss
def createCookie(url, cj=None, agent='Mozilla/5.0 (Windows NT 6.1; rv:32.0) Gecko/20100101 Firefox/32.0'):
    """Solve a Cloudflare jschl challenge to obtain clearance cookies.

    Computes the JS-challenge answer, POSTs it to /cdn-cgi/l/chk_jschl and
    saves the resulting cookies to *cj* when one is given. Returns the
    final response body, or '' when anything fails.
    """
    urlData = ''
    try:
        import urlparse, cookielib, urllib2

        class NoRedirection(urllib2.HTTPErrorProcessor):
            # Pass responses through untouched so redirects are not followed.
            def http_response(self, request, response):
                return response

        def parseJSString(s):
            # Evaluate Cloudflare's obfuscated JS arithmetic built from
            # '[]' / '!+[]' tricks; returns None when evaluation fails.
            try:
                offset = 1 if s[0] == '+' else 0
                val = int(eval(s.replace('!+[]', '1').replace('!![]', '1')
                                .replace('[]', '0').replace('(', 'str(')[offset:]))
                return val
            except:
                pass

        if cj == None:
            cj = cookielib.CookieJar()
        headers = {'User-Agent': agent, 'Referer': url}
        result = requests.get(url, headers=headers).content
        jschl = re.compile('name="jschl_vc" value="(.+?)"/>').findall(result)[0]
        init = re.compile('setTimeout\(function\(\){\s*.*?.*:(.*?)};').findall(result)[0]
        builder = re.compile(r"challenge-form\'\);\s*(.*)a.v").findall(result)[0]

        # Seed value, then apply each +=/-=/*=//= modifier line in order.
        decryptVal = parseJSString(init)
        for line in builder.split(';'):
            if len(line) > 0 and '=' in line:
                sections = line.split('=')
                line_val = parseJSString(sections[1])
                decryptVal = int(eval(str(decryptVal) + sections[0][-1] + str(line_val)))

        # Cloudflare adds the domain length to the final answer.
        answer = decryptVal + len(urlparse.urlparse(url).netloc)

        http = url.split('//')[0]
        domain = url.split('//')[1].split('/')[0]
        u = http + '//' + domain
        query = '%s/cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s' % (u, jschl, answer)
        if 'type="hidden" name="pass"' in result:
            # Newer challenge pages also require the hidden "pass" token.
            passval = re.compile('name="pass" value="(.*?)"').findall(result)[0]
            query = '%s/cdn-cgi/l/chk_jschl?pass=%s&jschl_vc=%s&jschl_answer=%s' % (
                u, urllib.quote_plus(passval), jschl, answer)
        xbmc.sleep(4 * 1000)  ##sleep so that the call work
        import net
        net = net.Net()
        final = net.http_POST(query, '', headers=headers)
        if not cj == '':
            net.save_cookies(cj)
        return final.content
    except:
        traceback.print_exc(file=sys.stdout)
        # BUG FIX: the original returned `urldata`, an undefined name (the
        # local is `urlData`), raising NameError inside the error path.
        return urlData