def get_files(fname, path, folder_name, n=1):
    """Collect ``(source_path, archive_name)`` pairs for every file change
    whose AST-diff count matches *n*.

    Args:
        fname: dataset file handed to ``utils.entry_itr``.
        path: root directory containing one sub-directory per entry id.
        folder_name: prefix used to namespace the archive names.
        n: required value of ``metadata["num_ast_diffs"]`` for a change to
            be included.  NOTE(review): the original body referenced a free
            variable ``n`` (a NameError unless a module global existed); it
            is now an explicit parameter — confirm the intended default.

    Returns:
        List of ``(full_path, target_name)`` tuples covering the buggy AST,
        fixed AST, shift-diff file, and the buggy JS source of each change.
    """
    files = []
    for entry in utils.entry_itr(fname):
        _id = str(entry["id"])
        for f in entry["files_changed"]:
            # Only keep changes with exactly n AST diffs.
            if f["metadata"]["num_ast_diffs"] != n:
                continue
            b_ast = utils.get_ast_fname(f["buggy_file"])
            f_ast = utils.get_ast_fname(f["fixed_file"])
            diff = "SHIFT_" + f["ast_diff_file"]
            prefix = folder_name + "_" + _id + "_"
            files.append((os.path.join(path, _id, b_ast), prefix + b_ast))
            files.append((os.path.join(path, _id, f_ast), prefix + f_ast))
            files.append((os.path.join(path, _id, diff), prefix + diff))
            js_file = utils.get_source(path, _id, f["buggy_file"])
            # Strip the "<path>/<id>/" prefix to get the bare file name
            # for the archive entry.
            files.append((js_file,
                          folder_name + "_" + _id + "_" +
                          js_file.replace(path + "/" + _id + "/", "")))
    return files
def freudify(request, url=None):
    '''
    Handles freudifying of the url it gets as POST-data.

    On a valid POST the URL is taken from the form; otherwise a previously
    supplied *url* argument is used.  Returns the freudified page as an
    HttpResponse, a ``home(...)`` error page on failure, or a redirect to
    the base URL when no URL is available at all.
    '''
    if request.method == 'POST':
        form = URLform(request.POST)
        if form.is_valid():
            url = request.POST['url']
        else:
            return home(request, "Not a valid POST-data. Check the URL.")
    # Idiom fix: identity comparison against the None singleton (PEP 8),
    # replacing the original `url != None`.
    if url is not None:
        # hack'n'slash because of mod_wsgi rewrite rules that remove double
        # slashes after domain name
        if url[:7] != 'http://':
            url = 'http://' + url[6:]
        source = get_source(url)
        if source is None:
            return home(
                request,
                "An error happened while trying to retrieve source from %s" % url)
        # get_source signals HTTP-level problems with an int status code
        # (-1 meaning the source was too long to process).
        if isinstance(source, int):
            if source == -1:
                return home(request, "URL's source is too long.")
            return home(request, "%s returned %s" % (url, source))
        freudified = slip(source, url)
        if freudified is None:
            return home(request,
                        "Encountered an error while freudifying %s" % url)
        return HttpResponse(content=freudified)
    return HttpResponseRedirect(reverse('slip_base_url'))
def freudify(request, url = None):
    '''
    Handles freudifying of the url it gets as POST-data.

    On a valid POST the URL comes from the submitted form; otherwise the
    *url* argument is used.  Returns the freudified page, a ``home(...)``
    error page on failure, or a redirect to the base URL when no URL is
    available.
    '''
    if request.method == 'POST':
        form = URLform(request.POST)
        if form.is_valid():
            url = request.POST['url']
        else:
            return home(request, "Not a valid POST-data. Check the URL.")
    # Idiom fix: identity comparison against the None singleton (PEP 8),
    # replacing the original `url != None`.
    if url is not None:
        # hack'n'slash because of mod_wsgi rewrite rules that remove double
        # slashes after domain name
        if url[:7] != 'http://':
            url = 'http://' + url[6:]
        source = get_source(url)
        if source is None:
            return home(request,
                        "An error happened while trying to retrieve source from %s" % url)
        # get_source signals HTTP-level problems with an int status code
        # (-1 meaning the source was too long to process).
        if isinstance(source, int):
            if source == -1:
                return home(request, "URL's source is too long.")
            return home(request, "%s returned %s" % (url, source))
        freudified = slip(source, url)
        if freudified is None:
            return home(request,
                        "Encountered an error while freudifying %s" % url)
        return HttpResponse(content=freudified)
    return HttpResponseRedirect(reverse('slip_base_url'))
def reply_message(msg):
    """Build (and, unless DEBUG, actually send via itchat) a reply to an
    incoming WeChat message.

    Returns a dict describing the actions taken: it may contain a 'text'
    and/or 'image' entry of the form (payload, destination); it is empty
    when the message was ignored or produced no reply.
    """
    global DEBUG  # toggled below by the status flag from get_response()
    source = get_source(msg)  # reply destination; falsy -> message ignored
    print(msg)
    print(source)
    rtn = dict()  # dictionary to store the actions
    if source:
        if msg['Type'] == 'Text':
            content = msg['Text']
            # Only react to texts starting with a space — presumably the
            # marker left by an @-mention; TODO confirm against itchat docs.
            if content.startswith(' '):
                print('Received: ' + content)
                reply, status = get_response(content.strip())
                if not reply:
                    print('Getting no response')
                    return rtn
                # A strictly-boolean status toggles debug mode:
                # status True -> DEBUG False, status False -> DEBUG True.
                if status is True or status is False:
                    DEBUG = not status
                if reply == 'tmp.jpg':
                    # Image reply: send (or, in DEBUG, just log) the picture.
                    if not DEBUG:
                        itchat.send_image('tmp.jpg', source)
                    else:
                        print('Sending image tmp.jpg to {}'.format(source))
                    rtn['image'] = ('tmp.jpg', source)
                    return rtn
                # Text reply, prefixed with the bot tag.
                reply = u'机器人: ' + reply
                if not DEBUG:
                    itchat.send(reply, source)
                else:
                    print('Sending {} to {}'.format(reply, source))
                rtn['text'] = (reply, source)
                return rtn
        elif msg['Type'] == 'Picture':
            filename = msg['FileName']
            try:
                # msg['Text'] is the download callable supplied by itchat:
                # fetch the picture, then stash it under img/ as .jpg.
                msg['Text'](filename)
                shutil.move(filename, 'img/' + re.sub('.png', '.jpg', filename))
            except Exception as e:
                print(e)
            try:
                start_time = time.time()
                info = convert_image('img/' + re.sub('.png', '.jpg', filename), 'tmp.jpg')
                print('{} seconds used for image classification'.format(
                    time.time() - start_time))
                if info:
                    print('output: ' + info)
                    reply = u'机器人: ' + info
                    if DEBUG:
                        print('Sending {} to {}'.format(reply, source))
                    else:
                        itchat.send(reply, source)
                    rtn['text'] = (reply, source)
                    return rtn
            except Exception as e:
                print(e)
    else:
        # NOTE(review): in the flattened original the attachment of this
        # else was ambiguous; it is read here as "no reply destination".
        print('Ignored')
    return rtn
def get_source(self, status):
    """Return the resolved source string of *status*, or '' when the
    status object carries no ``source`` attribute."""
    return get_source(status.source) if hasattr(status, 'source') else ''
def test_get_source(self):
    """utils.get_source must strip the anchor markup down to its text."""
    markup = '<a href="http://tyrs.nicosphere.net/" rel="nofollow">tyrs</a>'
    self.assertEqual(utils.get_source(markup), 'tyrs')
def get_source(self, status): source = "" if hasattr(status, "source"): source = get_source(status.source) return source
def get_source(self, status):
    """Resolve the ``source`` attribute of *status* through the
    module-level ``get_source``; fall back to '' when it is missing."""
    has_source = hasattr(status, 'source')
    return get_source(status.source) if has_source else ''
def start_requests(self):
    """Yield a splash request for every dataset record whose ``code``
    field is still empty (i.e. not yet scraped)."""
    for record in get_source():
        if record.code != "":
            continue
        request_url = self.splash_api_1 + record.id + self.splash_api_2
        yield scrapy.Request(url=request_url, callback=self.parse)
def get_source(self):
    """Delegate to ``utils.get_source`` on the wrapped original object."""
    original = self._original
    return utils.get_source(original)