def init_like():
    """Initialise the Parse connection and download the Like graph.

    Both the ``LikeNode`` and ``LikeEdge`` Parse classes are fetched;
    only the node list is returned (the edge fetch's result is
    discarded, matching the original behaviour).
    """
    initParse()
    nodes = ParsePy.ParseQuery("LikeNode").fetch()
    edges = ParsePy.ParseQuery("LikeEdge").fetch()
    return nodes
def new_company(request):
    """Django view: create a new Company record in Parse.

    POST with a valid CompanyForm persists a Company object and
    redirects to ``/thanks/``.  A GET renders an empty form; an invalid
    POST re-renders the bound form with its errors.
    """
    if request.method == 'POST':  # form has been submitted
        form = CompanyForm(request.POST)
        if form.is_valid():
            company = ParsePy.ParseObject("Company")
            company.name = form.cleaned_data['name']
            company.description = form.cleaned_data['description']
            company.save()
            return HttpResponseRedirect('/thanks/')
    else:
        # BUG FIX: the original also fetched every Company into an
        # unused local ("parents") here -- a wasted network round trip
        # on every GET.  Removed.
        form = CompanyForm()  # form not submitted yet
    return render(request, 'new_company.html', {
        'form': form,
    })
def load_db(street_name=u"信義路", direction=u"東"): #dirname = street_name + "_" + direction global dirname dirpath = os.path.join(dbname, dirname) query = ParsePy.ParseQuery("Node") query = query.limit(10000).eq("streetName", street_name).eq("direction", direction) nodes = query.fetch() print "There're %d nodes on the server." % (len(nodes)) global files if not os.path.exists(dirpath): fetch_data(nodes, street_name, direction) else: files = [f for f in os.listdir(dirpath) if f[-3:] == "jpg"] data_file_path = os.path.join(dbname, dirname, data_file_name) if os.path.isfile(data_file_path): f = open(data_file_path, 'r') global db_dict db_dict = json.load(f) if db_dict is None: db_dict = {} if not ((len(files) / 8 == len(nodes)) and (len(db_dict) / 8 == len(nodes))): fetch_data(nodes, street_name, direction) else: print "Your database is up-to-date." else: print "data.json file does not exists." fetch_data(nodes, street_name, direction) global data_points data_points = [geopy.Point(n.lat, n.lng) for n in nodes]
def fetchGraph():
    """Build an igraph Graph whose vertices mirror the Parse "Node"
    class.

    Each vertex gets ``checked``/``nid``/``lat``/``lng`` attributes.
    The "Edge" class is fetched but not wired into the graph here, and
    (matching the original) nothing is returned.
    """
    g = Graph()
    nodes = ParsePy.ParseQuery("Node").fetch()
    edges = ParsePy.ParseQuery("Edge").fetch()  # fetched but unused in this version
    g.add_vertices(len(nodes))
    # PERF FIX: enumerate replaces nodes.index(node), which made the
    # loop accidentally O(n^2).
    for idx, node in enumerate(nodes):
        v = g.vs[idx]
        v["checked"] = False
        v["nid"] = str(idx)
        v["lat"] = node.lat
        v["lng"] = node.lng
def produce(request):
    """Django view used for testing: persist one hard-coded Item to
    Parse with every ethics flag cleared."""
    item = ParsePy.ParseObject("Item")
    item.title = "Test Item"
    item.description = "Testing parse with Django"
    # All four sourcing/ethics flags start out False on the test item.
    for flag in ("unethical_animal_practices",
                 "corporate_irresponsibility",
                 "environmentally_unsustainable",
                 "exploited_labor"):
        setattr(item, flag, False)
    item.save()
def get_clip_data(video_name="IMG_2124.mov", id="HK6Kyn3LMr"): #default id is for IMG_2124.mov global testcase_data testcase_data = ParsePy.ParseQuery("Clip").get(id) video_path = os.path.join("testcase", video_name) if os.path.isfile(video_path): global testcase_video testcase_video = cv2.VideoCapture(video_path) if not testcase_video.isOpened(): testcase_video = None print "ERROR: video file isn't opened" else: print "Plase place " + video_name + " under directory testcase/"
def fetchGraph(name=""): g = Graph() print "Start Downloading" query = ParsePy.ParseQuery(name + "Node") nodes = query.fetch() query = ParsePy.ParseQuery(name + "Edge") edges = query.fetch() print "Download Complete" g.add_vertices(len(nodes)) for node in nodes: v = g.vs[nodes.index(node)] v["checked"] = False v["nid"] = str(nodes.index(node)) v["objectId"] = node.objectId() v["lat"] = node.lat v["lng"] = node.lng for i, edge in enumerate(edges): v1 = g.vs.select(objectId=edge.node1.objectId())[0] v2 = g.vs.select(objectId=edge.node2.objectId())[0] g.add_edges((int(v1["nid"]), int(v2["nid"]))) g.es[i]["n"] = {v1["nid"]: edge.bearing1, v2["nid"]: edge.bearing2} g.es[i]["length"] = edge.length g.es[i]["n1"] = v1["nid"] g.es[i]["n2"] = v2["nid"] g.es[i]["bearing1"] = edge.bearing1 g.es[i]["bearing2"] = edge.bearing2 g.es[i]["checked"] = False return g
def fetch_data(street_name, direction): print "Fetching data for " + street_name query = ParsePy.ParseQuery("Node") query = query.limit(10000).eq("streetName", street_name).eq("direction", direction) nodes = query.fetch() data = [] for n in nodes: print "(" + str(n.lat) + ", " + str(n.lng) + ")" data.append([n.lat, n.lng]) for i in range(8): url = imageurl(n.lat, n.lng, i * 45) save_image(url, "sv_%f_%f_%d.jpg" % (n.lat, n.lng, i)) print "Output images are in images/%s/" % (sys.argv[3]) f = open("images/%s/data.csv" % (sys.argv[3]), 'w') w = csv.writer(f) w.writerows(data) f.close()
def companies(request):
    """Django view: render the page listing every Company in Parse."""
    # Local renamed so it no longer shadows this view function's name.
    company_list = ParsePy.ParseQuery("Company").fetch()
    context = {'companies': company_list}
    return render_to_response('companies.html', context)
from boto.s3.key import Key from boto.sqs.message import Message from boto.sqs.connection import SQSConnection from boto.s3.connection import S3Connection AWS_ACCESS_KEY = '' AWS_SECRET_KEY = '' ParsePy.APPLICATION_ID = "" ParsePy.MASTER_KEY = "" conn = SQSConnection(AWS_ACCESS_KEY, AWS_SECRET_KEY) convert_q = conn.create_queue('FZconvertQueue', 120) upload_q = conn.create_queue('FZuploadQueue', 120) convert_q.clear() upload_q.clear() query = ParsePy.ParseQuery("UploadObject") fzobjects = query.fetch() for fzobj in fzobjects: print fzobj.objectId() fzobj.delete() print convert_q.count() print upload_q.count() print len(fzobjects)
# Connect to Amazon S3 S3 = boto.connect_s3(AMAZON_ACCESS_KEY, AMAZON_SECRET_KEY) BUCKET = S3.get_bucket(BUCKET_NAME) k = Key(BUCKET) # Grab the Parse records from the last hour ParsePy.APPLICATION_ID = PARSE_APP_ID ParsePy.MASTER_KEY = PARSE_MASTER_ID timeNow = datetime.datetime.utcnow() time1hr = timeNow - datetime.timedelta(0, 60 * 60 * 1) time1hrISO = time1hr.strftime("%Y-%m-%dT%H:%M:%S.%f")[0:-3] + 'Z' time1hrISO = {"__type": "Date", "iso": time1hrISO} query = ParsePy.ParseQuery("Juxta") query = query.gt("createdAt", time1hrISO) Juxtas = query.fetch() # Create a top-level temporary folder for the job TEMP_DIR_PATH = os.path.join(os.getcwd(), TEMP_DIR) os.chdir(TEMP_DIR_PATH) randTopDir = ''.join(str(random.randint(0, 9)) for _ in range(20)) os.mkdir(randTopDir) os.chdir(os.path.join(os.getcwd(), randTopDir)) # Iterate through the images to be juxtaposed for j in Juxtas: # Change to a new subdirectory randSubDir = ''.join(str(random.randint(0, 9)) for _ in range(20)) os.mkdir(randSubDir)
def processMessageFromSQS(temp_message): message_dict = json.loads(temp_message.get_body()) convert_q.delete_message(temp_message) pprint.pprint(message_dict) figureObject = ParsePy.ParseQuery("UploadObject").get( message_dict['parseID']) if figureObject == None: print 'object not found' figureObject.stat = 2 figureObject.processingStatus = 'Upload found.' figureObject.published = False figureObject.save() figureObject.figureTitle = '' figureObject.figureDescription = '''''' # try: # figureObject = ParsePy.ParseObject("UploadObject") # figureObject.uploadUser = message_dict['parseID'] figureObject.uploadEmailAddress = message_dict['emailAddress'] figureObject.tag = [''] figureObject.processingStatus = 'Upload message received' print figureObject.processingStatus # folder ID rootID = message_dict['rootID'] figureObject.rootID = rootID # image ID imageID = str(uuid.uuid4()) figureObject.imageID = imageID # shortened URL viewURL = 'http://figurezero.com/fz/%s' % (figureObject.objectId()) bitly = bitly_api.Connection('figurezero', 'BITLY_KEY') shortenedURL = bitly.shorten(viewURL) figureObject.shortenedURL = shortenedURL['url'] print shortenedURL # QR code url qrLinkURL = shortenedURL['url'] + '.qrcode?s=400' print qrLinkURL figureObject.qrLinkURL = qrLinkURL # path where file will be downloaded from S3 to for processing local_path = os.path.abspath(os.path.curdir) + '/local/' + rootID + '/' original_filename = message_dict['key'].split('/')[-1] original_extension = os.path.splitext(original_filename)[-1] local_filename = local_path + '%s%s' % (imageID, original_extension) converted_filename = local_path + '%s.png' % (imageID) converted_filenameStatic = local_path + '%s-512.jpg' % (imageID) figureObject.localPath = local_path figureObject.localFilename = local_filename figureObject.originalFileSize = message_dict['size'] figureObject.originalExtension = original_extension figureObject.originalFilename = original_filename figureObject.convertedFilename = 
converted_filename figureObject.convertedStaticImage = converted_filenameStatic # create s3 connection s3conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY) bucket = s3conn.get_bucket('figurezero') key = bucket.get_key(message_dict['key']) # verify key exists on if key.exists(): # update status figureObject.processingStatus = 'Key found, downloading' print figureObject.processingStatus #verify download directory exists, if not create if not os.path.exists(local_path): figureObject.processingStatus = 'Path doesnt exist, creating' print figureObject.processingStatus os.makedirs(local_path) else: figureObject.processingStatus = 'Path doesnt exist, creating' print figureObject.processingStatus #download to local key.get_contents_to_filename(local_filename) figureObject.stat = 3 figureObject.processingStatus = 'Transferred to conversion server.' figureObject.save() if os.path.exists(local_filename): # update status figureObject.processingStatus = 'File downloaded, engage conversion' print figureObject.processingStatus if original_extension.lower() in [ '.pdf', '.png', '.jpg', '.jpeg', '.tif', '.tiff', '.psd', '.ps', '.svg', '.jp2' ]: # inkscape sumatra_poster_Neuroinf2011.svg --export-png=testposter.png -d 96 if 'svg' in original_extension.lower(): cmdstr = '/usr/bin/inkscape %s -d 96 --export-png=%s ' % ( local_filename, converted_filename) pcmd = os.popen(cmdstr) for e in pcmd: print e scale_cmdstr = '/usr/bin/convert %s -quality 80 -resize 512x512 %s' % ( converted_filename, converted_filenameStatic) pcmd = os.popen(scale_cmdstr) for e in pcmd: print e else: cmdstr = '/usr/bin/convert -density 96 %s %s ' % ( local_filename, converted_filename) pcmd = os.popen(cmdstr) for e in pcmd: print e scale_cmdstr = '/usr/bin/convert %s -quality 80 -resize 512x512 %s' % ( converted_filename, converted_filenameStatic) pcmd = os.popen(scale_cmdstr) for e in pcmd: print e print os.path.abspath(os.curdir) # update status figureObject.processingStatus = 'File converted, Starting tile 
process' print figureObject.processingStatus figureObject.stat = 4 # convert okay figureObject.processingStatus = 'Converted into standard form.' figureObject.save() cmdstr = '/usr/bin/identify -format "%%wx%%h" %s ' % ( converted_filename) pcmd = os.popen(cmdstr) firstLine = True for e in pcmd: if firstLine: figureObject.originalImageSize = e firstLine = False print e import opentiler opentiler.tile(converted_filename, False, local_path, local_path, False, 1, 1) # update status figureObject.stat = 5 # tiled okay figureObject.processingStatus = 'Tiles created.' print figureObject.processingStatus static_location = '%s/s3/%s/%s-512.jpg' % (activedomain, rootID, imageID) image_properties_location = '%s/s3/%s/%s/ImageProperties.xml' % ( activedomain, rootID, imageID) tile_location = '%s/s3/%s/%s/' % (activedomain, rootID, imageID) thumbnail_location = '%s/s3/%s/%s/TileGroup0/0-0-0.jpg' % ( activedomain, rootID, imageID) s3image_properties_location = 'https://s3.amazonaws.com/fzero/tile/%s/%s/ImageProperties.xml' % ( rootID, imageID) s3tile_location = 'https://s3.amazonaws.com/fzero/tile/%s/%s/' % ( rootID, imageID) s3thumbnail_location = 'https://s3.amazonaws.com/fzero/tile/%s/%s/TileGroup0/0-0-0.jpg' % ( rootID, imageID) s3static = 'https://s3.amazonaws.com/fzero/tile/%s/%s-512.jpg' % ( rootID, imageID) figureObject.s3static = s3static figureObject.static_location = static_location figureObject.imagePropertiesLocation = image_properties_location figureObject.s3imagePropertiesLocation = s3image_properties_location figureObject.tileLocation = tile_location figureObject.s3tileLocation = s3tile_location figureObject.thumbnailLocation = thumbnail_location figureObject.s3thumbnailLocation = s3thumbnail_location figureObject.referenceArray = [] figureObject.viewArray = [] figureObject.save() #create upload message new_m = Message() message_dict = {} message_dict['upload_dir'] = local_path message_dict['upload_bucket'] = 'fzero' message_dict['rootID'] = rootID 
message_dict['parseID'] = figureObject.objectId() # message_dict['key'] = key_name # message_dict['tilekey'] = uuid_string new_m.set_body(json.dumps(message_dict)) status = upload_q.write(new_m) # else downloaded file does not exist else: # update status figureObject.processingStatus = 'Error occurred during download' print figureObject.processingStatus figureObject.save() # else key not found else: figureObject.processingStatus = 'Key not found in S3 bucket' print figureObject.processingStatus figureObject.save()
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1]) hsv[..., 0] = ang * 180 / np.pi / 2 hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX) rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR) rgb_gray = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY) m, n = rgb_gray.shape mask = np.zeros_like(rgb[..., 0]) for a in range(0, m): for b in range(0, n): if rgb_gray[a][b] < 10: mask[a][b] = 255 cv2.imwrite('mask-2111/frame-' + str(0.2 * i) + '.jpg', frame) cv2.imwrite('mask-2111/mask-' + str(0.2 * i) + '.jpg', mask) cv2.imwrite('mask-2111/rgb-' + str(0.2 * i) + '.jpg', rgb) #mask_dict[str(i*0.2)] = mask """# Now update the previous frame and previous points old_gray = frame_gray.copy() p0 = good_new.reshape(-1,1,2) prvs = nextf""" cap.release() init_parse() load_db() cap = cv2.VideoCapture('streetview-data/IMG_2111.mov') testcase_data = ParsePy.ParseQuery("Clip").get('Pb9erVwDty') generate_mask(cap, testcase_data.length)