def character_assets_by_market_group(request, charID, marketGroupID=None):
    """Return a character's assets aggregated by type/location as JSON.

    If marketGroupID is None, assets whose type has no market group are
    returned; otherwise assets in that market group or any of its
    sub-groups are returned.  Rows are ordered by total quantity, desc.
    """
    response = HttpResponse(mimetype="application/json")
    logger.debug("User %s request assets for character %s by market group %s"
                 % (request.user.username, charID, marketGroupID))
    char = Character.objects.get(pk=charID)

    # Expand an aggregated row into a JSON-friendly dict (resolves the
    # type and location names with extra per-row queries).
    expand = lambda obj: {
        "typeID": obj['typeID'],
        "typeName": invTypes.objects.get(pk=obj['typeID']).typeName,
        "locationName": mapDenormalize.objects.get(pk=obj['locationID']).itemName,
        "locationID": obj['locationID'],
        "quantity": obj['total'],
    }

    # Both branches previously duplicated the aggregation/serialization
    # code; only the type filter actually differs.
    if marketGroupID is None:
        # No group given: match assets whose type has no market group.
        type_filter = {'typeID__marketGroupID': None}
    else:
        # Include the group itself and all of its descendants.
        marketGroup = invMarketGroups.objects.get(pk=marketGroupID)
        type_filter = {'typeID__marketGroupID__in': marketGroup.findMarketGroupIDs()}

    assets = (char.assetList.asset_set.filter(**type_filter)
              .values('typeID', 'locationID')
              .annotate(total=Sum('quantity'))
              .order_by('-total'))
    result = [expand(x) for x in assets]
    response.write(JSONResponse(success=True, result=result).json())
    return response
def corporationAssetsByMarketGroup(request, corpID, marketGroupID=None):
    """Return a corporation's assets aggregated by type/location as JSON.

    If marketGroupID is None, assets whose type has no market group are
    returned; otherwise assets in that market group or any of its
    sub-groups are returned.  Rows are ordered by total quantity, desc.
    """
    response = HttpResponse(mimetype="application/json")
    corp = Corporation.objects.get(pk=corpID)

    # Expand an aggregated row into a JSON-friendly dict (resolves the
    # type and location names with extra per-row queries).
    expand = lambda obj: {
        "typeID": obj['typeID'],
        "typeName": invTypes.objects.get(pk=obj['typeID']).typeName,
        "locationName": mapDenormalize.objects.get(pk=obj['locationID']).itemName,
        "locationID": obj['locationID'],
        "quantity": obj['total'],
    }

    # NOTE: removed a leftover `print assets[0]` debug statement that also
    # raised IndexError whenever the queryset was empty.
    if marketGroupID is None:
        # No group given: match assets whose type has no market group.
        type_filter = {'typeID__marketGroupID': None}
    else:
        # Include the group itself and all of its descendants.
        marketGroup = invMarketGroups.objects.get(pk=marketGroupID)
        type_filter = {'typeID__marketGroupID__in': marketGroup.findMarketGroupIDs()}

    assets = (corp.assetList.asset_set.filter(**type_filter)
              .values('typeID', 'locationID')
              .annotate(total=Sum('quantity'))
              .order_by('-total'))
    result = [expand(x) for x in assets]
    response.write(JSONResponse(success=True, result=result).json())
    return response
def post_import_session(request, project_id="0"):
    """Import a session (previously exported as JSON) into the given project.

    Reads the uploaded 'userfile', retargets it at project_id, and saves it
    through the same serializer used for export.  Returns 500 with a JSON
    message on any failure.
    """
    project_id = int(project_id)
    try:
        # Accumulate content from uploaded file.  join() avoids the
        # quadratic cost of repeated string concatenation on large uploads.
        f = request.FILES['userfile']
        content = ''.join(f.chunks())
        j = json.loads(content)
        j['project_id'] = project_id  # change project to target project_id

        # Import data into database, using the same serializer used to export
        serializer = SessionExportSerializer(data=j)
        if not serializer.is_valid():
            return JSONResponse({'message': 'Invalid Data'}, status=500)
        serializer.save()
    except Exception as e:
        client.captureException()
        return JSONResponse({'message': 'Error: %s' % e}, status=500)
    return HttpResponse()
def removeAPIKey(request):
    """Delete one of the requesting user's API keys.

    POST param: keyID - primary key of the APIKey to delete.
    """
    response = HttpResponse(mimetype="application/json")
    keyID = request.POST["keyID"]
    # Security: restrict deletion to keys owned by the requesting user so a
    # user cannot delete someone else's key by guessing its primary key
    # (the previous version deleted any key by pk).
    APIKey.objects.filter(pk=keyID, user=request.user).delete()
    response.write(JSONResponse(success=True).json())
    return response
def wrapper(request, corpID, *args, **kwargs):
    # NOTE(review): this looks like a truncated copy of the wrapper defined
    # inside corporation_permission_required.  As written it never calls a
    # wrapped view and (on a successful lookup) returns an empty response —
    # confirm whether this fragment is actually used before relying on it.
    response = HttpResponse(mimetype="application/json")
    corp = None
    try:
        corp = Corporation.objects.get(pk=corpID)
    except Exception, e:
        # Report the lookup failure (e.g. unknown corpID) as a JSON error.
        jsonResponse = JSONResponse(success=False, message=str(e))
        response.write(jsonResponse.json())
        return response
def take_export_path(request, take_id="0"):
    """Read (GET) or update (POST) the export path recorded on a Take."""
    take = Take.objects.get(pk=int(take_id))
    if request.method == 'GET':
        return JSONResponse({'export_path': take.export_path})
    if request.method == 'POST':
        body = json.loads(request.body)
        take.export_path = body['export_path']
        take.save()
        return JSONResponse({'result': 'OK', 'export_path': take.export_path})
    # Any other HTTP verb is rejected.
    return HttpResponse(status=500)
def post_new_shot(request, location_id="0"):
    """Create a new shot in the current session of a capture location.

    POST body: {"name": <shot name>}.  The name is made unique within the
    session by unique_shot_name().  Returns the new shot's id and name.
    """
    if request.method != 'POST':
        # Previously non-POST requests fell through and returned None,
        # which is a server error in Django; reject them explicitly.
        return HttpResponse(status=405)
    location_id = int(location_id)
    # security check: caller must have write access to this location
    loc = request.user.access_rights.filter(
        id=location_id, locationaccess__write_access=True)
    if not loc:
        return HttpResponse(status=403)
    # read option from request
    j = json.loads(request.body)
    if 'name' not in j:
        return HttpResponse(status=500)
    # Check if shot name is unique, otherwise, increment it
    shot_name = unique_shot_name(j['name'], loc[0].cur_session)
    # Create new shot and make it the location's current shot
    shot = ArchiveShot(name=shot_name, session=loc[0].cur_session)
    shot.save()
    loc[0].cur_shot = shot
    loc[0].save()
    return JSONResponse({'id': shot.id, 'name': shot.name})
def post_toggle_capturing(request):
    """Forward a 'toggle capturing' request to the node that owns a camera."""
    if request.method != 'POST':
        return HttpResponse(status=500)
    body = json.loads(request.body)
    if 'camera_id' not in body:
        return HttpResponse(status=500)
    cam = Camera.objects.get(pk=body['camera_id'])
    if not cam:
        return JSONResponse(
            {'message': 'Camera %d not found' % body['camera_id']}, status=404)
    if not has_write_access_node(request, cam.node):
        return HttpResponse(status=403)
    url = 'http://%s:8080/toggle_capturing/%s/' % (cam.node.ip_address,
                                                   cam.unique_id)
    # Best-effort call to the capture node; failures are only logged.
    try:
        requests.post(url, timeout=DEFAULT_NODE_HTTP_TIMEOUT)
    except Exception as e:
        g_logger.error('%s: %s' % (url, e))
    return HttpResponse()
def get(self, request, **kwargs):
    """Return the list of sandboxes owned by the authenticated app."""
    owned = Sandbox.objects.filter(owner_app=request.app)
    return JSONResponse({'sandboxes': [box.to_dict() for box in owned]})
def job_mesh(request, job_id="0"):
    """Attach an uploaded mesh thumbnail file to a farm job.

    The file is stored under static/thumb with a job-derived unique name,
    and the filename is recorded on the job.
    """
    job_id = int(job_id)
    try:
        job = FarmJob.objects.get(pk=job_id)
    except FarmJob.DoesNotExist:
        # `get` raises rather than returning None, so the previous
        # `if not job` guard was dead code; return the intended 404.
        return HttpResponse(status=404)
    if not request.FILES:
        return HttpResponse(status=500)
    ext = os.path.splitext(request.FILES['file'].name)[1]
    filename = 'j%08d_%s%s' % (job_id, uuid_node_base36(), ext)
    filepath = os.path.join(BASE_DIR, 'static', 'thumb', filename)
    # Write file to disk
    try:
        f = request.FILES['file']
        with open(filepath, 'wb') as destination:
            for chunk in f.chunks():
                destination.write(chunk)
        job.mesh_filename = filename
        job.save()
    except Exception as e:
        client.captureException()
        return JSONResponse({'message': '%s' % e}, status=500)
    return HttpResponse()
def location_config(request, location_id="0"):
    """Read (GET) or update (POST) configuration of a capture location.

    GET requires read access (public or per-user); POST requires write
    access and also pushes the options to all online nodes at the location.
    """
    location_id = int(location_id)
    if request.method == 'GET':
        # Try with locations readable by all
        loc = CaptureLocation.objects.filter(
            id=location_id, read_access_all=True)
        if not loc:
            # Filter by access rights
            loc = request.user.access_rights.filter(
                id=location_id, locationaccess__read_access=True)
        if loc:
            result = {}
            result['hardware_sync_frequency'] = loc[0].hardware_sync_frequency
            result['pulse_duration'] = loc[0].pulse_duration
            result['external_sync'] = loc[0].external_sync
            return JSONResponse(result)
        return HttpResponse(status=403)
    if request.method == 'POST':
        msgs = []
        g = request.user.access_rights.filter(
            id=location_id, locationaccess__write_access=True)
        if not g:
            # Previously an unauthorized POST fell through and returned
            # None (a Django server error); reject it explicitly.
            return HttpResponse(status=403)
        j = json.loads(request.body)
        # Store location options in DB
        if 'pulse_duration' in j:
            g[0].pulse_duration = int(j['pulse_duration'])
        if 'frequency' in j:
            g[0].hardware_sync_frequency = int(j['frequency'])
        if 'external_sync' in j:
            g[0].external_sync = bool(j['external_sync'])
        if 'display_focus_peak' in j:
            g[0].display_focus_peak = bool(j['display_focus_peak'])
        if 'display_overexposed' in j:
            g[0].display_overexposed = bool(j['display_overexposed'])
        if 'display_histogram' in j:
            g[0].display_histogram = bool(j['display_histogram'])
        if 'bitdepth_avi' in j:
            g[0].bitdepth_avi = int(j['bitdepth_avi'])
        if 'bitdepth_single' in j:
            g[0].bitdepth_single = int(j['bitdepth_single'])
        # set options on all nodes
        nodes = CaptureNode.objects.filter(online=True,
                                           location__id=location_id)
        g_pool.map(apply_options_on_node,
                   [(n, request.body, msgs) for n in nodes])
        g[0].save()
        return HttpResponse('Ok ' + ' '.join(msgs))
    # Unsupported HTTP verb.
    return HttpResponse(status=405)
def logout(request):
    """simply logout the current user"""
    response = HttpResponse(mimetype='application/json')
    if not request.user.is_authenticated():
        # Nobody to log out; report the failure.
        msg = JSONResponse(success=False,
                           message="You must login before you logout")
        response.write(msg.json())
        logger.error("User tried to logout but wasn't logged in.")
        return response
    # Capture the name before the session is destroyed.
    username = request.user.username
    auth.logout(request)
    msg = JSONResponse(success=True, message="Logout successful")
    response.write(msg.json())
    logger.info("User %s logged out" % username)
    return response
def addAPIKey(request):
    """Register a new EVE API key for the requesting user.

    POST params: keyID, vCode, name.  The key's details are fetched
    asynchronously via the updateAPIKey Celery task.
    """
    response = HttpResponse(mimetype="application/json")
    # (removed leftover `print request.body` debug statement)
    apiKey = APIKey(keyID=request.POST["keyID"],
                    vCode=request.POST["vCode"],
                    name=request.POST["name"],
                    user=request.user)
    apiKey.save()
    # Populate the key's data in the background.
    updateAPIKey.delay(apiKey.pk)
    response.write(JSONResponse().json())
    return response
def job_detailed(request, job_id="0"):
    """Return the detailed serialization of a single farm job."""
    try:
        job = FarmJob.objects.get(pk=int(job_id))
    except FarmJob.DoesNotExist:
        # `get` raises on a miss (it never returns None), so the previous
        # `if not job` guard was dead code; return the intended 404.
        return HttpResponse(status=404)
    serializer = FarmJobDetailedSerializer(job, many=False,
                                           context={'request': request})
    return JSONResponse(serializer.data)
def node_detailed(request, node_id="0"):
    """Return the serialization of a single farm node."""
    try:
        node = FarmNode.objects.get(pk=int(node_id))
    except FarmNode.DoesNotExist:
        # `get` raises on a miss (it never returns None), so the previous
        # `if not node` guard was dead code; return the intended 404.
        return HttpResponse(status=404)
    serializer = FarmNodeSerializer(node, many=False,
                                    context={'request': request})
    return JSONResponse(serializer.data)
def node_shutdown(request):
    """Mark every capture node at the reported IP address as offline."""
    ip = request.data['ip_address']
    g_logger.debug('Node Shutdown %s' % ip)
    # Save each node individually so model save() side effects still run.
    for node in CaptureNode.objects.filter(ip_address=ip):
        node.online = False
        node.save()
    return JSONResponse({'Result': 'OK'})
def corporationAssetsByTypeName(request, corpID, typeName=""):
    """Return up to 10 corp asset aggregates whose type name contains typeName."""
    response = HttpResponse(mimetype="application/json")
    corp = Corporation.objects.get(pk=corpID)

    def expand(row):
        # Resolve display names for one aggregated (typeID, locationID) row.
        return {
            "typeID": row['typeID'],
            "typeName": invTypes.objects.get(pk=row['typeID']).typeName,
            "locationName": mapDenormalize.objects.get(pk=row['locationID']).itemName,
            "locationID": row['locationID'],
            "quantity": row['total'],
        }

    rows = (corp.assetList.asset_set
            .filter(typeID__typeName__icontains=typeName)
            .values('typeID', 'locationID')
            .annotate(total=Sum('quantity'))
            .order_by('-total')[:10])
    response.write(JSONResponse(success=True,
                                result=[expand(r) for r in rows]).json())
    return response
def apiKeys(request):
    """Refresh any expired API keys for the user, then return all of them."""
    response = HttpResponse(mimetype="application/json")
    keys = request.user.apikey_set.all()
    tasks = []
    for key in keys:
        if key.expired():
            tasks.append(updateAPIKey.subtask([key.pk]))
    job = TaskSet(tasks=tasks)
    result = job.apply_async()
    # Block until the refresh tasks finish so the re-query below sees the
    # updated keys.  (Removed the leftover `print result.join()` debug
    # statement; the join() itself is what provides the waiting.)
    result.join()
    keys = request.user.apikey_set.all()
    jsonResponse = JSONResponse(success=True, result=keys)
    response.write(jsonResponse.json())
    return response
def corporationAssetsDetailTree(request, corpID, typeID, locationID):
    """Return the container hierarchy of a corp's assets of one type at one
    location, as a nested JSON tree rooted at the location."""
    response = HttpResponse(mimetype="application/json")
    corp = Corporation.objects.get(pk=corpID)
    assets = corp.assetList.asset_set.filter(typeID=typeID, locationID=locationID)
    # getPath() presumably yields the chain of containers up to the asset —
    # TODO confirm; reversed below so insertion proceeds root-first.
    paths = [x.getPath() for x in assets]
    root = Node(locationID)
    for path in paths:
        path.reverse()
        for asset in path:
            # Skip assets already present; otherwise attach under the
            # parent container if it is in the tree, else under the root.
            node = root.find(asset)
            if not node:
                node = root.find(asset.parent)
                if node:
                    node.insert(asset)
                else:
                    root.insert(asset)
    # Aggregate quantities bottom-up over the finished tree.
    root.generateQuantity()
    # Recursively serialize a tree node and its children.
    expand = lambda obj: {
        "ID": obj.data.pk,
        "typeID": obj.data.typeID_id,
        "typeName": obj.data.typeID.typeName,
        "flag": obj.data.flag.flagName,
        "quantity": obj.quantity,
        "childs": [expand(x) for x in obj.childs],
    }
    result = [expand(x) for x in root.childs]
    jsonResponse = JSONResponse(success=True, result=result)
    response.write(jsonResponse.json())
    return response
def job_output(request, job_id="0"):
    """Append (POST) or fetch a slice of (GET) a farm job's console output.

    GET params: offset, length.  The slice is ANSI-to-HTML converted when
    it decodes as UTF-8; otherwise the raw bytes are returned.
    """
    job_id = int(job_id)
    try:
        job = FarmJob.objects.get(pk=job_id)
    except FarmJob.DoesNotExist:
        # `get` raises on a miss, so the old `if not job` check was dead
        # code; translate the miss into the intended 404.
        return HttpResponse(status=404)
    filepath = os.path.join(BASE_DIR, 'static', 'thumb', '%08d.output' % job_id)
    if request.method == 'POST':
        # Append the uploaded chunk(s) to the output file.
        f = request.FILES['file']
        with open(filepath, 'ab') as destination:
            for chunk in f.chunks():
                destination.write(chunk)
        return HttpResponse()
    if request.method == 'GET':
        offset = int(request.GET.get('offset', 0))
        length = int(request.GET.get('length', 1024 * 1024))  # max chunk size
        # Return content of stored file
        try:
            with open(filepath, 'rb') as f:
                f.seek(offset)
                data = f.read(length)
            try:
                conv = Ansi2HTMLConverter()
                html = conv.convert(data.decode('utf-8'), full=False)
                return JSONResponse({'content': html,
                                     'length': len(data),
                                     'status': job.status})
            except Exception:
                # Narrowed from a bare `except:`; fall back to raw bytes
                # when the data is not valid UTF-8 / cannot be converted.
                return JSONResponse({'content': data,
                                     'length': len(data),
                                     'status': job.status})
        except Exception as e:
            return HttpResponse('Exception: %s' % e, status=404)
    # Unsupported HTTP verb.
    return HttpResponse(status=405)
def camera_detailed(request, location_id="0", camera_id="0"):
    # Get details of one camera, including large thumbnail
    location_id = int(location_id)
    camera_id = int(camera_id)
    # Check permission for this location_id.  Location 0 ("Unknown") is
    # readable by everyone, hence the defaults below.
    read_access = True
    write_access = False
    if location_id:
        read_access = CaptureLocation.objects.get(
            pk=location_id
        ).read_access_all or request.user.access_rights.filter(
            id=location_id, locationaccess__read_access=True).count() > 0
        if not read_access:
            return HttpResponse(status=403)
        # NOTE(review): write_access is computed but never used below.
        write_access = request.user.access_rights.filter(
            id=location_id, locationaccess__write_access=True).count() > 0
    # filter camera by location and id
    cameras = Camera.objects.filter(node__location=location_id, id=camera_id)
    if cameras:
        camera = cameras[0]
        serializer = CameraSerializer(camera, many=False,
                                      context={'request': request})
        data = serializer.data
        result = {}
        result['camera'] = data
        # Fetch image data directly from capture node; failures are only
        # logged, so the response may simply lack 'jpeg_full'.
        try:
            jpeg_data = urllib2.urlopen(
                'http://%s:8080/camera/%s/large_preview'
                % (camera.node.ip_address, camera.unique_id),
                timeout=DEFAULT_NODE_HTTP_TIMEOUT).read()
            result['jpeg_full'] = b64encode(jpeg_data)
        except Exception as e:
            g_logger.error('large_preview %s: %s' % (camera.node.machine_name, e))
        return JSONResponse(result)
    else:
        return HttpResponse(status=404)
def corporations(request):
    """Refresh expired corporations tied to the user's API keys, then list them."""
    response = HttpResponse(mimetype="application/json")
    tasks = []
    for key in request.user.apikey_set.all():
        for corp in key.corporation_set.all():
            if corp.expired():
                tasks.append(updateCorporation.subtask([corp.pk]))
    job = TaskSet(tasks=tasks)
    result = job.apply_async()
    # Block until the refresh tasks finish so the queries below return
    # up-to-date rows.  (Removed the leftover `print result.join()` debug
    # statement; the join() itself provides the waiting.)
    result.join()
    # Union the corporations of all the user's keys.
    corps = Corporation.objects.none()
    for key in request.user.apikey_set.all():
        corps = corps | key.corporation_set.all()
    jsonResponse = JSONResponse(success=True, result=corps)
    response.write(jsonResponse.json())
    return response
def get(self, request, sandbox_slug):
    """Return details of one sandbox owned by the authenticated app,
    identified by sandbox_slug."""
    try:
        box = Sandbox.objects.get(slug=sandbox_slug, owner_app=request.app)
    except Sandbox.DoesNotExist:
        return HttpResponseNotFound('SandBox not found')
    # Pull fresh logs unless the sandbox has already terminated.
    if box.status != 'terminated':
        logic.fetch_logs(box)
    return JSONResponse(content=box.to_dict())
def get_locations(request):
    """Return all capture locations with per-location camera counts and the
    caller's read access, plus farm-wide job/node statistics."""
    # Use this opportunity to update any timed-out location
    CaptureNode.objects.filter(
        online=True,
        last_seen__lt=timezone.now() -
        datetime.timedelta(seconds=90)).update(online=False)
    result = {}
    result['locations'] = []
    for loc in CaptureLocation.objects.all():
        # Readable if public or explicitly granted to this user.
        read_access = loc.read_access_all or loc.users.filter(
            id=request.user.id, locationaccess__read_access=True).count() > 0
        result['locations'].append({
            'name': loc.name,
            'id': loc.id,
            # Cameras on nodes seen online within the last 90 seconds.
            'active': Camera.objects.filter(
                node__online=True,
                node__last_seen__gt=timezone.now() -
                datetime.timedelta(seconds=90),
                node__location=loc.id).count(),
            'access': read_access
        })
    # Cameras whose node has no location are grouped under a pseudo
    # location "Unknown" (id 0), visible to everyone.
    unknown_count = Camera.objects.filter(
        node__online=True,
        node__last_seen__gt=timezone.now() - datetime.timedelta(seconds=90),
        node__location__isnull=True).count()
    if unknown_count > 0:
        result['locations'].append({
            'name': 'Unknown',
            'id': 0,
            'active': unknown_count,
            'access': True
        })
    # Add extra statistics about the whole system
    result['nb_running_jobs'] = FarmJob.objects.filter(
        status='running').count()
    result['nb_queued_jobs'] = FarmJob.objects.filter(status='ready').count()
    result['nb_farmnodes_active'] = FarmNode.objects.filter(
        status='accepting').filter(last_seen__gt=timezone.now() -
                                   datetime.timedelta(seconds=90)).count()
    return JSONResponse(result)
def get_archive_json_file(request, session_id="0"):
    """Export one session as a downloadable JSON attachment."""
    session_id = int(session_id)
    try:
        queryset = Session.objects.get(pk=session_id)
    except Session.DoesNotExist:
        # `get` raises rather than returning None, so the old falsy check
        # was dead code; return the intended 404 on a miss.
        return HttpResponse(status=404)
    serializer = SessionExportSerializer(queryset, many=False,
                                         context={'request': request})
    response = JSONResponse(serializer.data)
    filename = 'exported_session_%d.json' % session_id
    response['Content-Disposition'] = 'attachment; filename=%s' % filename
    return response
def camera_parameter(request, location_id="0"):
    """Set one camera parameter; persists known ones and forwards to the node.

    POST body: {"cam_id": .., "parameter_name": .., "value": ..}.
    """
    location_id = int(location_id)
    if request.method != 'POST':
        # Previously non-POST requests fell through and returned None
        # (a Django server error); reject them explicitly.
        return HttpResponse(status=405)
    g = request.user.access_rights.filter(
        id=location_id, locationaccess__write_access=True)
    if not g:
        return HttpResponse(status=403)
    j = json.loads(request.body)
    camera = Camera.objects.get(pk=j['cam_id'])
    if not camera:
        return HttpResponse(status=404)
    # Some parameters are saved in the DB
    # TODO Generalize DB for parameters
    if j['parameter_name'] == 'exposure':
        camera.exposure = j['value']
        camera.save()
    if j['parameter_name'] == 'lens_aperture_value':
        camera.lens_aperture = j['value']
        camera.save()
    if j['parameter_name'] == 'gain':
        camera.gain = j['value']
        camera.save()
    # Update Node with new value
    try:
        r = requests.post('http://%s:8080/camera/%s/%s/%s' %
                          (camera.node.ip_address, camera.unique_id,
                           j['parameter_name'], j['value']),
                          data="", timeout=DEFAULT_NODE_HTTP_TIMEOUT)
        return JSONResponse(r.json())
    except Exception as e:
        g_logger.error('camera_parameter %s: %s' % (camera.node.machine_name, e))
        # Report the node failure instead of implicitly returning None.
        return JSONResponse({'message': '%s' % e}, status=500)
def post_new_session(request, location_id="0"):
    """Create a new, uniquely-named session at a capture location.

    POST body: {"name": <session name>}.  If the name already exists in
    the location's current project an incrementing suffix is appended.
    """
    if request.method != 'POST':
        # Previously non-POST requests fell through and returned None
        # (a Django server error); reject them explicitly.
        return HttpResponse(status=405)
    location_id = int(location_id)
    # Create new session at this location: caller needs write access.
    g = request.user.access_rights.filter(
        id=location_id, locationaccess__write_access=True)
    if not g:
        return HttpResponse(status=403)
    try:
        loc = CaptureLocation.objects.get(pk=location_id)
    except CaptureLocation.DoesNotExist:
        # `get` raises on a miss; the old `if not loc` check was dead code.
        return HttpResponse(status=404)
    j = json.loads(request.body)
    if 'name' not in j:
        return HttpResponse(status=500)
    session_name = j['name']
    # Is the session name unique? Otherwise append an incrementing suffix.
    i = 0
    while ArchiveSession.objects.filter(
            name=session_name, project=loc.cur_project).count() > 0:
        session_name = '%s_%03d' % (j['name'], i)
        i = i + 1
    # Create New Session and make it current (clearing the current shot).
    session = ArchiveSession(name=session_name, project=loc.cur_project)
    session.save()
    g[0].cur_session = session
    g[0].cur_shot = None
    g[0].save()
    return JSONResponse({'session_name': session.name,
                         'session_id': session.id})
def all_users(request):
    """Return all users, most recently logged-in first, decorated with
    presentation fields for the UI (gravatar URL, humanized last login)."""
    queryset = User.objects.all().order_by('-last_login')
    serializer = UserSerializer(queryset, many=True,
                                context={'request': request})
    data = serializer.data
    for u in data:
        u['gravatar'] = get_gravatar_url(u['email'], size=150)
        u['lastMessage'] = naturaltime(u['last_login'])
        # Placeholder: per-user chat messages are not implemented yet.
        # (Removed a block of commented-out sample message data.)
        u['messages'] = []
    return JSONResponse(data)
def login(request):
    """Allow a user to login

    Used HTTP POST variables:
    username - the username
    password - the password
    """
    response = HttpResponse(mimetype='application/json')
    # Drop any existing session before attempting a new login.
    auth.logout(request)
    if 'username' not in request.POST or 'password' not in request.POST:
        message = JSONResponse(success=False, message="Missing POST parameter!")
        response.write(message.json())
        logger.error("Invalid login: Username or password not set.")
        return response
    user = auth.authenticate(username=request.POST['username'],
                             password=request.POST['password'])
    if user is None:
        message = JSONResponse(success=False, message="Login failed!")
        response.write(message.json())
        # (log message grammar fixed: "does not exist")
        logger.warning("User %s tried to login, but login failed (wrong password or account does not exist)"
                       % request.POST['username'])
        return response
    if user.is_active:
        auth.login(request, user)
        message = JSONResponse(success=True, message="Login successful")
        response.write(message.json())
        logger.info("Login successful for user %s" % user)
    else:
        # Authenticated but the account is not activated yet.
        message = JSONResponse(success=False,
                               message="User not active! Activate your account first")
        response.write(message.json())
        logger.warning("User %s tried to login, but is not activated yet!" % user)
    return response
def post(self, request):
    """Create a new sandbox for the currently authenticated app.

    POST params: cmd (single command or JSON list of commands), image
    (image slug), optional timeout (non-negative integer).
    """
    data = request.POST
    try:
        cmd = data['cmd']
        image_slug = data['image']
    except KeyError:
        # dict access only raises KeyError; the old `(ValueError, KeyError)`
        # tuple included an impossible case.
        return HttpResponseBadRequest('Command or image slug missing')
    try:
        timeout = int(data['timeout'])
    except KeyError:
        timeout = None
    except ValueError:
        return HttpResponseBadRequest('Timeout has to be an integer')
    if timeout is not None and timeout < 0:
        return HttpResponseBadRequest('Timeout has to be positive')
    # `cmd` may be a JSON list of commands or a single command string.
    # (The old `except KeyError` arm after json.loads was unreachable.)
    try:
        commands = json.loads(cmd)
    except ValueError:
        commands = [cmd]
    if not isinstance(commands, list):
        return HttpResponseBadRequest('Malformed cmd field')
    try:
        image = Image.objects.get(slug=image_slug)
    except Image.DoesNotExist:
        # Narrowed from a bare `except:` so programming errors still surface.
        return HttpResponseBadRequest('No image found')
    files = request.FILES
    sandbox_slug = logic.create(request.app, commands, image, files, timeout)
    return JSONResponse(sandbox_slug)
def post_record_single_image(request):
    """Capture a single image on every online node at a location and
    archive the result as a new take.

    POST body: {"location": .., optional "session_id", optional "shot"}.
    """
    if request.method != 'POST':
        # Previously non-POST requests fell through and returned None
        # (a Django server error); reject them explicitly.
        return HttpResponse(status=405)
    j = json.loads(request.body)
    if 'location' not in j:
        return HttpResponse(status=500)
    location_id = j['location']
    if not has_write_access_location_id(request, location_id):
        return HttpResponse(status=403)
    nodes = CaptureNode.objects.filter(location__id=location_id, online=True)
    session_id = j['session_id'] if 'session_id' in j else None
    shot_name = j['shot'] if 'shot' in j else None
    create_session_shot(location_id, session_id, shot_name)
    summary = {'result': 'OK', 'nodes': []}
    # Prepare Single Image Capture
    g_pool.map(parallel_all_prepare_single, nodes)
    g_pool.map(parallel_all_prepare_multi2, nodes)
    # Start Single Image Capture
    g_pool.map(parallel_all_start_single, nodes)
    # Finalize Single Image Capture
    g_pool.map(parallel_all_finalize_single,
               [(n, summary['nodes']) for n in nodes])
    # Add rotation flag to all cameras (this info is in the DB, and does
    # not come from the nodes)
    add_rotation_info_to_cameras(summary)
    # Store capture in archive
    register_new_take(location_id, summary)
    return JSONResponse(summary)
def post_stop_recording(request):
    """Stop an in-progress recording on all online nodes at a location and
    archive the result as a new take.

    POST body: {"location": ..}.
    """
    if request.method != 'POST':
        # Previously non-POST requests fell through and returned None
        # (a Django server error); reject them explicitly.
        return HttpResponse(status=405)
    j = json.loads(request.body)
    if 'location' not in j:
        return HttpResponse(status=500)
    location_id = j['location']
    if not has_write_access_location_id(request, location_id):
        return HttpResponse(status=403)
    summary = {'result': 'OK', 'nodes': []}
    loc = CaptureLocation.objects.get(pk=location_id)
    nodes = CaptureNode.objects.filter(location__id=location_id, online=True)
    # Pause Sync all nodes
    g_pool.map(parallel_stop_sync, nodes)
    # Delay for all cameras to catch up for the last frame being
    # transferred from the camera
    time.sleep(0.5)
    # Stop Recording All Nodes
    g_pool.map(parallel_all_stop_recording,
               [(n, summary['nodes']) for n in nodes])
    # Resume sync for preview on all nodes
    g_pool.map(parallel_resume_preview, nodes)
    # Add rotation flag to all cameras (this info is in the DB, and does
    # not come from the nodes)
    add_rotation_info_to_cameras(summary)
    # Store capture in archive
    register_new_take(loc, summary, is_burst=False, is_scan=False)
    return JSONResponse(summary)
def response(self):
    """Build an HttpResponse for the collected files.

    Zero files -> JSON error (500); one file -> direct attachment with its
    own content type; several files -> a zip archive attachment.
    """
    if not self.files:
        # Bug fix: the old message referenced `cam`, which is not defined
        # in this scope, so this branch raised NameError instead of
        # returning the error response.
        return JSONResponse({'message': 'No images received'}, status=500)
    if len(self.files) == 1:
        only = self.files[0]
        response = HttpResponse(only.get('data'),
                                content_type=only.get('content_type'))
        response['Content-Disposition'] = \
            'attachment; filename="%s"' % only.get('filename')
        return response
    # Multiple files: pack them into an in-memory zip archive.
    buff = StringIO.StringIO()
    zip_archive = zipfile.ZipFile(buff, mode='w')
    for f in self.files:
        zip_archive.writestr(f['filename'], f['data'])
    zip_archive.close()
    response = HttpResponse(buff.getvalue(), content_type='compressed/zip')
    response['Content-Disposition'] = \
        'attachment; filename="%s"' % self.zip_filename
    return response
def corporation_permission_required(function):
    """Decorator for corporation views: checks permission and asset freshness.

    Returns a JSON failure if the corporation cannot be loaded or the user
    lacks 'eve.viewAssetList_corporation'.  If the corp's asset list is
    missing or expired, an async refresh is queued and its task id is
    returned instead of calling the wrapped view.
    """
    @wraps(function)
    def wrapper(request, corpID, *args, **kwargs):
        response = HttpResponse(mimetype="application/json")
        try:
            corp = Corporation.objects.get(pk=corpID)
        except Exception as e:
            response.write(JSONResponse(success=False, message=str(e)).json())
            return response
        if not request.user.has_perm('eve.viewAssetList_corporation', corp):
            response.write(JSONResponse(
                success=False, message="You don't have permission").json())
            return response
        # The missing and expired cases previously had two byte-identical
        # branches; both queue a refresh and return the task id.
        if corp.assetList is None or corp.assetList.expired():
            result = updateAssetList.delay(corp.pk, "Corporation")
            response.write(JSONResponse(success=True,
                                        taskID=result.task_id).json())
            return response
        return function(request, corpID, *args, **kwargs)
    # Bug fix: the decorator previously never returned the wrapper.
    return wrapper
def post_client_discover(request):
    """Farm-client heartbeat endpoint.

    Updates the node record from the client's report (hardware info, AWS
    state, running/finished jobs), then schedules and returns work for the
    node: jobs to run and jobs to kill.
    """
    if request.method == 'POST':
        update_aws_status = False
        cleanup_dead_jobs(request)  # TODO This could be on a schedule
        # Update database from recieved data
        r = json.loads(request.body)
        if 'status' not in r:
            raise Exception('Invalid request')
        # Look for existing machine in the database, with the same ip
        nodes = FarmNode.objects.filter(machine_name__iexact=r['machine_name'])
        if nodes:
            # Node exists in database, update it
            node = nodes[0]
            node.ip_address = request.data['ip_address']
            if node.aws_instance_state != 'running':
                update_aws_status = True
            node.last_seen = timezone.now()
        else:
            # Node does not exist, create it
            node = FarmNode(ip_address=r['ip_address'],
                            machine_name=r['machine_name'])
            update_aws_status = True
        metrics_client_last_seen.labels(
            node.machine_name).set_to_current_time()
        # Copy optional hardware/system fields from the report.
        if 'system' in r:
            node.system = r['system']
        if 'system_bits' in r:
            node.system_bits = r['system_bits']
        if 'cpu_brand' in r:
            node.cpu_brand = r['cpu_brand']
        if 'cpu_cores' in r:
            node.cpu_cores = r['cpu_cores']
        if 'cuda_device_count' in r:
            node.gpu_count = r['cuda_device_count']
        if 'restarted' in r and r['restarted']:
            node.req_restart = False
        if 'cpu_percent' in r:
            node.cpu_percent = r['cpu_percent']
            metrics_client_cpu.labels(node.machine_name).set(node.cpu_percent)
        if 'mem_used' in r:
            node.virt_percent = r['mem_used']
        if 'os_version' in r:
            node.os_version = r['os_version']
        # AWS Cloud integration
        if update_aws_status:
            node.aws_instance_id, node.aws_instance_region, node.aws_instance_state = aws.instance_id_from_private_ip(
                node.ip_address)
        else:
            # AWS, check if this instance should be stopped for inactivity
            if node.aws_instance_should_be_stopped():
                nb_aws_running = FarmNode.objects.filter(
                    aws_instance_state='running').count()
                slack_notification(
                    'Stopping inactive AWS instance: *%s* (running:%d)' %
                    (node.machine_name, nb_aws_running - 1),
                    color='warning')
                node.aws_instance_state = aws.stop_instance(
                    node.aws_instance_id, node.aws_instance_region)
        # Track the last time this node actually had a running job.
        if FarmJob.objects.filter(status='running', node=node).count() > 0:
            node.last_job_activity = timezone.now()
        node.code_version = r['code_version'] if 'code_version' in r else 0
        node.git_version = r['git_version'] if 'git_version' in r else ''
        node.status = r['status']
        node.save()
        # Update tags on farm node, if client_tags are supplied, otherwise, keep tags in DB
        if 'client_tags' in r:
            tags = r.get('client_tags', [])
            if node.aws_instance_id:
                tags.append('aws')
            if tags != node.tags:
                with transaction.atomic():
                    node.tags.set(*tags, clear=True)
                    # don't need node.save()
        else:
            # client did not specify any tags, use the ones in DB
            tags = node.tags.names()
        # In order to filter jobs by tags, we start with the list of all possible tags, then
        # remove the tags supported by this node. What remains is the list of tags that
        # cannot be fulfilled. Jobs with these tags should be filtered out.
        all_possible_tags = FarmNode.tags.all().values_list('name', flat=True)
        excluded_tags = [x for x in all_possible_tags if not x in tags]
        available_jobs = r['available_jobs'] if 'available_jobs' in r else []
        jobs_to_terminate = [
            job.id
            for job in FarmJob.objects.filter(status='terminating', node=node)
        ]
        # Update database from running and finished jobs (if they are not 'terminating')
        if 'running_jobs_progress' in r:
            for job_id, progress in r['running_jobs_progress']:
                FarmJob.objects.filter(
                    id=job_id).filter(~Q(status='terminating')).filter(
                        ~Q(status='running') | ~Q(progress=progress)).update(
                            status='running',
                            progress=progress,
                            modified=timezone.now())
        elif 'running_jobs' in r:
            FarmJob.objects.filter(id__in=r['running_jobs']).filter(~Q(
                status='terminating')).filter(~Q(status='running')).update(
                    status='running', modified=timezone.now())
        if 'finished_jobs' in r:
            for job in r['finished_jobs']:
                progress = job['progress'] if 'progress' in job else ''
                try:
                    # Update job with new status
                    this_job = FarmJob.objects.get(pk=job['job_id'])
                    this_job.progress = progress
                    if 'children' in job:
                        # Yield to children
                        for job_info in job['children']:
                            if isinstance(job_info, dict):
                                # Create child job
                                target_node = None
                                if 'node_name' in job_info:
                                    target_node = make_sure_node_exists(
                                        job_info['node_name'])
                                child = FarmJob(
                                    job_class=job_info['job_class'],
                                    created_by=this_job.created_by,
                                    params=job_info['params'],
                                    status='ready',
                                    parent=this_job,
                                    node=target_node,
                                    req_version=this_job.req_version,
                                    req_gpu=this_job.req_gpu)
                                child.save()
                            else:
                                # Backward compatibility code
                                child = FarmJob(job_class=job_info[0],
                                                created_by=this_job.created_by,
                                                params=job_info[1],
                                                status='ready',
                                                parent=this_job)
                                child.save()
                        g_logger.info('Job #%s set to WAITING' %
                                      (job['job_id']))
                        this_job.status = 'waiting'
                    elif 'success' in job and job['success']:
                        g_logger.info('Job #%s set to SUCCESS' %
                                      (job['job_id']))
                        this_job.status = 'success'
                        this_job.end_time = timezone.now()
                        metrics_job_success_count.labels(
                            node.machine_name).inc()
                    else:
                        g_logger.info('Job #%s set to FAILED' %
                                      (job['job_id']))
                        this_job.status = 'failed'
                        this_job.exception = job['exception']
                        this_job.end_time = timezone.now()
                        metrics_job_failed_count.labels(
                            node.machine_name).inc()
                    # Update parent job, if it exists
                    this_job.save()
                    onJobChanged(this_job, request)
                except ObjectDoesNotExist:
                    pass  # Job does not exist anymore
        if 'running_jobs' in r:
            # Jobs that are running according to the DB, but not according to the node
            for lost_job in FarmJob.objects.filter(
                    Q(status='running') | Q(status='terminating')).filter(
                        node=node).exclude(pk__in=r['running_jobs']):
                g_logger.info('Job #%d failed because not in running_jobs' %
                              (lost_job.id))
                lost_job.status = 'failed'
                lost_job.save()
                onJobChanged(lost_job, request)
        data = {}
        # Only hand out work to nodes that are accepting and (for AWS
        # nodes) actually running.
        if node.status == 'accepting' and (node.aws_instance_id is None or
                                           node.aws_instance_state == 'running'):
            data['jobs'] = []
            data['jobs_to_kill'] = []
            data['req_restart'] = node.req_restart
            # Scheduler, reserve some tasks for specific machines
            if not node.req_restart:
                try:
                    with transaction.atomic():
                        # Classes representing 2 different job channels, one for light jobs, one for heavy jobs
                        # These two channels will be executing concurrently on the machines
                        light_job_classes = [
                            'jobs.thumbnails.GenerateThumbnail',
                            'jobs.test.SpeedTest'
                        ]

                        class Channel():
                            def __init__(self):
                                self.max_instances = 1
                                self.nb_running = FarmJob.objects.filter(
                                    status='running',
                                    node=node).filter(self.filter_q()).count()

                            def can_run(self):
                                return self.nb_running < self.max_instances

                        class LightChannel(Channel):
                            def filter_q(self):
                                return Q(job_class__in=light_job_classes)

                        class HeavyChannel(Channel):
                            def filter_q(self):
                                return ~Q(job_class__in=light_job_classes)

                        channels = [LightChannel(), HeavyChannel()]
                        if True in [c.can_run() for c in channels]:
                            # Query for all jobs we could run on this node
                            if node.active:
                                next_jobs = FarmJob.objects.select_for_update(
                                ).filter(
                                    status='ready',
                                    req_version__lte=node.code_version
                                ).filter(Q(node=node) | Q(node=None)).filter(
                                    job_class__in=available_jobs).exclude(
                                        tags__name__in=excluded_tags)
                            else:
                                next_jobs = FarmJob.objects.select_for_update(
                                ).filter(
                                    status='ready',
                                    req_version__lte=node.code_version).filter(
                                        Q(node=node)).filter(
                                            job_class__in=available_jobs
                                        ).exclude(tags__name__in=excluded_tags)
                            # Add filter for GPU
                            if node.gpu_count <= 0:
                                next_jobs = next_jobs.filter(req_gpu=False)
                            # Sort jobs by priority
                            next_jobs = next_jobs.order_by('-priority')
                            # Create filters for each channel
                            filter_q_list = [
                                c.filter_q() for c in channels if c.can_run()
                            ]
                            if filter_q_list:
                                # Apply filter for each channel
                                next_jobs = next_jobs.filter(
                                    or_list(filter_q_list))
                            # Go thru each job, check dependency, and exit as soon as one good job is found
                            for next_job in next_jobs:
                                # Check Job Dependencies (filter if that there are no dependencies that are not 'success')
                                if next_job.dependencies.filter(~Q(
                                        status='success')).count() == 0:
                                    # TODO This should be in the same query, otherwise we may be looping for no reason
                                    g_logger.info(
                                        'Job #%s RESERVED for %s' %
                                        (next_job.id, node.machine_name))
                                    # Make sure there are no child on this job
                                    next_job.children.all().delete()
                                    # Send a single job to this machine
                                    next_job.status = 'reserved'
                                    next_job.node = node
                                    next_job.exception = None
                                    next_job.start_time = timezone.now()
                                    next_job.save()
                                    break
                except Exception as e:
                    client.captureException()
                    g_logger.error('Scheduler failed %s' % e)
            # Send reserved jobs to node
            jobs = FarmJob.objects.filter(status='reserved', node=node)
            for job in jobs:
                g_logger.info('Job #%s SUBMIT to %s' %
                              (job.id, node.machine_name))
                job_data = {
                    'job_id': job.id,
                    'job_class': job.job_class,
                    'params': job.params
                }
                data['jobs'].append(job_data)
            # Send jobs to kill to node
            for job_id in jobs_to_terminate:
                g_logger.info('Job #%s KILL to %s' %
                              (job_id, node.machine_name))
                data['jobs_to_kill'].append(job_id)
        metrics_client_nb_running.labels(node.machine_name).set(
            node.jobs.filter(status='running').count())
        return JSONResponse(data)
def get_last_export_path(request):
    """Return the requesting user's last-used export path as JSON."""
    return JSONResponse({'path': getUserData(request).export_path})
def download_original(request):
    """Download the original image file(s) for one camera of a take.

    GET parameters:
        take   -- Take primary key (defaults to 0)
        camera -- camera unique_id within that take

    Fetches each requested frame over HTTP from the capture node that
    recorded it and returns them packed into a zip via ImagePacker.
    Single-frame takes and (scan) bursts are supported; any other
    multi-frame sequence is rejected with a 500.
    """
    take_id = int(request.GET.get('take', 0))
    cam_uid = request.GET.get('camera')
    take = Take.objects.get(pk=take_id)

    # Look for this camera in the Take
    cams = take.cameras.filter(unique_id=cam_uid)
    if not cams:
        # Camera not found in take
        return HttpResponse(status=404)
    cam = cams[0]  # Camera found

    # Only single frames and bursts may be downloaded this way
    if take.frame_count() > 1 and not (take.is_burst or take.is_scan_burst):
        return JSONResponse(
            {'message': 'Download not supported for sequences'}, status=500)

    frame_index_list = [0]

    # Find node associated with this file
    node = CaptureNode.objects.filter(machine_name=cam.machine_name)
    if not node:
        # Node not found in db
        return JSONResponse(
            {'message': 'Capture node not found: ' + cam.machine_name},
            status=500)

    if take.is_burst or take.is_scan_burst:
        print('Downloading %d frames' % take.frame_count())
        frame_index_list = range(take.frame_count())

    # Try to guess the file extension from the all_files field on the camera
    extension = 'tif'
    if '.tif' not in cam.all_files and '.raw' in cam.all_files:
        extension = 'raw'

    image_packer = ImagePacker()
    image_packer.zip_filename = 'Take_%04d_raw.zip' % take_id

    for frame_index in frame_index_list:
        # BUG FIX: build the payload as a dict and mutate it *before*
        # serializing. The original code called json.dumps() first and then
        # did json_data['folder'] = ... — assigning into a str, which raises
        # TypeError whenever take.export_path is set.
        payload = {
            'folder': os.path.split(cam.folder)[0],
            'unique_id': cam_uid,
            'frame_index': frame_index,
            'extension': extension
        }
        if take.export_path:
            # Take was already exported, need to get image from exported
            # location (if accessible)
            payload['folder'] = take.export_path
        json_data = json.dumps(payload)

        url = 'http://%s:8080/download/' % (node[0].ip_address)
        try:
            result = urllib2.urlopen(url, data=json_data,
                                     timeout=DEFAULT_NODE_HTTP_TIMEOUT)
            file_data = result.read()
            basename = 'take_%d_%s_%04d.%s' % (take_id, cam_uid, frame_index,
                                               extension)
            image_packer.add(basename, file_data, result.info().type)
        except Exception as e:
            client.captureException()
            # Log label fixed: it was copy/pasted as 'post_toggle_using_sync'
            g_logger.error('download_original %s: %s' % (cam.machine_name, e))
            return JSONResponse(
                {
                    'message':
                    'Could not download file from %s' % cam.machine_name
                },
                status=500)

    return image_packer.response()
def get(self, request):
    """Return the slugs of every Image object as a JSON list."""
    return JSONResponse([img.slug for img in Image.objects.all()])
def post_record_single_image(request):
    """Trigger a single-image (or burst) capture on every online node at a location.

    Expects a POST with a JSON body containing at least 'location' (location id),
    and optionally 'burst_length', 'burst_is_scan', 'session_id' and 'shot'.
    Runs the prepare/start/finalize capture phases in parallel across nodes,
    records per-phase timings, creates the session/shot, registers the take in
    the archive, and returns the accumulated summary as JSON.

    NOTE(review): falls through (implicitly returns None) when the method is
    not POST or 'location' is missing — presumably surfaced as a server error
    by the framework; confirm callers always POST with 'location'.
    """
    if request.method == 'POST':
        j = json.loads(request.body)
        if 'location' in j:
            location_id = j['location']
            # Optional burst parameters; default is a single frame
            burst_length = int(j['burst_length']) if 'burst_length' in j else 1
            burst_is_scan = bool(
                j['burst_is_scan']) if 'burst_is_scan' in j else False
            if not has_write_access_location_id(request, location_id):
                return HttpResponse(status=403)
            # Only nodes that are currently online take part in the capture
            nodes = CaptureNode.objects.filter(location__id=location_id,
                                               online=True)
            summary = {}
            summary['result'] = 'OK'
            summary['nodes'] = []
            summary['timings'] = []  # list of (phase_name, seconds) pairs
            timings_start = time.time()
            # Prepare Single Image Capture (each node paired with burst_length)
            g_pool.map(parallel_all_prepare_single,
                       zip(nodes, [burst_length] * len(nodes)))
            summary['timings'].append(
                ('parallel_all_prepare_single', time.time() - timings_start))
            timings_start = time.time()
            # Second prepare phase, run after every node finished phase one
            g_pool.map(parallel_all_prepare_single2, nodes)
            summary['timings'].append(
                ('parallel_all_prepare_single2', time.time() - timings_start))
            timings_start = time.time()
            # Start Single Image Capture
            g_pool.map(parallel_all_start_single, nodes)
            summary['timings'].append(
                ('parallel_all_start_single', time.time() - timings_start))
            timings_start = time.time()
            # Finalize Single Image Capture; each worker appends its result
            # into the shared summary['nodes'] list
            g_pool.map(parallel_all_finalize_single,
                       [(n, summary['nodes']) for n in nodes])
            summary['timings'].append(
                ('parallel_all_finalize_single', time.time() - timings_start))
            timings_start = time.time()
            # Add rotation flag to all cameras (this info is in the DB, and
            # does not come from the nodes)
            add_rotation_info_to_cameras(summary)
            summary['timings'].append(
                ('add_rotation_info_to_cameras', time.time() - timings_start))
            timings_start = time.time()
            # Store in DB
            loc = CaptureLocation.objects.get(pk=location_id)
            # Create shot (session/shot names are optional in the request)
            session_id = j['session_id'] if 'session_id' in j else None
            shot_name = j['shot'] if 'shot' in j else None
            create_session_shot(loc, session_id, shot_name)
            summary['timings'].append(
                ('create_session', time.time() - timings_start))
            timings_start = time.time()
            # Store capture in archive; a capture is a scan only when it is
            # also a burst
            register_new_take(loc,
                              summary,
                              is_burst=burst_length > 1,
                              is_scan=burst_is_scan and burst_length > 1)
            summary['timings'].append(
                ('register_new_take', time.time() - timings_start))
            timings_start = time.time()
            return JSONResponse(summary)
def current_user(request):
    """Serialize the requesting user and attach a gravatar URL for their email."""
    data = UserSerializer(request.user, context={'request': request}).data
    data['gravatar'] = get_gravatar_url(data['email'], size=150)
    return JSONResponse(data)
def register(request):
    """allows a user to register

    Used HTTP POST variables:
    username - the username
    password - the password
    confirm - the password again
    email - the email address

    Returns a JSON body with 'success' and 'message' fields; all validation
    failures are reported with success=False rather than an HTTP error code.
    """
    response = HttpResponse(mimetype='application/json')

    # All four POST fields must be present
    if 'email' not in request.POST or 'username' not in request.POST or 'password' not in request.POST or 'confirm' not in request.POST:
        message = JSONResponse(success=False,
                               message="Missing POST parameter! ")
        response.write(message.json())
        return response

    if request.POST['username'] == "" or request.POST['password'] == "":
        message = JSONResponse(success=False,
                               message="Username and Password can't be empty")
        response.write(message.json())
        return response

    if User.objects.filter(username=request.POST['username']).exists():
        message = JSONResponse(success=False,
                               message="Username already exists!")
        response.write(message.json())
        return response

    if not email_re.match(request.POST['email']):
        message = JSONResponse(
            success=False,
            message="Email doesn't look like a valid email address")
        response.write(message.json())
        return response

    if not request.POST['password'] == request.POST['confirm']:
        message = JSONResponse(
            success=False,
            message="Password confirm must be equal to password")
        response.write(message.json())
        return response

    user = User.objects.create_user(request.POST['username'],
                                    request.POST['email'],
                                    request.POST['password'])
    user.is_superuser = False
    # BUG FIX: was 'user.is_stuff' — a typo that silently set a nonexistent
    # attribute instead of Django's User.is_staff flag.
    user.is_staff = False
    user.is_active = True
    user.save()
    message = JSONResponse(success=True, message="User created")
    response.write(message.json())
    return response
def restart_job(request):
    """Restart (or clone-and-restart) a farm job.

    Expects 'job_id' in request.data; optional booleans 'clone_job' and
    'use_same_machine'. Cloning creates a fresh duplicate job; otherwise the
    existing job is reset to 'ready' after its children are deleted.

    Returns: 500 if job_id missing, 404 if the job does not exist,
    403 on missing write access or running children, 200 otherwise.
    """
    if 'job_id' not in request.data:
        return HttpResponse(status=500)

    clone_job = request.data.get('clone_job', False)
    use_same_machine = request.data.get('use_same_machine', False)

    # Find Job to be restarted.
    # BUG FIX: .get() raises DoesNotExist rather than returning None, so the
    # old 'if not src_job' check was unreachable and a bad id produced an
    # unhandled exception (500) instead of the intended 404.
    try:
        src_job = FarmJob.objects.get(pk=request.data['job_id'])
    except FarmJob.DoesNotExist:
        return HttpResponse(status=404)

    if not src_job.has_write_access(request.user):
        return HttpResponse(status=403)

    if clone_job:
        # Create duplicate job
        job = FarmJob(job_class=src_job.job_class,
                      created_by=request.user.username,
                      params=src_job.params,
                      node=src_job.node if use_same_machine else None,
                      ext_take=src_job.ext_take,
                      ext_scan_assets=src_job.ext_scan_assets,
                      ext_tracking_assets=src_job.ext_tracking_assets,
                      req_gpu=src_job.req_gpu,
                      priority=src_job.priority,
                      status='created')
        job.save()

        # copy tags in a second pass (required for ManyToMany)
        job.tags.set(*src_job.tags.names(), clear=True)

        job.status = 'ready'
        job.save()
        g_logger.info('Job #%d restarted as job #%d' % (src_job.id, job.id))
    else:
        # If some children are still running, refuse to restart
        if src_job.children.filter(Q(status='running')
                                   | Q(status='waiting')).count() > 0:
            return JSONResponse({'message': 'Error, child running'},
                                status=403)

        # Delete all child jobs
        src_job.children.all().delete()

        on_job_restart(src_job.id)

        # Update job status and clear all per-run state
        src_job.status = 'ready'
        src_job.exception = None
        src_job.image_filename = None
        src_job.mesh_filename = None
        src_job.progress = None
        src_job.start_time = None
        src_job.end_time = None
        if not use_same_machine:
            src_job.node = None
        src_job.save()
        g_logger.info('Job #%d restarted' % (src_job.id))

    return HttpResponse()
def post_new_session(request, location_id="0"):
    """Create a new archive session (and initial shot) at a capture location.

    POST body (JSON): 'name' (required session name), optional 'project_id'
    to attach to an existing project or 'project_name' to create a new one.
    Updates the location's current project/session/shot and returns their
    names and ids as JSON. Returns 403 without write access, 500 on a
    missing or fully-sanitized-away name.

    NOTE(review): implicitly returns None for non-POST requests — matches
    the other POST handlers in this file.
    """
    if request.method == 'POST':
        location_id = int(location_id)

        # Check user permission
        locs = request.user.access_rights.filter(
            id=location_id, locationaccess__write_access=True)
        if not locs:
            return HttpResponse(status=403)
        loc = locs[0]

        # Get parameters from request
        j = json.loads(request.body)
        if not 'name' in j:
            return HttpResponse(status=500)
        session_name = j['name']

        # Clean weird characters
        session_name = re.sub('[^a-zA-Z0-9_-]', '', session_name)
        if not session_name:
            return HttpResponse(status=500)

        project = loc.cur_project

        # Create or select project for this new session
        if 'project_id' in j:
            # Add session to existing project
            project = ArchiveProject.objects.get(pk=int(j['project_id']))
        if 'project_name' in j:
            # Create new project
            project = ArchiveProject(name=j['project_name'])
            project.save()

        # Is the session name unique?
        # BUG FIX: derive numbered candidates from the *sanitized* name; the
        # old code used the raw j['name'], re-introducing the characters that
        # were just stripped.
        base_name = session_name
        i = 0
        while ArchiveSession.objects.filter(name=session_name,
                                            project=project).count() > 0:
            session_name = '%s_%03d' % (base_name, i)
            i = i + 1

        # Create New Session
        session = ArchiveSession(name=session_name, project=project)
        session.save()

        # Create New Shot
        shot = ArchiveShot(name='Shot_000', session=session)
        shot.save()

        # Save Location
        loc = CaptureLocation.objects.get(pk=location_id)
        loc.cur_project = project
        loc.cur_session = session
        loc.cur_shot = shot
        loc.save()

        # Return result
        result = {}
        result['session_name'] = session.name
        result['session_id'] = session.id
        result['shot_name'] = shot.name
        result['shot_id'] = shot.id
        result['project_name'] = session.project.name
        result['project_id'] = session.project.id
        return JSONResponse(result)