예제 #1
0
def deploy_script(driverCls, provider, identity, instance_id,
                   script, **celery_task_args):
    """Run a single deployment *script* on the given instance over SSH.

    Retries on generic failures; a NonZeroDeploymentException is re-raised
    immediately because the deploy ran but one of its steps failed.
    """
    try:
        logger.debug("deploy_script task started at %s." % datetime.now())
        # Check if instance still exists
        driver = get_driver(driverCls, provider, identity)
        instance = driver.get_instance(instance_id)
        if not instance:
            # Fixed typo in log message: "teminated" -> "terminated".
            logger.debug("Instance has been terminated: %s." % instance_id)
            return
        # Never pass the stored password down to the deploy layer.
        instance._node.extra['password'] = None

        kwargs = _generate_ssh_kwargs()
        kwargs.update({'deploy': script})
        driver.deploy_to(instance, **kwargs)
        logger.debug("deploy_script task finished at %s." % datetime.now())
    except DeploymentError as exc:
        logger.exception(exc)
        if isinstance(exc.value, NonZeroDeploymentException):
            # The deployment ran, but the return code on one or more
            # steps is bad. Log the exception and do NOT try again!
            raise exc.value
        # TODO: Check if all exceptions thrown at this time
        # fall in this category, and possibly don't retry if
        # you hit the Exception block below this.
        deploy_script.retry(exc=exc)
    except Exception as exc:
        logger.exception(exc)
        deploy_script.retry(exc=exc)
예제 #2
0
def deploy_init_to(driverCls, provider, identity, instance_id,
                   username=None, password=None, redeploy=False, *args, **kwargs):
    """Kick off the asynchronous deploy chain for a launched instance.

    NonZeroDeploymentException propagates (deploy ran, a step failed);
    all other errors are logged and the task is retried.
    """
    try:
        logger.debug("deploy_init_to task started at %s." % datetime.now())
        driver = get_driver(driverCls, provider, identity)
        instance = driver.get_instance(instance_id)
        if not instance:
            # Fixed typo in log message: "teminated" -> "terminated".
            logger.debug("Instance has been terminated: %s." % instance_id)
            return
        # NOTE(review): `image_metadata` is never used below -- presumably
        # kept as a liveness/sanity check of the image API. TODO: confirm
        # whether this call can be removed.
        image_metadata = driver._connection\
                               .ex_get_image_metadata(instance.machine)
        deploy_chain = get_deploy_chain(driverCls, provider, identity,
                                        instance, username, password, redeploy)
        deploy_chain.apply_async()
        # Can be really useful when testing.
        # if kwargs.get('delay'):
        #     async.get()
        logger.debug("deploy_init_to task finished at %s." % datetime.now())
    except SystemExit:
        logger.exception("System Exits are BAD! Find this and get rid of it!")
        raise Exception("System Exit called")
    except NonZeroDeploymentException:
        # Deploy ran but a step failed -- surface it, do not retry.
        raise
    except Exception as exc:
        # logging's warn() is a deprecated alias; use warning().
        logger.warning(exc)
        deploy_init_to.retry(exc=exc)
예제 #3
0
def _send_instance_email(driverCls, provider, identity, instance_id):
    """Email the owner that their instance is ready, honoring the
    profile's `send_emails` opt-out. Retries on any failure.
    """
    try:
        logger.debug("_send_instance_email task started at %s." %
                     datetime.now())
        driver = get_driver(driverCls, provider, identity)
        instance = driver.get_instance(instance_id)
        # Breakout if instance has been deleted at this point
        if not instance:
            # Fixed typo in log message: "teminated" -> "terminated".
            logger.debug("Instance has been terminated: %s." % instance_id)
            return
        username = identity.user.username
        profile = UserProfile.objects.get(user__username=username)
        if profile.send_emails:
            # Only send emails if allowed by profile setting
            created = datetime.strptime(instance.extra['created'],
                                        "%Y-%m-%dT%H:%M:%SZ")
            send_instance_email(username,
                                instance.id,
                                instance.name,
                                instance.ip,
                                created,
                                username)
        else:
            logger.debug("User %s elected NOT to receive new instance emails"
                         % username)

        logger.debug("_send_instance_email task finished at %s." %
                     datetime.now())
    except Exception as exc:
        # logging's warn() is a deprecated alias; use warning().
        logger.warning(exc)
        _send_instance_email.retry(exc=exc)
예제 #4
0
def _deploy_init_to(driverCls, provider, identity, instance_id,
                    username=None, password=None, redeploy=False,
                    **celery_task_args):
    """Run the initial deployment (init script) on the instance over SSH.

    Retries on generic failures; a NonZeroDeploymentException is re-raised
    immediately because the deploy ran but one of its steps failed.
    """
    try:
        logger.debug("_deploy_init_to task started at %s." % datetime.now())
        # Check if instance still exists
        driver = get_driver(driverCls, provider, identity)
        instance = driver.get_instance(instance_id)
        if not instance:
            # Fixed typo in log message: "teminated" -> "terminated".
            logger.debug("Instance has been terminated: %s." % instance_id)
            return

        # NOTE: This is unrelated to the password argument
        logger.info(instance.extra)
        # Never pass the stored password down to the deploy layer.
        instance._node.extra['password'] = None
        msd = init(instance, identity.user.username, password, redeploy)

        kwargs = _generate_ssh_kwargs()
        kwargs.update({'deploy': msd})
        driver.deploy_to(instance, **kwargs)
        _update_status_log(instance, "Deploy Finished")
        logger.debug("_deploy_init_to task finished at %s." % datetime.now())
    except DeploymentError as exc:
        logger.exception(exc)
        if isinstance(exc.value, NonZeroDeploymentException):
            # The deployment ran, but the return code on one or more
            # steps is bad. Log the exception and do NOT try again!
            raise exc.value
        # TODO: Check if all exceptions thrown at this time
        # fall in this category, and possibly don't retry if
        # you hit the Exception block below this.
        _deploy_init_to.retry(exc=exc)
    except Exception as exc:
        logger.exception(exc)
        _deploy_init_to.retry(exc=exc)
예제 #5
0
def allocation_threshold_check():
    """Fire an 'allocation_source_threshold_met' event for every allocation
    source whose usage has crossed a threshold it has not yet fired for.

    At most one new event per source per run (see the `break` below);
    higher thresholds get their event on a subsequent run.
    """
    logger.debug("allocation_threshold_check task started at %s." % datetime.now())
    if not settings.CHECK_THRESHOLD:
        logger.debug("CHECK_THRESHOLD is FALSE -- allocation_threshold_check task finished at %s." % datetime.now())
        return

    # Percent thresholds, checked lowest-first. Hoisted out of the loop.
    THRESHOLDS = [50.0, 90.0]
    for allocation_source in AllocationSource.objects.filter(compute_allowed__gte=0).all():
        snapshot = allocation_source.snapshot
        if not snapshot.compute_allowed:
            # The queryset filter is __gte=0, so compute_allowed may be 0;
            # skip those sources to avoid a ZeroDivisionError.
            continue
        percentage_used = (snapshot.compute_used / snapshot.compute_allowed) * 100
        # check if percentage more than threshold
        for threshold in THRESHOLDS:
            if percentage_used > threshold:
                allocation_source_name = allocation_source.name

                # check if event has been fired
                prev_event = EventTable.objects.filter(name='allocation_source_threshold_met',
                                                       payload__allocation_source_name=allocation_source_name,
                                                       payload__threshold=threshold).last()
                if prev_event:
                    continue

                payload = {
                    'allocation_source_name': allocation_source_name,
                    'threshold': threshold,
                    'usage_percentage': float(percentage_used),
                }

                EventTable.objects.create(
                    name='allocation_source_threshold_met',
                    payload=payload,
                    entity_id=payload['allocation_source_name'])
                break
    logger.debug("allocation_threshold_check task finished at %s." % datetime.now())
예제 #6
0
def check_image_membership():
    """Synchronize image membership; on any failure, log and retry the task."""
    started = "check_image_membership task started at %s."
    finished = "check_image_membership task finished at %s."
    try:
        logger.debug(started % datetime.now())
        update_membership()
        logger.debug(finished % datetime.now())
    except Exception as exc:
        logger.exception("Error during check_image_membership task")
        check_image_membership.retry(exc=exc)
예제 #7
0
파일: tests.py 프로젝트: astaric/kronometer
    def test_partial_results(self):
        """Posting to the results view works when only some bikers have finished."""
        rider_specs = [
            {'number': 1, 'start_time': datetime.now()},
            {'number': 2, 'start_time': datetime.now(),
             'end_time': datetime.now()},
        ]
        for spec in rider_specs:
            Biker.objects.create(**spec)

        self.client.post(reverse("results"))
예제 #8
0
def clear_empty_ips():
    """Sweep every active OpenStack identity: release unused floating IPs,
    strip IPs from inactive instances, and dismantle project networks that
    have no active instances left.
    """
    logger.debug("clear_empty_ips task started at %s." % datetime.now())
    from service import instance as instance_service
    from rtwo.driver import OSDriver
    from api import get_esh_driver
    from service.accounts.openstack import AccountDriver as OSAccountDriver

    identities = Identity.objects.filter(provider__type__name__iexact="openstack", provider__active=True)
    # BUG FIX: the original passed attribute *values* to attrgetter and
    # returned the (uncalled) getter object as the sort key, so the sort
    # was meaningless (and raises TypeError on Python 3). Sort by the
    # (provider type name, username) tuple instead.
    key_sorter = lambda ident: (ident.provider.type.name,
                                ident.created_by.username)
    identities = sorted(identities, key=key_sorter)
    os_acct_driver = None
    total = len(identities)
    for idx, core_identity in enumerate(identities):
        try:
            # Initialize the drivers
            driver = get_esh_driver(core_identity)
            if not isinstance(driver, OSDriver):
                continue
            # Reuse the account driver across identities on the same provider.
            if not os_acct_driver or os_acct_driver.core_provider != core_identity.provider:
                os_acct_driver = OSAccountDriver(core_identity.provider)
                logger.info("Initialized account driver")
            # Get useful info
            creds = core_identity.get_credentials()
            tenant_name = creds["ex_tenant_name"]
            logger.info("Checking Identity %s/%s - %s" % (idx + 1, total, tenant_name))
            # Attempt to clean floating IPs
            num_ips_removed = driver._clean_floating_ip()
            if num_ips_removed:
                logger.debug("Removed %s ips from OpenStack Tenant %s" % (num_ips_removed, tenant_name))
            # Test for active/inactive instances
            instances = driver.list_instances()
            active = any(driver._is_active_instance(inst) for inst in instances)
            inactive = all(driver._is_inactive_instance(inst) for inst in instances)
            for instance in instances:
                if driver._is_inactive_instance(instance) and instance.ip:
                    # If an inactive instance has floating/fixed IPs.. Remove them!
                    instance_service.remove_ips(driver, instance)
            if active and not inactive:
                # User has >1 active instances AND not all instances inactive
                pass
            elif os_acct_driver.network_manager.get_network_id(
                os_acct_driver.network_manager.neutron, "%s-net" % tenant_name
            ):
                # User has 0 active instances OR all instances are inactive
                # Network exists, attempt to dismantle as much as possible
                remove_network = not inactive
                logger.info("Removing project network %s for %s" % (remove_network, tenant_name))
                if remove_network:
                    # Sec. group can't be deleted if instances are suspended
                    # when instances are suspended we pass remove_network=False
                    os_acct_driver.delete_security_group(core_identity)
                    os_acct_driver.delete_network(core_identity, remove_network=remove_network)
            else:
                # No network found for this tenant; nothing to clean up.
                pass
        except Exception as exc:
            # Per-identity failures must not abort the sweep of the rest.
            logger.exception(exc)
    logger.debug("clear_empty_ips task finished at %s." % datetime.now())
예제 #9
0
    def test_addNewCompany(self):
        """addNewCompany accepts new companies; listAllCompanies returns them.

        The original body mixed tabs and spaces (a TabError on Python 3) --
        re-indented with 4 spaces throughout. Parenthesized single-argument
        print is identical on Python 2 and valid on Python 3.
        """
        c = controller()
        self.assertEqual(c.addNewCompany('testcompany1', datetime.now(), 1888.50), True)
        self.assertEqual(c.addNewCompany('testcompany2', datetime.now(), 888.50), True)
        companies = c.listAllCompanies()
        # Renamed loop variable: `object` shadowed the builtin.
        for company in companies:
            print('company: ' + company.company)
            # Fixed strftime format: "%H:%M%S" was missing the second colon.
            print('date of quotation: ' + company.date_of_quote.strftime("%Y-%m-%d %H:%M:%S"))
            print('total price: ' + str(company.total_price))
예제 #10
0
def deploy_to(driverCls, provider, identity, instance_id, *args, **kwargs):
    """Run the driver's deploy step against the instance; retry on failure."""
    try:
        logger.debug("deploy_to task started at %s." % datetime.now())
        driver = get_driver(driverCls, provider, identity)
        instance = driver.get_instance(instance_id)
        driver.deploy_to(instance, *args, **kwargs)
        logger.debug("deploy_to task finished at %s." % datetime.now())
    except Exception as exc:
        # logging's warn() is a deprecated alias; use warning().
        logger.warning(exc)
        deploy_to.retry(exc=exc)
예제 #11
0
def clean_empty_ips(driverCls, provider, identity, *args, **kwargs):
    """Release unused floating IPs for the identity; returns the count cleaned."""
    try:
        # Fixed the log messages: they previously named a different task
        # ("remove_floating_ip"), which made log greps misleading.
        logger.debug("clean_empty_ips task started at %s." % datetime.now())
        driver = get_driver(driverCls, provider, identity)
        ips_cleaned = driver._clean_floating_ip()
        logger.debug("clean_empty_ips task finished at %s." % datetime.now())
        return ips_cleaned
    except Exception as exc:
        # logging's warn() is a deprecated alias; use warning().
        logger.warning(exc)
        clean_empty_ips.retry(exc=exc)
예제 #12
0
def add_os_project_network(core_identity, *args, **kwargs):
    """Create the OpenStack project network for the identity; retry on failure."""
    try:
        logger.debug("add_os_project_network task started at %s." % datetime.now())
        from rtwo.accounts.openstack import AccountDriver as OSAccountDriver

        account_driver = OSAccountDriver(core_identity.provider)
        account_driver.create_network(core_identity)
        logger.debug("add_os_project_network task finished at %s." % datetime.now())
    except Exception as exc:
        # The original retried silently; log the failure so retries are
        # visible, matching the other tasks in this module.
        logger.exception(exc)
        add_os_project_network.retry(exc=exc)
예제 #13
0
def add_floating_ip(driverCls, provider, identity,
                    instance_alias, delete_status=True,
                    *args, **kwargs):
    """Ensure the instance has a floating IP and record hostname/IP metadata.

    Returns {"floating_ip": ..., "hostname": ...} or None if the instance
    is gone. Retries with limited exponential backoff (networking is often
    just-not-ready shortly after launch).
    """
    # For testing ONLY.. Test cases ignore countdown..
    if app.conf.CELERY_ALWAYS_EAGER:
        logger.debug("Eager task waiting 15 seconds")
        time.sleep(15)
    try:
        logger.debug("add_floating_ip task started at %s." % datetime.now())
        # Remove unused floating IPs first, so they can be re-used
        driver = get_driver(driverCls, provider, identity)
        driver._clean_floating_ip()

        # assign if instance doesn't already have an IP addr
        instance = driver.get_instance(instance_alias)
        if not instance:
            # Fixed typo in log message: "teminated" -> "terminated".
            logger.debug("Instance has been terminated: %s." % instance_alias)
            return None
        floating_ips = driver._connection.neutron_list_ips(instance)
        if floating_ips:
            floating_ip = floating_ips[0]["floating_ip_address"]
        else:
            floating_ip = driver._connection.neutron_associate_ip(
                instance, *args, **kwargs)["floating_ip_address"]
        _update_status_log(instance, "Networking Complete")
        # TODO: Implement this as its own task, with the result from
        # 'floating_ip' passed in. Add it to the deploy_chain before deploy_to
        hostname = ""
        if floating_ip.startswith('128.196'):
            # Raw string: the original plain string relied on "\." being
            # passed through, which is an invalid-escape warning on py3.6+.
            regex = re.compile(
                r"(?P<one>[0-9]+)\.(?P<two>[0-9]+)\."
                r"(?P<three>[0-9]+)\.(?P<four>[0-9]+)")
            r = regex.search(floating_ip)
            (one, two, three, four) = r.groups()
            hostname = "vm%s-%s.iplantcollaborative.org" % (three, four)
        else:
            # Find a way to convert new floating IPs to hostnames..
            hostname = floating_ip
        update_instance_metadata(driver, instance, data={
            'public-hostname': hostname,
            'public-ip': floating_ip}, replace=False)

        logger.info("Assigned IP:%s - Hostname:%s" % (floating_ip, hostname))
        # End
        logger.debug("add_floating_ip task finished at %s." % datetime.now())
        return {"floating_ip": floating_ip, "hostname": hostname}
    except Exception as exc:
        logger.exception("Error occurred while assigning a floating IP")
        # Networking can take a LONG time when an instance first launches,
        # it can also be one of those things you 'just miss' by a few seconds..
        # So we will retry 30 times using limited exp.backoff
        # Max Time: 53min
        countdown = min(2**current.request.retries, 128)
        add_floating_ip.retry(exc=exc,
                              countdown=countdown)
예제 #14
0
def destroy_instance(instance_alias, core_identity_id):
    """Destroy the instance, then (for OpenStack) clean floating IPs and,
    if no instances remain active, tear down the empty project network.
    """
    from service import instance as instance_service
    from rtwo.driver import OSDriver
    from api import get_esh_driver
    try:
        logger.debug("destroy_instance task started at %s." % datetime.now())
        node_destroyed = instance_service.destroy_instance(
            core_identity_id, instance_alias)
        core_identity = Identity.objects.get(id=core_identity_id)
        driver = get_esh_driver(core_identity)
        if isinstance(driver, OSDriver):
            # Spawn off the last two tasks
            logger.debug("OSDriver Logic -- Remove floating ips and check"
                         " for empty project")
            driverCls = driver.__class__
            provider = driver.provider
            identity = driver.identity
            instances = driver.list_instances()
            # BUG FIX: the original collected booleans (including False),
            # so `if not active` was truthy whenever ANY instances existed.
            # Keep only the instances that are actually active.
            active = [inst for inst in instances
                      if driver._is_active_instance(inst)]
            if not active:
                logger.debug("Driver shows 0 of %s instances are active"
                             % (len(instances),))
                # For testing ONLY.. Test cases ignore countdown..
                if app.conf.CELERY_ALWAYS_EAGER:
                    logger.debug("Eager task waiting 1 minute")
                    time.sleep(60)
                destroy_chain = chain(
                    clean_empty_ips.subtask(
                        (driverCls, provider, identity),
                        immutable=True, countdown=5),
                    remove_empty_network.subtask(
                        (driverCls, provider, identity, core_identity_id),
                        immutable=True, countdown=60))
                destroy_chain()
            else:
                logger.debug("Driver shows %s of %s instances are active"
                             % (len(active), len(instances)))
                # For testing ONLY.. Test cases ignore countdown..
                if app.conf.CELERY_ALWAYS_EAGER:
                    logger.debug("Eager task waiting 15 seconds")
                    time.sleep(15)
                destroy_chain = \
                    clean_empty_ips.subtask(
                        (driverCls, provider, identity),
                        immutable=True, countdown=5).apply_async()
        logger.debug("destroy_instance task finished at %s." % datetime.now())
        return node_destroyed
    except Exception as exc:
        logger.exception(exc)
        destroy_instance.retry(exc=exc)
예제 #15
0
파일: views.py 프로젝트: waytai/smallslive
 def get_context_data(self, **kwargs):
     """Add upcoming events (next 3 days) and the 5 newest videos to context."""
     context = super(HomepageView, self).get_context_data(**kwargs)
     window_start = datetime.now().date()
     window_end = window_start + timedelta(days=3)
     upcoming = Event.objects.filter(
         start_day__range=(window_start, window_end)).reverse()
     context['events'] = upcoming
     context['videos'] = Media.objects.order_by('-id')[:5]
     return context
예제 #16
0
 def delete(self, request, provider_id, identity_id, volume_id):
     """Destroy the volume on the provider, then end-date the DB record."""
     user = request.user
     # A working driver is required before the volume can be looked up.
     esh_driver = prepare_driver(request, provider_id, identity_id)
     if not esh_driver:
         return invalid_creds(provider_id, identity_id)
     esh_volume = esh_driver.get_volume(volume_id)
     if not esh_volume:
         return volume_not_found(volume_id)
     core_volume = convert_esh_volume(esh_volume, provider_id,
                                      identity_id, user)
     # Destroy on the provider first, then update our record.
     esh_driver.destroy_volume(esh_volume)
     core_volume.end_date = datetime.now()
     core_volume.save()
     # Serialize and return the (now end-dated) volume.
     serializer_context = {'user': request.user}
     serialized_data = VolumeSerializer(core_volume,
                                        context=serializer_context).data
     return Response(serialized_data)
예제 #17
0
    def test_write_datetime_to_settings_file(self):
        """Round-trip an ISO timestamp through the settings file."""
        self.clean_config_file()

        # Fresh settings object seeded with defaults.
        app_settings = AppSettings()
        app_settings.create_defaults()

        # Store the current time (ISO format) in the Cisco EOX section.
        timestamp = datetime.now()
        stored_value = timestamp.isoformat()
        app_settings.set(
            key="cisco_eox_api_auto_sync_last_execution_time",
            section=AppSettings.CISCO_EOX_CRAWLER_SECTION,
            value=stored_value
        )
        app_settings.write_file()

        # Read it back; both the raw string and the parsed datetime match.
        read_value = app_settings.get_string(
            key="cisco_eox_api_auto_sync_last_execution_time",
            section=AppSettings.CISCO_EOX_CRAWLER_SECTION
        )
        self.assertEqual(stored_value, read_value)
        self.assertEqual(timestamp, parse_datetime(read_value))

        # cleanup
        self.clean_config_file()
예제 #18
0
    def created_in(self, delta):
        """Restrict the queryset to rows created within *delta* of now.

        :param delta: How far back from now to include.
        :type delta: datetime.timedelta
        """
        cutoff = datetime.now() - delta
        return self.filter(created__gte=cutoff)
예제 #19
0
def setTimer():
    """Create a threading.Timer that fires `finii` at midnight of Donnee.dateCoupure."""
    # Midnight of the cutoff date; any time-of-day on dateCoupure is discarded.
    dts = datetime(day=Donnee.dateCoupure.day, month=Donnee.dateCoupure.month, year=Donnee.dateCoupure.year)
    print(dts)
    # Time remaining until the cutoff (negative if already past).
    tps = dts - datetime.now()
    print(tps)
    print("Coupure dans {0} seconds".format(int(tps.total_seconds())+1))
    # NOTE(review): the timer is created but never .start()ed here --
    # presumably the caller starts Donnee.timer; confirm.
    Donnee.timer = threading.Timer(int(tps.total_seconds())+1, finii)  # alternative callback: executerCoupure
예제 #20
0
def returnReportIndex(request):
    """Render the Box Office Returns index with selectable (year, month) periods.

    Periods run from four years back to five years ahead of the current year,
    grouped per month (1-12).
    """
    # Take a single timestamp so year/month cannot disagree if the call
    # happens to straddle a boundary (the original called now() twice).
    now = datetime.now()
    thisYear = now.year
    thisMonth = now.month
    periods = [
        [(year, x, calendar.month_name[x]) for year in range(thisYear - 4, thisYear + 6)]
        for x in range(1, 13)
    ]
    return render_to_response('organisation/returnReportIndex.html',
                              {
                                  'maintitle': 'Box Office Returns',
                                  'periods': periods,
                                  'thisYear': thisYear,
                                  'thisMonth': thisMonth,
                              },
                              context_instance=RequestContext(request)
    )
예제 #21
0
def remove_empty_networks():
    """For every active OpenStack provider, delete project networks that
    have no running instances on them.
    """
    try:
        logger.debug("remove_empty_networks task started at %s." %
                     datetime.now())
        for provider in Provider.get_active(type_name='openstack'):
            os_driver = OSAccountDriver(provider)
            all_instances = os_driver.admin_driver.list_all_instances()
            project_map = os_driver.network_manager.project_network_map()
            projects_with_networks = project_map.keys()
            for project in projects_with_networks:
                network_name = project_map[project]['network']['name']
                logger.debug("Checking if network %s is in use" % network_name)
                if running_instances(network_name, all_instances):
                    continue
                # TODO: Will change when not using 'usergroups' explicitly.
                user = project
                try:
                    logger.debug("Removing project network for User:%s, Project:%s"
                                 % (user, project))
                    os_driver.network_manager.delete_project_network(user, project)
                except (NeutronClientException, NeutronException):
                    # Merged the two identical handlers; also fixed the
                    # missing space in the concatenated message, which
                    # previously logged "projectnetwork".
                    logger.exception("Neutron unable to remove project "
                                     "network for %s-%s" % (user, project))
    except Exception as exc:
        logger.exception("Failed to run remove_empty_networks")
예제 #22
0
def add_jobs_listing_page(slug, cls):
    """Create and publish a test jobs listing page of page-type *cls*.

    Builds a throwaway job category and region, then a page whose open
    date is now and whose close date is 30 days out.
    """
    job_category = JobCategory(
        job_category='CFPB Testing job category',
        blurb='CFPB Testing blurb'
    )
    job_category.save()

    job_region = JobLocation(
        abbreviation='TR',
        name='Testing Region'
    )
    job_region.save()

    # Use one timestamp so open/close dates derive from the same instant
    # (the original mixed datetime.now() and datetime.today()).
    now = datetime.now()
    jobs_listing_page = cls(
        close_date=now + timedelta(days=30),
        description='Test Job Description',
        division=job_category,
        open_date=now,
        salary_max=120000,
        salary_min=95000,
        slug=slug,
        title=slug,
        location=job_region
    )

    publish_page(jobs_listing_page)
예제 #23
0
 def finish(self, outcome):
     """Close this event with *outcome*; returns False unless still in progress."""
     in_progress = self.EVENT_OUTCOME_CHOICES.IN_PROGRESS
     if self.outcome != in_progress:
         return False
     self.outcome = outcome
     self.end_date = datetime.now()
     self.save()
     return True
예제 #24
0
def launch_instance(user, provider_uuid, identity_uuid,
                    size_alias, source_alias, deploy=True,
                    **launch_kwargs):
    """
    USE THIS TO LAUNCH YOUR INSTANCE FROM THE REPL!

    Entry point for launching: validates, then dispatches to the
    source-specific launch (launch_*_instance). In order it will:
      1. Verify the requested size is available on this driver.
      2. Verify the user's Quota/Allocation (in our DB).
      3. Verify the size is not below provider thresholds.
      4. Launch according to the Boot Source (volume/machine).
      5. Return the CORE Instance with fresh 'esh' objects attached.
    Extra keyword arguments apply provider-specific modifications.
    """
    request_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    status_logger.debug("%s,%s,%s,%s,%s,%s"
                 % (request_time, user, "No Instance", source_alias, size_alias,
                    "Request Received"))
    identity = CoreIdentity.objects.get(uuid=identity_uuid)
    esh_driver = get_cached_driver(identity=identity)

    # Each check below raises on failure, aborting the launch early:
    # size availability on this specific driver...
    size = check_size(esh_driver, size_alias, provider_uuid)
    # ...boot source (volume/machine) availability...
    boot_source = get_boot_source(user.username, identity_uuid, source_alias)
    # ...and quota/allocation/threshold validation.
    _pre_launch_validation(user.username, esh_driver, identity_uuid, boot_source, size)

    return _select_and_launch_source(
        user, identity_uuid, esh_driver, boot_source, size,
        deploy=deploy, **launch_kwargs)
예제 #25
0
def get_path_name(instance, filename):
    """Build the storage path for *filename* under the instance's directory.

    Non-permanent files get a timestamped YYYY/MM/DD/HH/MM sub-path; the
    returned path is normalized and has no leading/trailing slashes.
    """
    if instance.is_permanent:
        timestamp_part = ''
    else:
        timestamp_part = datetime.now().strftime('%Y/%m/%d/%H/%M')
    parts = [instance.directory.path, timestamp_part, filename]
    joined = '/'.join(parts)
    return os.path.normpath(joined).strip('/')
 def get(self, request, provider_uuid, identity_uuid, volume_id):
     """Return the serialized volume; if it is gone on the provider,
     end-date the matching InstanceSource record and return 404.
     """
     user = request.user
     esh_driver = prepare_driver(request, provider_uuid, identity_uuid)
     if not esh_driver:
         return invalid_creds(provider_uuid, identity_uuid)
     try:
         esh_volume = esh_driver.get_volume(volume_id)
     except ConnectionFailure:
         return connection_failure(provider_uuid, identity_uuid)
     except InvalidCredsError:
         return invalid_creds(provider_uuid, identity_uuid)
     except Exception as exc:
         logger.exception("Encountered a generic exception. "
                          "Returning 409-CONFLICT")
         # `exc.message` no longer exists on Python 3 exceptions;
         # str(exc) yields the message on both Python 2 and 3.
         return failure_response(status.HTTP_409_CONFLICT,
                                 str(exc))
     if not esh_volume:
         # Volume is gone on the provider -- end-date our record, if any.
         try:
             source = InstanceSource.objects.get(
                 identifier=volume_id,
                 provider__uuid=provider_uuid)
             source.end_date = datetime.now()
             source.save()
         except (InstanceSource.DoesNotExist, CoreVolume.DoesNotExist):
             pass
         return volume_not_found(volume_id)
     core_volume = convert_esh_volume(esh_volume, provider_uuid,
                                      identity_uuid, user)
     serialized_data = VolumeSerializer(core_volume,
                                        context={'request': request}).data
     response = Response(serialized_data)
     return response
예제 #27
0
def wait_for(instance_alias, driverCls, provider, identity, status_query,
        tasks_allowed=False, return_id=False, **task_kwargs):
    """
    #Task makes 250 attempts to 'look at' the instance, waiting 15sec each try
    Cumulative time == 1 hour 2 minutes 30 seconds before FAILURE

    status_query = "active" Match only one value, active
    status_query = ["active","suspended"] or match multiple values.
    """
    from service import instance as instance_service
    try:
        logger.debug("wait_for task started at %s." % datetime.now())
        if app.conf.CELERY_ALWAYS_EAGER:
            # Eager (test) mode cannot rely on celery retry/countdown
            # scheduling, so poll synchronously until ready.
            logger.debug("Eager task - DO NOT return until its ready!")
            return _eager_override(wait_for, _is_instance_ready,
                                    (driverCls, provider, identity,
                                     instance_alias, status_query,
                                     tasks_allowed, return_id), {})

        result = _is_instance_ready(driverCls, provider, identity,
                                  instance_alias, status_query,
                                  tasks_allowed, return_id)
        return result
    except Exception as exc:
        # "Not Ready" is the expected polling outcome, not an error;
        # anything else gets a full traceback before the retry.
        if "Not Ready" not in str(exc):
            # Ignore 'normal' errors.
            logger.exception(exc)
        wait_for.retry(exc=exc)
예제 #28
0
 def get_allocation_dict(self):
     """Summarize this identity's time allocation: allowed/used hours, the
     accounting window, burn time, and projected time-to-zero ('ttz')."""
     if not self.allocation:
         return {}
     #Don't move it up. Circular reference.
     from service.allocation import get_time, get_burn_time,\
         delta_to_minutes, delta_to_hours, get_delta
     # Accounting window: anchored to the 1st of the month, one month long.
     delta = get_delta(self, time_period=relativedelta(day=1, months=1))
     time_used = get_time(self.identity.created_by,
                          self.identity.id,
                          delta)
     burn_time = get_burn_time(self.identity.created_by, self.identity.id,
                               delta,
                               timedelta(minutes=self.allocation.threshold))
     mins_consumed = delta_to_minutes(time_used)
     if burn_time:
         burn_time = delta_to_hours(burn_time)
     # Projected instant when the allocation runs out, assuming the current
     # burn continues. NOTE(review): naive datetime.now() -- presumably
     # server-local time; confirm timezone expectations.
     zero_time = datetime.now() + timedelta(
             minutes=(self.allocation.threshold - mins_consumed))
     allocation_dict = {
         "threshold": floor(self.allocation.threshold/60),  # hours allowed
         "current": floor(mins_consumed/60),                # hours used
         "delta": ceil(delta.total_seconds()/60),           # window length, minutes
         "burn": burn_time,                                 # hours remaining (or falsy)
         "ttz": zero_time,                                  # time-to-zero estimate
     }
     return allocation_dict
예제 #29
0
 def check_game(self):
     """Advance the game once the current turn's deadline has passed."""
     now = make_aware(datetime.now())
     if now <= self.next_turn:
         return
     if self.started:
         self.start_next_turn()
     else:
         self.start()
예제 #30
0
def home(request):
    """View for the homepage of the Product DB.

    Builds (and caches for 10 minutes) the aggregate product/lifecycle
    counts, then adds the uncached, editable text blocks on every request.

    :param request: the incoming HttpRequest
    :return: rendered 'productdb/home.html' response
    """
    if login_required_if_login_only_mode(request):
        return redirect("%s?next=%s" % (settings.LOGIN_URL, request.path))

    # NOTE(review): naive local date here, while the recent-events filter
    # below uses the current timezone -- confirm the mix is intended.
    today_date = datetime.now().date()

    # Expensive aggregate counts are cached as a single context blob.
    context = cache.get(HOMEPAGE_CONTEXT_CACHE_KEY)
    if not context:
        all_products_query = Product.objects.all()
        context = {
            # Events from the last 30 days, newest first, capped at 5.
            "recent_events": NotificationMessage.objects.filter(
                created__gte=datetime.now(get_current_timezone()) - timedelta(days=30)
            ).order_by("-created")[:5],
            "vendors": [x.name for x in Vendor.objects.all() if x.name != "unassigned"],
            "product_count": all_products_query.count(),
            # Products that have any lifecycle (EoX) data.
            "product_lifecycle_count": all_products_query.filter(eox_update_time_stamp__isnull=False).count(),
            "product_no_eol_announcement_count": all_products_query.filter(
                eox_update_time_stamp__isnull=False, eol_ext_announcement_date__isnull=True
            ).count(),
            # EoL announced but still being sold.
            "product_eol_announcement_count": all_products_query.filter(
                eol_ext_announcement_date__isnull=False, end_of_sale_date__gt=today_date
            ).count(),
            # Past end-of-sale, not yet (or never) past end-of-support.
            "product_eos_count": all_products_query.filter(
                Q(end_of_sale_date__lte=today_date, end_of_support_date__gt=today_date)
                | Q(end_of_sale_date__lte=today_date, end_of_support_date__isnull=True)
            ).count(),
            "product_eol_count": all_products_query.filter(end_of_support_date__lte=today_date).count(),
            "product_price_count": all_products_query.filter(list_price__isnull=False).count(),
        }
        cache.set(HOMEPAGE_CONTEXT_CACHE_KEY, context, timeout=60 * 10)

    # Text blocks are editable content -- always fetched fresh, never cached.
    context.update(
        {
            "TB_HOMEPAGE_TEXT_BEFORE_FAVORITE_ACTIONS": TextBlock.objects.filter(
                name=TextBlock.TB_HOMEPAGE_TEXT_BEFORE_FAVORITE_ACTIONS
            ).first(),
            "TB_HOMEPAGE_TEXT_AFTER_FAVORITE_ACTIONS": TextBlock.objects.filter(
                name=TextBlock.TB_HOMEPAGE_TEXT_AFTER_FAVORITE_ACTIONS
            ).first(),
        }
    )

    return render(request, "productdb/home.html", context=context)
예제 #31
0
파일: views.py 프로젝트: bfi3ld/MSCProject
def edit_submission(request, pk):
    """Let the logged-in student edit their working copy of a submission.

    A non-original ("working") Submission is created lazily on first visit,
    seeded from the untouched original. On POST, the old and new content are
    diffed (plain text via BeautifulSoup + diff_match_patch) and the diff is
    stored in a Submission_edits audit row before redirecting to the
    feedback view.

    :param request: HttpRequest (request.user must map to a Student).
    :param pk: primary key of the Patch being edited.
    :raises Patch.DoesNotExist / Student.DoesNotExist: if lookups fail.
    """
    patch = Patch.objects.get(pk=pk)
    display_html = ''
    # Fetch the student once instead of querying it twice as before.
    student = Student.objects.get(user=request.user)

    latest_submission, created = Submission.objects.get_or_create(
        patch=patch,
        student=student,
        is_original=False)

    if created:
        # Seed the editable copy from the original submission's content.
        old_content = Submission.objects.get(
            patch=patch,
            student=student,
            is_original=True)
        latest_submission.content = old_content.content
        latest_submission.save()

    if request.method == 'POST':
        form = EditSubmissionForm(request.POST)
        if form.is_valid():
            updated_submission = form.cleaned_data['content']

            # Compare plain text of the old vs. new content for the audit diff.
            text1 = BeautifulSoup(latest_submission.content,
                                  features="html.parser")
            text2 = BeautifulSoup(updated_submission, features="html.parser")
            conv_1 = text1.get_text()
            conv_2 = text2.get_text()

            dmp = diff_match_patch()
            difference = dmp.diff_main(conv_1, conv_2)
            dmp.diff_cleanupSemantic(difference)
            display_html = dmp.diff_prettyHtml(difference)

            # BUG FIX: the original code assigned content/published_date and
            # saved the submission twice with identical values; saving once
            # produces the same database state.
            latest_submission.content = updated_submission
            latest_submission.published_date = datetime.now()
            latest_submission.save()

            submission_edits = Submission_edits(deleted=display_html,
                                                date_time=datetime.now(),
                                                submission=latest_submission)
            submission_edits.save()
            return redirect('view_feedback', pk=pk)

    else:
        form = EditSubmissionForm(
            initial={'content': latest_submission.content})

    return render(request,
                  'edit_submission.html',
                  context={
                      'form': form,
                      'latest_submission': latest_submission,
                      'patch': patch,
                      'display_html': display_html
                  })
예제 #32
0
    def test_edit_task_success(self):
        """An authenticated user can PATCH the 'finished' field of task 10."""
        self.client1.force_authenticate(self.user1)
        payload = {'finished': datetime.now()}
        response = self.client1.patch('/api/task/10/', data=payload)
        self.assertEqual(response.status_code, 200)
예제 #33
0
def revenue(request):
    """Render the admin revenue dashboard.

    Aggregates paid-order income/order counts for today, this month, and the
    comparison periods (yesterday, last month), plus per-day and per-month
    datasets for the chart. Optional GET params: ``d`` filters the daily
    dataset to one 'dd-mm-YYYY' date, ``page`` paginates it.

    NOTE(review): the date math splits 'dd-mm-YYYY' strings and subtracts 1
    from day/month, so it does not handle month/year rollover (e.g. the 1st
    of a month); kept as-is for compatibility — TODO confirm intended.
    """
    search = request.GET.get('d')
    today = datetime.now().strftime('%d-%m-%Y')
    # Split once instead of re-splitting on every filter call.
    day, month, year = today.split('-')
    orders = Order.objects.filter(paid=True)
    # BUG FIX: initialized up front so the Paginator below does not raise
    # NameError when the try-block fails before assigning it.
    income_day_dataset_all = []
    try:
        # income
        income_total = "{:,}".format(
            orders.aggregate(income_amounts=Sum('total_price'))['income_amounts'])
        income_today = orders.filter(
            created__day=day, created__month=month, created__year=year
        ).aggregate(income_amounts=Sum('total_price'))['income_amounts']
        if income_today is None:
            income_today = 0
        income_month = orders.filter(
            created__month=month, created__year=year
        ).aggregate(income_amounts=Sum('total_price'))['income_amounts']
        if income_month is None:
            # BUG FIX: the original reset income_today here, leaving
            # income_month as None and crashing the arithmetic below.
            income_month = 0
        # order counts
        order_total = orders.count()
        order_today = orders.filter(created__day=day, created__month=month,
                                    created__year=year).count()
        order_month = orders.filter(created__month=month,
                                    created__year=year).count()
        # comparison periods (no rollover handling — see docstring)
        income_yesterday = orders.filter(
            created__day=(int(day) - 1), created__month=month,
            created__year=year
        ).aggregate(income_amounts=Sum('total_price'))['income_amounts']
        income_last_month = orders.filter(
            created__month=(int(month) - 1), created__year=year
        ).aggregate(income_amounts=Sum('total_price'))['income_amounts']
        if income_yesterday and income_yesterday != 0:
            income_day_rate = (income_today - income_yesterday) / income_yesterday * 100
        else:
            income_day_rate = 100

        if income_last_month and income_last_month != 0:
            income_month_rate = (income_month - income_last_month) / income_last_month * 100
        else:
            income_month_rate = 100

        income_today = "{:,}".format(income_today)
        income_month = "{:,}".format(income_month)

        # data for highchart — strftime() here is SQLite-specific SQL
        select_data_by_day = {"create": """strftime('%%d-%%m-%%Y', created)"""}
        select_data_by_month = {"create": """strftime('%%m-%%Y', created)"""}

        income_month_dataset = orders.extra(select=select_data_by_month).values(
            'create').annotate(income=Sum('total_price')).order_by('-create')[:10]

        income_day_dataset_all = orders.extra(select=select_data_by_day).values(
            'create').annotate(income=Sum('total_price')).order_by('-create')
        if search:
            income_day_dataset_all = [income for income in income_day_dataset_all
                                      if income['create'] == search]
    except Exception:
        # NOTE(review): swallowing everything leaves most context variables
        # undefined, so the render below would still raise NameError when
        # this fires; kept for compatibility but should be narrowed.
        pass

    paginator = Paginator(income_day_dataset_all, 10)  # 10 orders per page
    page = request.GET.get('page')
    try:
        income_day_dataset = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer deliver the first page
        income_day_dataset = paginator.page(1)
    except EmptyPage:
        # If page is out of range deliver last page of results
        income_day_dataset = paginator.page(paginator.num_pages)

    context = {
        'today': today,
        'month': month + "-" + year,
        'income_total': income_total,
        'income_today': income_today,
        'income_month': income_month,
        'order_total': order_total,
        'order_today': order_today,
        'order_month': order_month,
        'income_day_rate': income_day_rate,
        'income_month_rate': income_month_rate,
        'income_day_dataset': income_day_dataset,
        'income_month_dataset': income_month_dataset,
        'page': page,
    }
    return render(request, 'statisticadmin/dashboard/revenue.html', context)
 def clear_expired(cls):
     """Delete every row of this model whose expiry_date is in the past."""
     cutoff = datetime.now()
     expired = cls.objects.filter(expiry_date__lt=cutoff)
     return expired.delete()
예제 #35
0
 def is_active_now(self):
     """Whether this entry is active right now.

     Fixed-schedule entries are active when the current wall-clock time lies
     strictly between their start and end times; mobile entries report their
     own 'activo' flag.
     """
     if self.is_fijo():
         now = datetime.now().time()
         return self.fijo().horaIni < now < self.fijo().horaFin
     return self.movil().activo
예제 #36
0
    def update_quotes(ticker_symbol, force_update=False):
        """Download and store daily quotes for *ticker_symbol*.

        Skips the download unless *force_update* is set or the stored data is
        older than the most recent business day. After saving raw quotes, it
        computes scaled adjusted close (relative to the index ticker), a
        moving average over settings.MOVING_AVERAGE_WEEKS, and the
        close-to-moving-average ratio, then persists everything.

        :param ticker_symbol: symbol string; Ticker row is created if absent.
        :param force_update: re-download even when data looks fresh.
        """
        ticker, _ = Ticker.objects.get_or_create(symbol=ticker_symbol)

        # Most recent weekday; weekday() gives 0 (Mon) through 6 (Sun).
        last_business_day = datetime.today().date()
        while last_business_day.weekday() > 4:
            last_business_day = last_business_day + timedelta(days=-1)

        # don't waste work
        if force_update or ticker.latest_quote_date(
        ) is None or ticker.latest_quote_date() < last_business_day:

            print('latest_quote_date: {0}, last_business_day: {1}'.format(
                ticker.latest_quote_date(), last_business_day))

            print('Updating: {0}'.format(ticker_symbol))

            today = datetime.now()
            start = today + timedelta(weeks=-settings.WEEKS_TO_DOWNLOAD)

            new_quotes = {}
            # BUG FIX: get_data_yahoo is the call that raises
            # RemoteDataError, so it must be inside the try block; the
            # original guarded only the iteration over its result, making
            # the except clause unreachable for the fetch failure.
            try:
                yahoo_data = web.get_data_yahoo(ticker_symbol, start, today)
                for row in yahoo_data.iterrows():
                    quote_date = row[0].strftime('%Y-%m-%d')
                    quote_data = row[1].to_dict()
                    new_quotes[quote_date] = quote_data
            except RemoteDataError:
                print(
                    'Error getting finance data for {0}'.format(ticker_symbol))
                return

            # base data from finance API:
            for quote_date, quote_data in new_quotes.items():
                try:
                    quote, _ = Quote.objects.get_or_create(ticker=ticker,
                                                           date=quote_date)
                except Quote.MultipleObjectsReturned:
                    # Duplicate rows exist; update the first one.
                    quote = Quote.objects.filter(ticker=ticker,
                                                 date=quote_date).first()
                quote.high = quote_data['High']
                quote.low = quote_data['Low']
                quote.open = quote_data['Open']
                quote.close = quote_data['Close']
                quote.volume = quote_data['Volume']
                quote.adj_close = quote_data['Adj Close']
                quote.save()

            # Index quotes keyed by date, for scaling against the market.
            index_quotes_dict = {
                q.date: q
                for q in Ticker.objects.get(
                    symbol=settings.INDEX_TICKER).quote_set.order_by('date')
            }

            ticker_quotes_list = [q for q in ticker.quote_set.order_by('date')]

            # set scaled_adj_close on all quotes first
            # NOTE(review): raises KeyError if an index quote is missing for
            # a date the ticker has — TODO confirm data is always aligned.
            for quote in ticker_quotes_list:
                quote.index_adj_close = index_quotes_dict[quote.date].adj_close
                quote.scaled_adj_close = quote.adj_close / quote.index_adj_close

            # calculate moving average for each day
            for quote in ticker_quotes_list:
                moving_average_start = quote.date + timedelta(
                    weeks=-settings.MOVING_AVERAGE_WEEKS)
                moving_average_quote_set = [
                    q for q in ticker_quotes_list
                    if moving_average_start <= q.date <= quote.date
                ]
                moving_average_quote_values = [
                    v.scaled_adj_close for v in moving_average_quote_set
                ]
                quote.quotes_in_moving_average = len(
                    moving_average_quote_values)
                quote.sac_moving_average = sum(
                    moving_average_quote_values
                ) / quote.quotes_in_moving_average
                quote.sac_to_sacma_ratio = quote.scaled_adj_close / quote.sac_moving_average

            # save changes
            for quote in ticker_quotes_list:
                quote.save()

            print('Found %s quotes for %s from %s to %s' %
                  (len(new_quotes), ticker_symbol, start.strftime('%Y-%m-%d'),
                   today.strftime('%Y-%m-%d')))
예제 #37
0
    def test_acs_with_authn_response_includes_subjectLocality(self):
        """ACS endpoint accepts a signed AuthnResponse whose AuthnStatement
        carries a SubjectLocality element.

        Builds signed SP metadata, stands up a local SAML IdP from it,
        crafts a signed response containing a SubjectLocality (IP address),
        and POSTs the base64-encoded response to the ACS view.
        """
        self._skip_if_xmlsec_binary_missing()
        # Request signing must be on so metadata/response signing is exercised.
        self.config.use_signed_authn_request = True
        self.config.save()

        # Generate signed SP metadata using the test IdP key/cert pair.
        with override_settings(SAML_KEY_FILE=self.ipd_key_path,
                               SAML_CERT_FILE=self.ipd_cert_path):
            saml2config = self.config
            sp_config = config.SPConfig()
            sp_config.load(create_saml_config_for(saml2config))
            sp_metadata = create_metadata_string('',
                                                 config=sp_config,
                                                 sign=True)

        idp_config = self.get_idp_config(sp_metadata)

        # Attribute assertion payload for the simulated user.
        identity = {
            "eduPersonAffiliation": ["staff", "member"],
            "surName": ["Jeter"],
            "givenName": ["Derek"],
            "mail": ["*****@*****.**"],
            "title": ["shortstop"]
        }

        with closing(SamlServer(idp_config)) as server:
            name_id = server.ident.transient_nameid(
                "urn:mace:example.com:saml:roland:idp", "id12")

            authn_context_ref = authn_context_class_ref(
                AUTHN_PASSWORD_PROTECTED)
            authn_context = AuthnContext(
                authn_context_class_ref=authn_context_ref)

            # The element under test: locality attached to the AuthnStatement.
            locality = saml.SubjectLocality()
            locality.address = "172.31.25.30"

            authn_statement = AuthnStatement(
                subject_locality=locality,
                authn_instant=datetime.now().isoformat(),
                authn_context=authn_context,
                session_index="id12")

            authn_response = server.create_authn_response(
                identity,
                "id12",  # in_response_to
                self.
                sp_acs_location,  # consumer_url. config.sp.endpoints.assertion_consumer_service:["acs_endpoint"]
                self.sp_acs_location,  # sp_entity_id
                name_id=name_id,
                sign_assertion=True,
                sign_response=True,
                authn_statement=authn_statement)

        # SAMLResponse is transported base64-encoded, as per the POST binding.
        base64_encoded_response_metadata = base64.b64encode(
            authn_response.encode('utf-8'))
        base_64_utf8_response_metadata = base64_encoded_response_metadata.decode(
            'utf-8')

        request = self.client.post(
            reverse('assertion_consumer_service',
                    kwargs={'idp_name': self.config.slug}),
            {'SAMLResponse': base_64_utf8_response_metadata})
예제 #38
0
파일: driver.py 프로젝트: eandhy/atmosphere
def add_floating_ip(driverCls,
                    provider,
                    identity,
                    instance_alias,
                    delete_status=True,
                    *args,
                    **kwargs):
    """Celery task: assign (or reuse) a floating IP for an instance.

    Cleans unused floating IPs first, associates one with the instance,
    derives a hostname, records networking metadata on the instance, and
    returns {"floating_ip": ..., "hostname": ...}. Retries with limited
    exponential backoff on any failure (networking can lag instance launch).

    Returns None if the instance no longer exists.
    """
    #For testing ONLY.. Test cases ignore countdown..
    if app.conf.CELERY_ALWAYS_EAGER:
        logger.debug("Eager task waiting 15 seconds")
        time.sleep(15)
    try:
        logger.debug("add_floating_ip task started at %s." % datetime.now())
        #Remove unused floating IPs first, so they can be re-used
        driver = get_driver(driverCls, provider, identity)
        driver._clean_floating_ip()

        #assign if instance doesn't already have an IP addr
        instance = driver.get_instance(instance_alias)
        if not instance:
            logger.debug("Instance has been teminated: %s." % instance_alias)
            return None
        floating_ips = driver._connection.neutron_list_ips(instance)
        if floating_ips:
            floating_ip = floating_ips[0]["floating_ip_address"]
        else:
            floating_ip = driver._connection.neutron_associate_ip(
                instance, *args, **kwargs)["floating_ip_address"]
        _update_status_log(instance, "Networking Complete")
        #TODO: Implement this as its own task, with the result from
        #'floating_ip' passed in. Add it to the deploy_chain before deploy_to
        hostname = ""
        if floating_ip.startswith('128.196'):
            # BUG FIX: pattern is now a raw string so the '\.' escapes are
            # literal regex tokens instead of invalid string escapes.
            regex = re.compile(r"(?P<one>[0-9]+)\.(?P<two>[0-9]+)\."
                               r"(?P<three>[0-9]+)\.(?P<four>[0-9]+)")
            r = regex.search(floating_ip)
            (one, two, three, four) = r.groups()
            # 128.196.x.y maps to the iPlant vm<x>-<y> naming scheme.
            hostname = "vm%s-%s.iplantcollaborative.org" % (three, four)
        else:
            # Find a way to convert new floating IPs to hostnames..
            hostname = floating_ip

        metadata_update = {
            'public-hostname': hostname,
            'public-ip': floating_ip
        }
        #NOTE: This is part of the temp change, should be removed when moving
        # to vxlan
        instance_ports = driver._connection.neutron_list_ports(
            device_id=instance.id)
        network = driver._connection.neutron_get_tenant_network()
        if instance_ports:
            for idx, fixed_ip_port in enumerate(instance_ports):
                fixed_ips = fixed_ip_port.get('fixed_ips', [])
                mac_addr = fixed_ip_port.get('mac_address')
                metadata_update['mac-address%s' % idx] = mac_addr
                metadata_update['port-id%s' % idx] = fixed_ip_port['id']
                metadata_update['network-id%s' % idx] = network['id']
        #EndNOTE:

        update_instance_metadata(driver,
                                 instance,
                                 data=metadata_update,
                                 replace=False)

        logger.info("Assigned IP:%s - Hostname:%s" % (floating_ip, hostname))
        #End
        logger.debug("add_floating_ip task finished at %s." % datetime.now())
        return {"floating_ip": floating_ip, "hostname": hostname}
    except Exception as exc:
        logger.exception("Error occurred while assigning a floating IP")
        #Networking can take a LONG time when an instance first launches,
        #it can also be one of those things you 'just miss' by a few seconds..
        #So we will retry 30 times using limited exp.backoff
        #Max Time: 53min
        countdown = min(2**current.request.retries, 128)
        add_floating_ip.retry(exc=exc, countdown=countdown)
예제 #39
0
파일: driver.py 프로젝트: eandhy/atmosphere
def print_debug():
    """Sanity-check task: emit a timestamped message to stdout and the log."""
    log_str = "print_debug task finished at %s." % datetime.now()
    # BUG FIX: Python 2 'print log_str' statement is a SyntaxError on
    # Python 3; the parenthesized form works on both.
    print(log_str)
    logger.debug(log_str)
예제 #40
0
def echo_test_script():
    """Build a ScriptDeployment that echoes a timestamped test message."""
    command = 'echo "Test deployment working @ %s"' % datetime.now()
    return ScriptDeployment(command, name="./deploy_echo.sh")
예제 #41
0
파일: driver.py 프로젝트: eandhy/atmosphere
def clear_empty_ips():
    """Celery task: reclaim unused floating IPs and tear down idle networks.

    For every active OpenStack identity: remove unassigned floating IPs,
    strip IPs from inactive instances, and — when a tenant has no running
    instances — delete its security group and project network.

    Returns (num_ips_removed, num_networks_removed).
    """
    logger.debug("clear_empty_ips task started at %s." % datetime.now())
    from service import instance as instance_service
    from rtwo.driver import OSDriver
    from api import get_esh_driver
    from service.accounts.openstack import AccountDriver as\
        OSAccountDriver

    identities = Identity.objects.filter(
        provider__type__name__iexact='openstack', provider__active=True)
    # BUG FIX: the original built attrgetter(<values>) and returned the
    # attrgetter object itself as the sort key, which is unorderable on
    # Python 3. Sort by the (provider type name, username) tuple instead.
    key_sorter = lambda ident: (ident.provider.type.name,
                                ident.created_by.username)
    identities = sorted(identities, key=key_sorter)
    os_acct_driver = None
    total = len(identities)
    num_removed = 0
    nets_removed = 0
    for idx, core_identity in enumerate(identities):
        try:
            #Initialize the drivers
            driver = get_esh_driver(core_identity)
            if not isinstance(driver, OSDriver):
                continue
            # Reuse the account driver while consecutive identities share a
            # provider (identities are sorted, so providers group together).
            if not os_acct_driver or\
                    os_acct_driver.core_provider != core_identity.provider:
                os_acct_driver = OSAccountDriver(core_identity.provider)
                logger.info("Initialized account driver")
            # Get useful info
            creds = core_identity.get_credentials()
            tenant_name = creds['ex_tenant_name']
            logger.info("Checking Identity %s/%s - %s" %
                        (idx + 1, total, tenant_name))
            # Attempt to clean floating IPs
            num_ips_removed = driver._clean_floating_ip()
            if num_ips_removed:
                logger.debug("Removed %s ips from OpenStack Tenant %s" %
                             (num_ips_removed, tenant_name))
                num_removed += num_ips_removed
            #Test for active/inactive instances
            instances = driver.list_instances()
            active = any(
                driver._is_active_instance(inst) for inst in instances)
            inactive = all(
                driver._is_inactive_instance(inst) for inst in instances)
            for instance in instances:
                if driver._is_inactive_instance(instance) and instance.ip:
                    # If an inactive instance has floating/fixed IPs.. Remove them!
                    instance_service.remove_ips(driver, instance)
            if active and not inactive:
                #User has >1 active instances AND not all instances inactive
                pass
            elif os_acct_driver.network_manager.get_network_id(
                    os_acct_driver.network_manager.neutron,
                    '%s-net' % tenant_name):
                #User has 0 active instances OR all instances are inactive
                #Network exists, attempt to dismantle as much as possible
                remove_network = not inactive
                logger.info("Removing project network %s for %s" %
                            (remove_network, tenant_name))
                if remove_network:
                    #Sec. group can't be deleted if instances are suspended
                    # when instances are suspended we pass remove_network=False
                    os_acct_driver.delete_security_group(core_identity)
                    os_acct_driver.delete_network(
                        core_identity, remove_network=remove_network)
                    nets_removed += 1
            else:
                #logger.info("No Network found. Skipping %s" % tenant_name)
                pass
        except Exception as exc:
            # Best effort per identity: log and continue with the next one.
            logger.exception(exc)
    logger.debug("clear_empty_ips task finished at %s." % datetime.now())
    return (num_removed, nets_removed)
예제 #42
0
def render_filter_ele(condtion, admin_class, filter_condtions, selectdate):
    """Render the <select> element for one list-filter column of the admin table.

    :param condtion: name of the model field being filtered.
    :param admin_class: admin registration object exposing ``model``.
    :param filter_condtions: dict of filter params parsed from the GET request.
    :param selectdate: currently selected date value (string) for date fields.
    :return: mark_safe HTML string for the select element.
    """
    select_ele = '''<select class="form-control" name='{filter_name}' ><option value=''>----</option>'''

    # Field descriptor for the filtered column, via the model's _meta API.
    field_obj = admin_class.model._meta.get_field(condtion)

    # Fields with explicit choices: one <option> per choice, preselecting
    # the one matching the current filter value.
    if field_obj.choices:
        selected = ''
        for choice_item in field_obj.choices:
            if filter_condtions.get(condtion) == str(choice_item[0]):
                selected = "selected"
            select_ele += '''<option value='%s' %s>%s</option>''' % (
                choice_item[0], selected, choice_item[1])
            selected = ''

    # ForeignKey fields: options come from the related objects
    # (get_choices()[1:] skips the leading blank choice).
    if type(field_obj).__name__ == "ForeignKey":
        selected = ''
        for choice_item in field_obj.get_choices()[1:]:
            if filter_condtions.get(condtion) == str(choice_item[0]):
                selected = "selected"
            select_ele += '''<option value='%s' %s>%s</option>''' % (
                choice_item[0], selected, choice_item[1])
            selected = ''

    # Date/datetime fields get a fixed set of "since <date>" shortcuts and
    # filter with a __gte lookup instead of an exact match.
    if type(field_obj).__name__ in ['DateTimeField', 'DateField']:
        today_ele = datetime.now().date()
        date_els = [
            ["今天", today_ele],
            ["昨天", today_ele - timedelta(days=1)],
            ["近7天", today_ele - timedelta(days=7)],
            ["本月", today_ele.replace(day=1)],
            ["近30天", today_ele - timedelta(days=30)],
            ["近90天", today_ele - timedelta(days=90)],
            ["本年", today_ele.replace(month=1, day=1)],
            ["365天", today_ele - timedelta(days=365)],
        ]

        # BUG FIX: removed leftover debug print statements that polluted
        # stdout on every render of a date filter.
        for d in date_els:
            selected = ""
            if selectdate and selectdate == str(d[1]):
                selected = "selected"
            select_ele += '''<option value='%s' %s>%s</option>''' % (
                d[1], selected, d[0])

        filter_field_name = "%s__gte" % condtion
    else:
        filter_field_name = condtion
    select_ele = select_ele.format(filter_name=filter_field_name)

    select_ele += "</select>"
    return mark_safe(select_ele)
예제 #43
0
def admin_dashboard(request):

    if request.user.is_superuser:
        # filter without 'org_name'
        orgs = OrgProfile.objects.all()
        myFilter_orgs = OrgsFilter(request.GET, queryset=orgs)
        orgs_count = myFilter_orgs.qs.count()
        researchs = OrgResearch.objects.filter(
            publish=True).order_by('-created_at')
        myFilter = OrgsFilter(request.GET, queryset=researchs)
        researchs_count = myFilter.qs.count()
        # filter with 'org_name'
        news = OrgNews.objects.filter(
            Q(publish=True)
            & ~Q(org_name__name='khalil')).order_by('-created_at')
        myFilter = OrgsNewsFilter(request.GET, queryset=news)
        news_count = myFilter.qs.count()
        rapports = OrgRapport.objects.filter(
            publish=True).order_by('-created_at')
        myFilter = OrgsNewsFilter(request.GET, queryset=rapports)
        rapports_count = myFilter.qs.count()
        datas = OrgData.objects.filter(publish=True).order_by('-created_at')
        myFilter = OrgsNewsFilter(request.GET, queryset=datas)
        datas_count = myFilter.qs.count()
        medias = OrgMedia.objects.filter(publish=True).order_by('-created_at')
        myFilter = OrgsNewsFilter(request.GET, queryset=medias)
        medias_count = myFilter.qs.count()
        jobs = OrgJob.objects.filter(publish=True).order_by('-created_at')
        myFilter = OrgsNewsFilter(request.GET, queryset=jobs)
        jobs_count = myFilter.qs.count()
        fundings = OrgFundingOpp.objects.filter(
            publish=True).order_by('-created_at')
        myFilter = OrgsNewsFilter(request.GET, queryset=fundings)
        fundings_count = myFilter.qs.count()
        Capacitys = OrgCapacityOpp.objects.filter(
            publish=True).order_by('-created_at')
        myFilter = OrgsNewsFilter(request.GET, queryset=Capacitys)
        Capacitys_count = myFilter.qs.count()
        devs = DevOrgOpp.objects.filter(publish=True).order_by('-created_at')
        myFilter = OrgsNewsFilter(request.GET, queryset=devs)
        devs_count = myFilter.qs.count()
        our_news = OrgNews.objects.filter(
            Q(publish=True)
            & Q(org_name__name='khalil')).order_by('-created_at')
        myFilter = OrgsNewsFilter(request.GET, queryset=our_news)
        our_news_count = myFilter.qs.count()
        # data visualation
        sdate = str(datetime.now().date() - timedelta(days=6))
        edate = str(datetime.now().date())
        if request.GET:
            sdate = request.GET.get('start_date_pub')
            edate = request.GET.get('end_date_pub')
            if sdate == '':
                sdate = str(datetime.now().date() - timedelta(days=6))
            if edate == '':
                edate = str(datetime.now().date())
        days = []
        delta = datetime.strptime(edate,
                                  '%Y-%m-%d').date() - datetime.strptime(
                                      sdate, '%Y-%m-%d').date()
        for i in range(delta.days + 1):
            day = datetime.strptime(sdate,
                                    '%Y-%m-%d').date() + timedelta(days=i)
            days.append(day)
        # news per day
        days_to_present = []
        counts = []
        if request.GET:
            org_name = request.GET.get('org_name', None)
            if org_name == '':
                for i in range(len(days)):
                    days_to_present.append(str(days[i]))
                    counts.append(
                        OrgNews.objects.filter(
                            Q(publish=True)
                            & Q(published_at__date=days[i])).count())

            else:
                for i in range(len(days)):
                    days_to_present.append(str(days[i]))
                    counts.append(
                        OrgNews.objects.filter(
                            Q(publish=True) & Q(published_at__date=days[i])
                            & Q(org_name__id=org_name)).count())
        else:
            for i in range(len(days)):
                days_to_present.append(str(days[i]))
                counts.append(
                    OrgNews.objects.filter(
                        Q(publish=True)
                        & Q(published_at__date=days[i])).count())

        source = ColumnDataSource(
            data=dict(days_to_present=days_to_present, counts=counts))
        factor_cmap('', palette=Spectral6, factors=days_to_present)
        TOOLTIPS = [
            ('date', "@days_to_present"),
            ('count', "@counts"),
        ]
        p = figure(
            x_range=days_to_present,
            plot_height=250,
            title="عدد الأخبار المنشورة باليوم",
            tools="pan,wheel_zoom,box_zoom,save,zoom_in,hover,zoom_out,reset",
            tooltips=TOOLTIPS)
        p.vbar(x='days_to_present',
               top='counts',
               width=0.9,
               source=source,
               legend_field="days_to_present",
               line_color='white',
               fill_color=factor_cmap('days_to_present',
                                      palette=Spectral6,
                                      factors=days_to_present))
        p.xgrid.grid_line_color = None
        p.ygrid.grid_line_color = None
        p.y_range.start = 0
        p.background_fill_color = "rgb(255, 255, 255)"
        p.border_fill_color = "rgb(255, 255, 255)"
        # p.background_fill_color = "rgba(23, 103, 140, 0.1)"
        # p.border_fill_color = "rgba(23, 103, 140, 0.1)"
        p.title.align = 'center'
        p.legend.visible = False
        script, div = components(p)
        # orgs by days
        counts_org = []
        for i in range(len(days)):
            counts_org.append(
                OrgProfile.objects.filter(
                    Q(publish=True) & Q(published_at__date=days[i])).count())

        source = ColumnDataSource(
            data=dict(days_to_present=days_to_present, counts_org=counts_org))
        factor_cmap('days_to_present',
                    palette=Spectral6,
                    factors=days_to_present)
        TOOLTIPS = [
            ('name', "@days_to_present"),
            ('count', "@counts_org"),
        ]
        p_org = figure(
            x_range=days_to_present,
            plot_height=250,
            title="عدد المنظمات المنشورة باليوم",
            tools="pan,wheel_zoom,box_zoom,save,zoom_in,hover,zoom_out,reset",
            tooltips=TOOLTIPS)
        p_org.vbar(x='days_to_present',
                   top='counts_org',
                   width=0.9,
                   source=source,
                   legend_field="days_to_present",
                   line_color='white',
                   fill_color=factor_cmap('days_to_present',
                                          palette=Spectral6,
                                          factors=days_to_present))
        p_org.xgrid.grid_line_color = None
        p_org.ygrid.grid_line_color = None
        p_org.y_range.start = 0
        p_org.background_fill_color = "rgb(255, 255, 255)"
        p_org.border_fill_color = "rgb(255, 255, 255)"
        p_org.title.align = 'center'
        p_org.legend.visible = False
        script_org, div_org = components(p_org)
        # reports by days
        counts_report = []
        if request.GET:
            org_name = request.GET.get('org_name', None)
            if org_name == '':
                for i in range(len(days)):
                    counts_report.append(
                        OrgRapport.objects.filter(
                            Q(publish=True)
                            & Q(published_at__date=days[i])).count())

            else:
                for i in range(len(days)):
                    counts_report.append(
                        OrgRapport.objects.filter(
                            Q(publish=True) & Q(published_at__date=days[i])
                            & Q(org_name__id=org_name)).count())
        else:
            for i in range(len(days)):
                counts_report.append(
                    OrgRapport.objects.filter(
                        Q(publish=True)
                        & Q(published_at__date=days[i])).count())

        source = ColumnDataSource(data=dict(days_to_present=days_to_present,
                                            counts_report=counts_report))
        factor_cmap('', palette=Spectral6, factors=days_to_present)
        TOOLTIPS = [
            ('name', "@days_to_present"),
            ('count', "@counts_report"),
        ]
        p_report = figure(
            x_range=days_to_present,
            plot_height=250,
            title="عدد التقارير المنشورة باليوم",
            tools="pan,wheel_zoom,box_zoom,save,zoom_in,hover,zoom_out,reset",
            tooltips=TOOLTIPS)
        p_report.vbar(x='days_to_present',
                      top='counts_report',
                      width=0.9,
                      source=source,
                      legend_field="days_to_present",
                      line_color='white',
                      fill_color=factor_cmap('days_to_present',
                                             palette=Spectral6,
                                             factors=days_to_present))
        p_report.xgrid.grid_line_color = None
        p_report.ygrid.grid_line_color = None
        p_report.y_range.start = 0
        p_report.background_fill_color = "rgb(255, 255, 255)"
        p_report.border_fill_color = "rgb(255, 255, 255)"
        p_report.title.align = 'center'
        p_report.legend.visible = False
        script_report, div_report = components(p_report)
        # jobs per days
        counts_jobs = []
        if request.GET:
            org_name = request.GET.get('org_name', None)
            if org_name == '':
                for i in range(len(days)):
                    counts_jobs.append(
                        OrgJob.objects.filter(
                            Q(publish=True)
                            & Q(published_at__date=days[i])).count())

            else:
                for i in range(len(days)):
                    counts_jobs.append(
                        OrgJob.objects.filter(
                            Q(publish=True) & Q(published_at__date=days[i])
                            & Q(org_name__id=org_name)).count())
        else:
            for i in range(len(days)):
                counts_jobs.append(
                    OrgJob.objects.filter(
                        Q(publish=True)
                        & Q(published_at__date=days[i])).count())

        source = ColumnDataSource(data=dict(days_to_present=days_to_present,
                                            counts_jobs=counts_jobs))
        factor_cmap('', palette=Spectral6, factors=days_to_present)
        TOOLTIPS = [
            ('name', "@days_to_present"),
            ('count', "@counts_jobs"),
        ]
        p_jobs = figure(
            x_range=days_to_present,
            plot_height=250,
            title="عدد التقارير المنشورة باليوم",
            tools="pan,wheel_zoom,box_zoom,save,zoom_in,hover,zoom_out,reset",
            tooltips=TOOLTIPS)
        p_jobs.vbar(x='days_to_present',
                    top='counts_jobs',
                    width=0.9,
                    source=source,
                    legend_field="days_to_present",
                    line_color='white',
                    fill_color=factor_cmap('days_to_present',
                                           palette=Spectral6,
                                           factors=days_to_present))
        p_jobs.xgrid.grid_line_color = None
        p_jobs.ygrid.grid_line_color = None
        p_jobs.y_range.start = 0
        p_jobs.background_fill_color = "rgb(255, 255, 255)"
        p_jobs.border_fill_color = "rgb(255, 255, 255)"
        p_jobs.title.align = 'center'
        p_jobs.legend.visible = False
        script_jobs, div_jobs = components(p_jobs)

        # v_count = OrgNews.objects.filter(Q(publish=True)& Q(published_at__date='2020-11-05')).count()
        # for pro in profs:
        #     org_type = pro.get_org_type_display()
        #     position_work = pro.get_position_work_display()
        #     # city_work = pro.get_city_work_display()
        #     work_domain = pro.get_work_domain_display()
        #     target_cat = pro.get_target_cat_display()
        #     org_registered_country = pro.get_org_registered_country_display()
        #     w_polic_regulations = pro.get_w_polic_regulations_display()

        context = {

            # 'org_type': org_type,
            # 'position_work': position_work,
            # # 'city_work': city_work,
            # 'work_domain': work_domain,
            # 'target_cat': target_cat,
            # 'org_registered_country': org_registered_country,
            # 'w_polic_regulations': w_polic_regulations,
            'news_count': news_count,
            'myFilter': myFilter,
            'orgs_count': orgs_count,
            'myFilter_orgs': myFilter_orgs,
            'rapports_count': rapports_count,
            'datas_count': datas_count,
            'medias_count': medias_count,
            'researchs_count': researchs_count,
            'jobs_count': jobs_count,
            'fundings_count': fundings_count,
            'Capacitys_count': Capacitys_count,
            'devs_count': devs_count,
            'our_news_count': our_news_count,
            # 'sdate':sdate,
            # 'edate':edate,
            'days': days,
            'delta': delta,
            'script': script,
            'div': div,
            'script_org': script_org,
            'div_org': div_org,
            'script_report': script_report,
            'div_report': div_report,
            'script_jobs': script_jobs,
            'div_jobs': div_jobs,
            #  'org_name':org_name
        }

    else:
        return HttpResponse(
            'You dont have the permitions to entro this page :) ')

    return render(request, 'profiles/layout_profile.html', context)
예제 #44
0
from django.core.management.base import BaseCommand
from curriculum_tracking.models import RecruitProjectReview, AgileCard, ContentItem
from core.models import RecruitCohort, User
from django.utils.timezone import datetime, timedelta
import os
from pathlib import Path
import csv

today = datetime.now().date()


def recruit_report():
    results = []
    # date_headings = []

    today = datetime.now().date()

    for recruit_cohort in (RecruitCohort.objects.filter(
            user__active=True).filter(
                cohort__active=True).prefetch_related("user").prefetch_related(
                    "cohort").prefetch_related("cohort__cohort_curriculum")):
        user = recruit_cohort.user
        if not user.active:
            continue

        print(user)

        user_data = {
            "user":
            user.email,
            "cohort":
예제 #45
0
    def save(self, *args, **kwargs):
        """Validate this bid against the item and the previous bid, then save.

        Table-level validation, overriding the default behaviour.

        :param args: forwarded to the parent ``save``
        :param kwargs: forwarded to the parent ``save``
        :raises ValidationError: when the amount is below the starting bid,
            does not beat the last bid, the bidder equals the last bidder,
            the auction has closed, or the item already reached its
            expected amount.
        :return: result of the parent ``save``
        """
        # The bid must at least match the item's starting bid.
        try:
            minimum_validator(self.item.starting_bid)(self.amount)
        except ValidationError:
            raise ValidationError(
                'Amount should not be lower than base amount')

        # The bid must also beat the most recent bid on this item, if any.
        try:
            lastbid = bidOnItem.objects.filter(
                item=self.item).latest('creation_date')
            lastuser = lastbid.user
            lastamount = lastbid.amount

            minimum_validator(lastbid.amount)(self.amount)
        except ValidationError:
            raise ValidationError('Amount is Lesser than last Bid ')
        except bidOnItem.DoesNotExist:
            # No previous bids exist for this item.
            lastuser = None
            lastamount = 0

        # A user may not outbid themselves.  (Explicit raise instead of the
        # original assert: asserts are stripped under ``python -O``.)
        if self.user == lastuser:
            raise ValidationError("last bidder is the same as current bidder")

        # The auction must still be open.  Expiration is stored in UTC and
        # compared in the local timezone.
        utcawarecurtime = datetime.now().replace(tzinfo=LOCAL_TZ)
        # Dropped the original dead ``or utcawarecurtime`` fallback: a
        # datetime instance is always truthy, so it could never apply.
        endtime = self.item.expiration_time.replace(
            tzinfo=pytz.utc).astimezone(LOCAL_TZ)

        if utcawarecurtime > endtime:
            # str() is required here: concatenating a datetime onto the
            # message raised TypeError in the original, masking the real
            # validation error.
            raise ValidationError("Bidding for this item got closed on:" +
                                  str(self.item.endtime))

        # The item must not already have reached its expected (buy-out)
        # amount.  sys.maxsize replaces the Python-2-only sys.maxint.
        import sys
        if lastamount > (self.item.expected_amount or sys.maxsize):
            raise ValidationError(
                "Item was already sold in the desired amount")

        # save in the db
        return super(bidOnItem, self).save(*args, **kwargs)
예제 #46
0
def recruit_report():
    """Build a per-recruit review-activity report and write it to a CSV
    file under ``gitignore/`` (one row per active recruit in an active
    cohort)."""
    results = []
    # date_headings = []

    today = datetime.now().date()

    # Active recruits in active cohorts; prefetches avoid per-row queries.
    for recruit_cohort in (RecruitCohort.objects.filter(
            user__active=True).filter(
                cohort__active=True).prefetch_related("user").prefetch_related(
                    "cohort").prefetch_related("cohort__cohort_curriculum")):
        user = recruit_cohort.user
        # Redundant with the user__active=True filter above, kept as a
        # belt-and-braces guard.
        if not user.active:
            continue

        print(user)

        user_data = {
            "user":
            user.email,
            "cohort":
            str(recruit_cohort.cohort),
            "employer_partner":
            recruit_cohort.employer_partner,
            # Cards this user is currently reviewing.
            "in review card_count":
            (AgileCard.objects.filter(status=AgileCard.IN_REVIEW).filter(
                reviewers__in=[user]).count()),
            # This user's own project cards sitting in review.
            "complete project card_count":
            (AgileCard.objects.filter(status=AgileCard.IN_REVIEW).filter(
                assignees__in=[user]).filter(
                    content_item__content_type=ContentItem.PROJECT).count()),
        }
        # Presumably adds the per-day columns and the "total reviews" key
        # used by the sort below — TODO confirm in its definition.
        add_daily_review_counts(user, user_data)

        # total = 0
        # for day in range(7):
        #     maximum = today - timedelta(days=day)
        #     minimum = maximum - timedelta(days=1)
        #     count = (
        #         RecruitProjectReview.objects.filter(reviewer_user=user)
        #         .filter(timestamp__gte=minimum)
        #         .filter(timestamp__lte=maximum)
        #         .count()
        #     )
        #     # if count:
        #     #     breakpoint()
        #     heading = minimum.strftime("%a %d %b")
        #     date_headings.append(heading)
        #     user_data[heading] = count
        #     total = total + count

        # user_data["total reviews"] = total

        results.append(user_data)

    os.makedirs("gitignore", exist_ok=True)
    # NOTE(review): relies on add_daily_review_counts having set
    # "total reviews" on every row; KeyError otherwise.
    results.sort(key=lambda d: d["total reviews"])
    with open(
            Path(
                f"gitignore/recruit_reviews_{today.strftime('%a %d %b %Y')}.csv"
            ), "w") as f:
        writer = csv.writer(f)
        # NOTE(review): header comes from the *last* loop iteration's
        # user_data; NameError if the queryset was empty — confirm callers
        # only run this with at least one matching recruit.
        writer.writerow(user_data.keys())
        writer.writerows([d.values() for d in results])
def get_session_expiry():
    """Return the session expiry timestamp: one day from the current moment."""
    one_day = timedelta(days=1)
    return one_day + datetime.now()
예제 #48
0
def remove_empty_networks():
    """Fan out an async network-cleanup subtask for every active openstack provider."""
    logger.debug("remove_empty_networks task started at %s." % datetime.now())
    active_providers = Provider.get_active(type_name='openstack')
    for active_provider in active_providers:
        remove_empty_networks_for.apply_async(args=[active_provider.id])
예제 #49
0
 def recordAcquisitionExecution(executor,exec_time,exec_params,cost,order_client_id):
     """Persist a 'buy' execution record for the given robot executor.

     exec_params is accepted for signature parity with the sell-side
     recorder but is not stored here.
     """
     now = datetime.now(getTimeZoneInfo())
     action = 'buy'
     # Fix: the original bound the created row to an unused local ('entry');
     # dropped for consistency with recordDispositionExecution.
     ETFPairRobotExecutionData.objects.create(execution=executor,trade_time=now,exec_time=exec_time,
                                              exec_action=action,cost_or_income=cost,order_client_id=order_client_id)
예제 #50
0
 def recordDispositionExecution(executor,exec_time,exec_params,income,order_client_id):
     """Persist a 'sell' execution record for the given robot executor."""
     trade_moment = datetime.now(getTimeZoneInfo())
     sell_action = 'sell'
     ETFPairRobotExecutionData.objects.create(
         execution=executor,
         trade_time=trade_moment,
         exec_time=exec_time,
         exec_action=sell_action,
         cost_or_income=income,
         order_client_id=order_client_id)
예제 #51
0
    def test_edit_task_fail_not_owner(self):
        """Patching another user's task must be rejected with 403."""
        self.client1.force_authenticate(self.user1)
        payload = {'finished': datetime.now()}
        response = self.client1.patch('/api/task/30/', data=payload)
        self.assertEqual(response.status_code, 403)  # 404?
예제 #52
0
파일: views.py 프로젝트: cleanenergy/site
def cliente_geracao(request, pk):
	"""Render the generation dashboard for the client identified by *pk*.

	Optional GET parameters:
	  data -- reference date as %d-%m-%Y (defaults to today)
	  ug   -- id of a specific generating unit (defaults to the client's first)
	Redirects to /controle/ when the client does not exist.
	"""
	try:
		cliente = Cliente.objects.get(user__pk=pk)
	except ObjectDoesNotExist:
		cliente = None

	if cliente:
		# Reference constants for the equivalence figures shown on the page.
		estimativa = 0.0046875			# kWh/Wp		(Clean's estimation data)
		consumoTV = 0.136				# kWh/h			(TV 55" Sony KD-55X705E)
		consumoCelular = 0.015 			# kWh/charge	(Samsung Galaxy S9)
		consumoMaquinaLavar = 0.27		# kWh/cycle		(Samsung WD136UVHJWDF)
		consumoLampadaLed = 0.336		# kWh/day		(14W bulb equivalent to 100W)
		consumoCarroEletrico = 	0.15	# kWh/km		(Tesla Model S)
		consumoChuveiro = 5.5 			# kWh/hour		(5,500W shower)
		tarifa = 0.83 					# R$/kWh		(Cemig green-flag tariff 7/2018)

		data = datetime.strptime(request.GET.get("data", datetime.now().strftime("%d-%m-%Y")), "%d-%m-%Y")
		idUg = request.GET.get("ug", None)
		ugs = Geradora.objects.filter(cliente=cliente)

		# The two original branches differed only in how the generating unit
		# was chosen; everything below is shared (dedup of ~25 copied lines).
		if idUg:
			ug = Geradora.objects.filter(id=idUg)[:1][0]
		else:
			ug = ugs.first()
		dadosGeracao = getDadosGeracao(ug=ug, cliente=cliente, data=data)

		return render(request, "controle/clientes_geracao.html", {
			"pk": pk,
			"date": data.strftime("%d-%m-%Y"),
			"ug": ug,
			"ugs": ugs,
			"dadosDia": str(dadosGeracao["dadosDia"]["dados"]),
			"labelsDia": str(dadosGeracao["dadosDia"]["labels"]),
			"dadosMes": str(dadosGeracao["dadosMes"]["dados"]),
			"labelsMes": str(dadosGeracao["dadosMes"]["labels"]),
			"mediaMes": round(dadosGeracao["potenciaInstalada"]*estimativa, 1),
			"dadosAno": str(dadosGeracao["dadosAno"]["dados"]),
			"labelsAno": str(dadosGeracao["dadosAno"]["labels"]),
			"potenciaAtual": round(dadosGeracao["potenciaAtual"], 1),
			"potenciaInstalada": round(dadosGeracao["potenciaInstalada"],0),
			"energiaDia": round(dadosGeracao["energiaDia"], 1),
			"percentualDia": round(dadosGeracao["energiaDia"]/(dadosGeracao["potenciaInstalada"]*estimativa)*100,2),
			"energiaMes": round(dadosGeracao["energiaMes"],1),
			"consumoTV": int(dadosGeracao["energiaMes"]/consumoTV),
			"consumoCelular": int(dadosGeracao["energiaMes"]/consumoCelular),
			"consumoMaquinaLavar": int(dadosGeracao["energiaMes"]/consumoMaquinaLavar),
			"consumoLampadaLed": int(dadosGeracao["energiaMes"]/consumoLampadaLed),
			"consumoCarroEletrico": int(dadosGeracao["energiaMes"]/consumoCarroEletrico),
			"consumoChuveiro": int(dadosGeracao["energiaMes"]/consumoChuveiro),
			"dinheiroMes": round(dadosGeracao["energiaMes"]*tarifa, 2),
			"dinheiroAno": round(dadosGeracao["energiaAno"]*tarifa, 2),
			})

	return redirect('/controle/')
예제 #53
0
    def test_edit_task_fail_unauth(self):
        """An unauthenticated PATCH on a task must return 401."""
        payload = {'finished': datetime.now()}
        response = self.client1.patch('/api/task/10/', data=payload)
        self.assertEqual(response.status_code, 401)
예제 #54
0
파일: views.py 프로젝트: cleanenergy/site
def getEnergia(cliente):
	"""Aggregate power/energy figures over every generating unit of *cliente*.

	:param cliente: client whose Geradora units are summed
	:return: dict with potenciaAtual [W], potenciaInstalada [W] and
	         energiaDia / energiaMes / energiaAno [kWh]
	"""
	ugs = Geradora.objects.filter(cliente = cliente)
	data = datetime.now()

	potenciaInstalada = 0
	potenciaAtual = 0
	energiaDia = 0
	energiaMes = 0
	energiaAno = 0

	def _delta_energia(medidas):
		# Energy over a period = last cumulative reading - first one;
		# 0 when the period has no readings (replaces three copies of the
		# same try/bare-except in the original).
		primeira = medidas.first()
		ultima = medidas.last()
		if primeira is None or ultima is None:
			return 0
		return ultima.medida - primeira.medida

	for ug in ugs:
		# --- day ---
		fimDia = datetime(data.year, data.month, data.day, 23, 59, 59)
		inicioDia = datetime(data.year, data.month, data.day)

		medidasDia = Medida.objects.filter(ug=ug, data_hora__gte=inicioDia, data_hora__lte=fimDia).order_by("data_hora")
		# (leftover debug print of medidasDia removed)

		# Current power: slope between today's two most recent readings.
		ultimas = Medida.objects.filter(ug=ug, data_hora__gte=inicioDia).order_by("-data_hora")[:2]
		try:
			deltaE = ultimas[0].medida - ultimas[1].medida
			deltaT = (ultimas[0].data_hora - ultimas[1].data_hora).total_seconds() / 3600
			potenciaAtual = potenciaAtual + (deltaE / deltaT)
		except (IndexError, ZeroDivisionError):
			# Fewer than two readings today, or identical timestamps:
			# this unit contributes nothing (the original's bare except
			# swallowed *everything*, including KeyboardInterrupt).
			pass

		energiaDia = energiaDia + _delta_energia(medidasDia)

		# --- month (handle December -> January rollover) ---
		if data.month + 1 > 12:
			fimMes = datetime(data.year + 1, 1, 1) - timedelta(seconds=1)
		else:
			fimMes = datetime(data.year, data.month + 1, 1) - timedelta(seconds=1)
		inicioMes = datetime(data.year, data.month, 1)

		medidasMes = Medida.objects.filter(ug=ug, data_hora__gte=inicioMes, data_hora__lte=fimMes).order_by("data_hora")
		energiaMes = energiaMes + _delta_energia(medidasMes)

		# --- year ---
		fimAno = datetime(data.year + 1, 1, 1) - timedelta(seconds=1)
		inicioAno = datetime(data.year, 1, 1)

		medidasAno = Medida.objects.filter(ug=ug, data_hora__gte=inicioAno, data_hora__lte=fimAno).order_by("data_hora")
		energiaAno = energiaAno + _delta_energia(medidasAno)

		potenciaInstalada = potenciaInstalada + ug.potencia

	return {
		"potenciaAtual": potenciaAtual,		 			# [ W ]
		"potenciaInstalada": potenciaInstalada*1000,	# [ W ]
		"energiaDia": energiaDia/1000,					# [ kWh ]
		"energiaMes": energiaMes/1000,					# [ kWh ]
		"energiaAno": energiaAno/1000					# [ kWh ]
		}
예제 #55
0
파일: views.py 프로젝트: khanafeer/cbat
 def post(self,request):
     """Handle an inbox-message submission.

     Creates a Msg_inbox row from POST data ('receiver', 'msg_body') for
     the authenticated user and redirects home; unauthenticated users are
     sent to /login; any other outcome resolves to 404.
     """
     try:
         if request.user.is_authenticated:
             data = request.POST
             if data:
                 receiver = User.objects.get(username=data['receiver'])
                 if receiver:
                     # sent_time and received_time come from two separate
                     # datetime.now() calls, so they can differ by microseconds.
                     m = Msg_inbox(sender=request.user,receiver=receiver,msg_body=data['msg_body'],sent_time=datetime.now(),received_time=datetime.now())
                     m.save()
                     return redirect('/')
             else:
                 raise Http404
         else:
             return redirect('/login')
     except:
         # NOTE(review): bare except converts *every* failure (missing POST
         # key, unknown receiver, DB error — even the Http404 raised above)
         # into a 404.  Deliberate best-effort, but it hides real bugs;
         # consider narrowing to the expected exception types.
         raise Http404
예제 #56
0
 def save(self, *args, **kwargs):
     """Record the moment of this edit, then persist via the parent class."""
     edited_at = datetime.now()
     self.date_edited = edited_at
     super(LinkPost, self).save(*args, **kwargs)
예제 #57
0
def render_filter_ele(condition, admin_class, filter_conditions):
    """Render an HTML <select> filter element for one list-filter column.

    :param condition: name of the model field to filter on, e.g. one of
                      col_obj.list_filters
                        --->  ['status', 'source', 'consult_course', 'consultant']
    :param admin_class: admin registration object wrapping the model
    :param filter_conditions: mapping of currently-applied filter values,
                              used to mark the matching option as selected
    :return: safe HTML string for the dropdown
    """
    field_obj = admin_class.model._meta.get_field(condition)
    # Each field category (choices / foreign key / date) expands into its own
    # set of <option> rows, collected here and joined at the end.
    options = []

    def _append_option(selected, value, label):
        # Single place for the option markup.
        # NOTE(review): values/labels are interpolated unescaped and the
        # result is mark_safe'd — trusted-admin data only.
        options.append("""<option %s value='%s'> %s </option>"""
                       % (selected, value, label))

    # Fields with a fixed ``choices`` list.
    if field_obj.choices:
        for value, label in field_obj.choices:
            selected = "selected" if str(value) == filter_conditions.get(condition) else ""
            _append_option(selected, value, label)

    # ForeignKey fields: expand related rows; get_choices()[0] is the
    # "------" blank entry, so slice it off.
    if type(field_obj).__name__ == "ForeignKey":
        for value, label in field_obj.get_choices()[1:]:
            selected = "selected" if str(value) == filter_conditions.get(condition) else ""
            _append_option(selected, value, label)

    # Date(/time) fields: offer fixed "since <date>" presets and filter with
    # a ``__gte`` lookup instead of an exact match.
    if type(field_obj).__name__ in ("DateField", "DateTimeField"):
        today_ele = datetime.now().date()
        date_eles = [
            ['今天', today_ele],
            ['昨天', today_ele - timedelta(days=1)],
            ['近7天', today_ele - timedelta(days=7)],
            ['本月', today_ele.replace(day=1)],
            ['近30天', today_ele - timedelta(days=30)],
            ['近90天', today_ele - timedelta(days=90)],
            ['近半年', today_ele - timedelta(days=180)],
            ['本年', today_ele.replace(month=1, day=1)],
            ['近一年', today_ele - timedelta(days=365)],
        ]
        # name attribute of the <select> carries the __gte lookup.
        filter_field_name = '%s__gte' % condition
        for label, since in date_eles:
            selected = "selected" if str(since) == str(filter_conditions.get(filter_field_name)) else ""
            _append_option(selected, since, label)
    else:
        filter_field_name = condition

    # Fix: substitute the field name into the header *before* attaching the
    # data-driven option rows.  The original ran str.format() over the whole
    # accumulated string, so any '{' or '}' in a choice label crashed it.
    select_ele = ("""<select class="form-control" name='{filter_field_name}'> <option>All</option>"""
                  .format(filter_field_name=filter_field_name))
    select_ele += "".join(options)
    select_ele += """</select>"""
    return mark_safe(select_ele)
예제 #58
0
def import_online(path, onidc_id):
    """Bulk-import online devices from the Excel workbook at *path*.

    Reads the first sheet (row 1 = title row, row 2 = field names, data
    from row 3 on), creating Online records bound to IDC *onidc_id*.

    :param path: filesystem path to the .xls/.xlsx workbook
    :param onidc_id: id of the IDC the imported devices belong to
    :return: (handler_error, handler_warning, handler_success, total)
             where total is the number of data rows seen.
    """
    # Expected column names in the sheet (sic: 'fileds').
    fileds = [
        'name', 'creator', 'rack', 'client', 'created', 'onidc', 'sn', 'model',
        'ipaddr', 'style', 'units', 'pdus', 'tags'
    ]
    workbook = xlrd.open_workbook(path)
    sheets = workbook.sheet_names()
    worksheet = workbook.sheet_by_name(sheets[0])
    # Collect import outcomes into per-severity lists.
    handler_error = []
    handler_warning = []
    handler_success = []
    index = 0
    headers = None
    for index, row in enumerate(worksheet.get_rows(), 1):
        header = index
        if header == 1:
            # Skip the title row.
            continue
        if header == 2:
            # Row 2 carries the column (field) names.
            headers = [h.value for h in row]
            continue
        data = dict(zip(headers, [k.value for k in row]))
        raw = {k: data.get(k) for k in fileds}
        try:
            created = datetime.strptime(data.get('created'), '%Y-%m-%d')
        except BaseException:
            # Unparseable/missing date: fall back to today (as a string).
            created = datetime.now().date().strftime('%Y-%m-%d')
        raw.update(**dict(created=created, sn=force_text(data.get('sn'))))
        verify = Device.objects.filter(name=raw.get('name'))
        if verify.exists():
            # Device name already present — report and skip the row.
            msg = "第{}行:{}设备已存在".format(index, raw.get('name'))
            handler_error.append(msg)
            continue
        else:
            style = get_or_create_style(raw.get('style'), onidc_id)
            creator = get_creator(raw.get('creator'))
            # Look up the rack.
            rack, err = get_rack(raw.get('rack'), onidc_id)
            if not rack:
                msg = "第{}行:{}".format(index, err)
                handler_error.append(msg)
                continue
            # Look up (or create) the client.
            client, err = get_or_create_client(raw.get('client'), onidc_id)
            if not client:
                msg = "第{}行:{}".format(index, err)
                handler_error.append(msg)
                continue
            # Instantiate the online device record.
            instance = Online(created=created,
                              style=style,
                              creator=creator,
                              rack=rack,
                              client=client,
                              name=raw.get('name'),
                              sn=raw.get('sn'),
                              ipaddr=raw.get('ipaddr'),
                              model=raw.get('model'),
                              onidc_id=onidc_id)
            instance.save()
            # Attach rack units (U positions).
            units, err = clean_units(raw.get('units'), rack.pk)
            if units:
                for u in units:
                    instance.units.add(u)
                units.update(actived=False)
                instance.save()
            else:
                msg = "第{}行:{}".format(index, err)
                handler_error.append(msg)
                # Bad unit spec: roll back this instance.
                instance.delete()
                continue
            handler_success.append(instance.name)
            log_action(user_id=creator.pk,
                       content_type_id=get_content_type_for_model(
                           instance, True).pk,
                       object_id=instance.pk,
                       action_flag="新增",
                       created=instance.created)
            # Attach PDUs (missing PDUs only warn, they don't abort the row).
            pdus, err = clean_pdus(raw.get('pdus'), rack.pk)
            if pdus:
                for p in pdus:
                    instance.pdus.add(p)
                pdus.update(actived=False)
                instance.save()
            else:
                msg = "第{}行:{}".format(index, err)
                handler_warning.append(msg)
                # fix: pdus is none, no callback device_post_save function
                # continue
            # Attach tags.
            tags = clean_tags(raw.get('tags'), onidc_id, creator.pk)
            if tags:
                for t in tags:
                    instance.tags.add(t)
                instance.save()
            device_post_save(instance.pk)
    # Data rows = all rows minus the two header rows.
    # NOTE(review): an empty sheet leaves index == 0, yielding total == -2 —
    # confirm upstream guarantees a header is always present.
    total = (index - 2)
    return handler_error, handler_warning, handler_success, total
예제 #59
0
파일: driver.py 프로젝트: eandhy/atmosphere
def _update_status_log(instance, status_update):
    """Prepare fields for a status-log entry for *instance*.

    The visible portion computes a formatted timestamp and resolves the
    acting user from the node metadata; the snippet appears truncated here,
    so how status_update is recorded is not shown.

    NOTE(review): ``except KeyError, no_user`` is Python-2-only syntax;
    this block does not parse under Python 3.
    """
    now_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    try:
        # Creator stamped in the node's metadata by the provisioning layer.
        user = instance._node.extra['metadata']['creator']
    except KeyError, no_user:
        # Placeholder when no creator metadata is present.
        user = "******"
예제 #60
0
class RecordLogTable(models.Model):
    """Audit row: which user changed which record class/value, and when."""

    # Name of the acting user (plain text, not a foreign key).
    username = models.CharField(max_length=100)
    # Class/category of the record that was touched.
    recordclass = models.CharField(max_length=150)
    # Serialized value that was recorded.
    recordvalue = models.TextField(max_length=10000)
    # Fix: the original used default=datetime.now().strftime(...), which is
    # evaluated ONCE at import time — every row was stamped with the server's
    # start-up time.  Passing the callable defers evaluation to each save.
    logtime = models.DateTimeField(default=datetime.now)