Example #1
    def calc_delay(self, cr, uid, vals):
        """ Calculates delay of work order.
        @return: Delay
        """
        code_lst = []
        time_lst = []

        code_ids = self.pool.get('mrp_operations.operation.code').search(cr, uid, [('id','=',vals['code_id'])])
        code = self.pool.get('mrp_operations.operation.code').browse(cr, uid, code_ids)[0]

        oper_ids = self.search(cr,uid,[('production_id','=',vals['production_id']),('workcenter_id','=',vals['workcenter_id'])])
        oper_objs = self.browse(cr,uid,oper_ids)

        for oper in oper_objs:
            code_lst.append(oper.code_id.start_stop)
            time_lst.append(oper.date_start)

        code_lst.append(code.start_stop)
        time_lst.append(vals['date_start'])
        diff = 0
        for i in range(len(code_lst)):
            if code_lst[i] in ('pause', 'done', 'cancel'):
                if not i:
                    continue
                if code_lst[i-1] not in ('resume', 'start'):
                    continue
                a = datetime.strptime(time_lst[i-1],'%Y-%m-%d %H:%M:%S')
                b = datetime.strptime(time_lst[i],'%Y-%m-%d %H:%M:%S')
                diff += (b-a).days * 24
                diff += (b-a).seconds / float(60*60)
        return diff
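The accumulation above only counts the time between a 'start' or 'resume' event and the 'pause', 'done' or 'cancel' that follows it. A minimal standalone sketch of that logic outside the OpenERP pool (the event list and timestamps are invented for illustration):

from datetime import datetime

def delay_hours(events):
    """events: chronological list of (start_stop_code, 'YYYY-MM-DD HH:MM:SS') tuples."""
    diff = 0.0
    for i in range(1, len(events)):
        code, stamp = events[i]
        prev_code, prev_stamp = events[i - 1]
        # only intervals that follow a 'start' or 'resume' count as worked time
        if code in ('pause', 'done', 'cancel') and prev_code in ('resume', 'start'):
            a = datetime.strptime(prev_stamp, '%Y-%m-%d %H:%M:%S')
            b = datetime.strptime(stamp, '%Y-%m-%d %H:%M:%S')
            diff += (b - a).total_seconds() / 3600.0
    return diff

# hypothetical work-order log: one hour worked, a break, then half an hour more -> 1.5
print(delay_hours([('start', '2015-01-01 08:00:00'),
                   ('pause', '2015-01-01 09:00:00'),
                   ('resume', '2015-01-01 10:00:00'),
                   ('done', '2015-01-01 10:30:00')]))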
Example #2
def day_is_complete(day_str):
	# Return true if there are evenly spaced logs throughout the day
	day_start = datetime.strptime(day_str + " 00:00", "%Y-%m-%d %H:%M")
	day_end = datetime.strptime(day_str + " 23:59", "%Y-%m-%d %H:%M")
	arp_logs = ArpLog.objects.filter(runtime__gt=day_start, runtime__lt=day_end).order_by('runtime')
	print(arp_logs.count())
	# TODO: the even-spacing check described above is not implemented yet,
	# so every day is currently reported as complete.
	return True
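The comment above states the intent, but the body only prints the count. One possible shape for the completeness check, sketched over a plain list of timestamps rather than the ArpLog queryset; the 30-minute maximum gap is an assumption, not taken from the source:

from datetime import timedelta

def timestamps_cover_day(runtimes, max_gap=timedelta(minutes=30)):
    """runtimes: chronologically sorted datetimes within a single day."""
    if not runtimes:
        return False
    # assumption: "evenly spaced" means no gap between consecutive logs exceeds max_gap
    gaps = (b - a for a, b in zip(runtimes, runtimes[1:]))
    return all(gap <= max_gap for gap in gaps)

# e.g. timestamps_cover_day(sorted(log.runtime for log in arp_logs))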
Example #3
def json_date_to_datetime(dateraw):
    cy = date.today().isocalendar()[0]
    try:
        newdate = datetime.strptime(dateraw + str(cy), '%b %d%Y')
    except ValueError:
        newdate = datetime.strptime(dateraw, '%m/%d/%y')
    return newdate
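The helper above first tries an abbreviated month-and-day string with the current ISO year appended, then falls back to a numeric date. A hedged usage sketch, assuming the function and its datetime/date imports are in scope (the input strings are invented):

# 'Mar 05' plus the current year is parsed with '%b %d%Y'
print(json_date_to_datetime('Mar 05'))
# '03/05/16' fails the first format and is parsed with the '%m/%d/%y' fallback
print(json_date_to_datetime('03/05/16'))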
Example #4
def get_data(var_id, row, col):

    base_url = ('http://dapds00.nci.org.au/thredds/dodsC/rr9/Climate/eMAST/'
           'ANUClimate/0_01deg/v1m0_aus/mon/land/%s/e_01/1970_2012/' % (var_id))
    emast_id = "eMAST_ANUClimate_mon_tmax_v1m0"
    start_date = "1970-01-01"
    stop_date = "2000-12-31"

    current = datetime.strptime(start_date, "%Y-%m-%d")
    stop = datetime.strptime(stop_date, "%Y-%m-%d")

    tmax = []
    dates = []
    while current < stop:

        month = "%02d" % current.month
        year = current.year

        url = "%s%s_%s%s.nc" % (base_url, emast_id, year, month)

        dataset = open_url(url)
        variable = dataset['air_temperature']
        #print variable[0,2000:2005,2000:2005].array[:]
        tmax.append(variable[0,2000:2005,2000:2005].array[:][0][0][0])
        dates.append(current)

        current += relativedelta(months=1)

    f = open("tmax_%d_%d.txt" % (row, col), "w")
    # write one date/value pair per line
    for i in xrange(len(tmax)):
        f.write("%s\t%s\n" % (dates[i], tmax[i]))
    f.close()
Example #5
    def search(self, serie, numero, remitente, destinatario, sucursal, fecha):
        flat = serie or numero or remitente or destinatario or fecha
        tz = timezone.get_current_timezone()
        if flat:
            if fecha:
                "si se ingreso fecha"
                date = datetime.strptime(fecha, "%d/%m/%Y")
                end_date = timezone.make_aware(date, tz)
                start_date = end_date - timedelta(days=7)
            else:

                date = datetime.strptime("01/10/2015", "%d/%m/%Y")
                end_date = timezone.now()
                start_date = timezone.make_aware(date, tz)
        else:
            end_date = timezone.now()
            start_date = end_date - timedelta(days=7)

        busqueda = self.annotate(
            saldo=F('depositslip__total_amount')-F('amount')
        ).filter(
            depositslip__serie__icontains=serie,
            depositslip__number__icontains=numero,
            depositslip__sender__full_name__icontains=remitente,
            depositslip__addressee__full_name__icontains=destinatario,
            depositslip__state='2',
            depositslip__destination=sucursal,
            depositslip__created__range=(start_date, end_date)
        )
        return busqueda
Example #6
    def _compute_day(self, cr, uid, ids, fields, args, context=None):
        """
        @param cr: the current row, from the database cursor,
        @param uid: the current user’s ID for security checks,
        @param ids: List of Openday’s IDs
        @return: difference between current date and log date
        @param context: A standard dictionary for contextual values
        """
        res = {}
        for issue in self.browse(cr, uid, ids, context=context):
            res[issue.id] = {}
            for field in fields:
                duration = 0
                ans = False
                hours = 0

                if field in ['day_open']:
                    if issue.date_open:
                        date_create = datetime.strptime(issue.create_date, "%Y-%m-%d %H:%M:%S")
                        date_open = datetime.strptime(issue.date_open, "%Y-%m-%d %H:%M:%S")
                        ans = date_open - date_create

                elif field in ['day_close']:
                    if issue.date_closed:
                        date_create = datetime.strptime(issue.create_date, "%Y-%m-%d %H:%M:%S")
                        date_close = datetime.strptime(issue.date_closed, "%Y-%m-%d %H:%M:%S")
                        ans = date_close - date_create
                if ans:
                    duration = float(ans.days)
                    res[issue.id][field] = abs(float(duration))
        return res
Example #7
 def _newInstance(self, name, value):
     types = self._getPropertyType(name)
     propertyType = types[0]
     isArray = types[1]
     if propertyType == bool:
         if isArray:
             return [x for x in value[value.keys()[0]]]
         else:
             return value
     elif propertyType == datetime:
         format = "%Y-%m-%d %H:%M:%S"
         if isArray:
             return [datetime.strptime(x, format) for x in value[value.keys()[0]]]
         else:
             return datetime.strptime(value, format)
     elif propertyType == str:
         if isArray:
             return [x.encode("utf-8") for x in value[value.keys()[0]]]
         else:
             # e.g. taobao.simba.rpt.adgroupbase.get: response.rpt_adgroup_base_list is a JSON string,
             # but it will already have been decoded into a list by the Python json lib
             if not isinstance(value, str):
                 # the value was a JSON string that has already been decoded
                 return value
             return value.encode("utf-8")
     else:
         if isArray:
             return [propertyType(x) for x in value[value.keys()[0]]]
         else:
             return propertyType(value)
Example #8
def valid_admin_cookie(cookie):
    if g.read_only_mode:
        return (False, None)

    # parse the cookie
    try:
        first_login, last_request, hash = cookie.split(',')
    except ValueError:
        return (False, None)

    # make sure it's a recent cookie
    try:
        first_login_time = datetime.strptime(first_login, COOKIE_TIMESTAMP_FORMAT)
        last_request_time = datetime.strptime(last_request, COOKIE_TIMESTAMP_FORMAT)
    except ValueError:
        return (False, None)

    cookie_age = datetime.utcnow() - first_login_time
    if cookie_age.total_seconds() > g.ADMIN_COOKIE_TTL:
        return (False, None)

    idle_time = datetime.utcnow() - last_request_time
    if idle_time.total_seconds() > g.ADMIN_COOKIE_MAX_IDLE:
        return (False, None)

    # validate
    expected_cookie = c.user.make_admin_cookie(first_login, last_request)
    return (constant_time_compare(cookie, expected_cookie),
            first_login)
Example #9
    def __parser_creditcard_transaction(row, report):
        if len(row) != 27:
            report['lines_with_wrong_size'].append(row)
            raise ValueError('The expected parameter count for CreditCardTransaction parser is 27', len(row))

        transaction = {
            'order_key': UUID(row[1]),
            'order_reference': row[2],
            'merchant_key': UUID(row[3]),
            'merchant_name': row[4],
            'transaction_key': UUID(row[5]),
            'transaction_key_to_acquirer': row[6],
            'transaction_reference': row[7],
            'creditcard_brand': row[8],
            'creditcard_number': row[9],
            'installment_count': int(row[10]) if len(row[10].strip()) > 0 else 0,
            'acquirer_name': row[11],
            'status': row[12],
            'amount_in_cents': long(row[13]) if len(row[13].strip()) > 0 else long(0),
            'iata_amount_in_cents': long(row[14]) if len(row[14].strip()) > 0 else long(0),
            'authorization_code': row[15],
            'transaction_identifier': row[16],
            'unique_sequential_number': row[17],
            'captured_amount_in_cents': long(row[19]) if len(row[19].strip()) > 0 else long(0),
            'authorized_amount_in_cents': long(row[18]) if len(row[18].strip()) > 0 else long(0),
            'voided_amount_in_cents': long(row[20]) if len(row[20].strip()) > 0 else long(0),
            'refunded_amount_in_cents': long(row[21]) if len(row[21].strip()) > 0 else long(0),
            'acquirer_authorization_return_code': row[22],
            'authorized_date': datetime.strptime(row[23], "%Y-%m-%dT%H:%M:%S") if len(row[23].strip()) > 0 else None,
            'captured_date': datetime.strptime(row[24], "%Y-%m-%dT%H:%M:%S") if len(row[24].strip()) > 0 else None,
            'voided_date': datetime.strptime(row[25], "%Y-%m-%dT%H:%M:%S") if len(row[25].strip()) > 0 else None,
            'last_probe_date': datetime.strptime(row[26], "%Y-%m-%dT%H:%M:%S") if len(row[26].strip()) > 0 else None
        }

        report['creditcard_transaction_collection'].append(transaction)
Example #10
def check_rotation_needed(tree):
    """ Browse the master keys and check their creation date to
        display a warning if older than 6 months (it's time to rotate).
    """
    show_rotation_warning = False
    six_months_ago = datetime.utcnow()-timedelta(days=183)
    if 'kms' in tree['sops']:
        for entry in tree['sops']['kms']:
            # check if creation date is older than 6 months
            if 'created_at' in entry:
                d = datetime.strptime(entry['created_at'],
                                      '%Y-%m-%dT%H:%M:%SZ')
                if d < six_months_ago:
                    show_rotation_warning = True

    if 'pgp' in tree['sops']:
        for entry in tree['sops']['pgp']:
            # check if creation date is older than 6 months
            if 'created_at' in entry:
                d = datetime.strptime(entry['created_at'],
                                      '%Y-%m-%dT%H:%M:%SZ')
                if d < six_months_ago:
                    show_rotation_warning = True
    if show_rotation_warning:
        print("INFO: the data key on this document is over 6 months old. "
              "Considering rotating it with $ sops -r <file> ",
              file=sys.stderr)
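A minimal call sketch for the checker above (the tree layout follows the sops metadata shape it reads; the creation date is a deliberately old, invented value so the warning fires):

tree = {
    'sops': {
        'kms': [{'created_at': '2015-01-01T00:00:00Z'}],
        'pgp': [],
    }
}
check_rotation_needed(tree)  # prints the rotation warning to stderr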
Example #11
def _parse_datetime_value(value):
    # timezones are not supported and are assumed UTC
    if value[-1] == 'Z':
        value = value[:-1]

    value_len = len(value)
    if value_len in (8, 10):
        value = datetime.strptime(value, '%Y-%m-%d').replace(
            tzinfo=timezone.utc,
        )
        return [value, value + timedelta(days=1)]
    elif value[4] == '-':
        try:
            value = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S').replace(
                tzinfo=timezone.utc,
            )
        except ValueError:
            value = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%f').replace(
                tzinfo=timezone.utc,
            )
    else:
        value = datetime.utcfromtimestamp(float(value)).replace(
            tzinfo=timezone.utc,
        )
    return [value - timedelta(minutes=5), value + timedelta(minutes=6)]
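The parser above accepts three input shapes: a plain date (returned as a one-day window), an ISO datetime with or without microseconds (a roughly ten-minute window), and a raw Unix timestamp. A hedged usage sketch, assuming the function plus datetime, timedelta, and a timezone.utc are importable as in its original module:

print(_parse_datetime_value('2016-01-02'))            # [2016-01-02 00:00 UTC, 2016-01-03 00:00 UTC]
print(_parse_datetime_value('2016-01-02T10:00:00Z'))  # window from 09:55 to 10:06 UTC
print(_parse_datetime_value('1451729400.0'))          # same kind of window, from a Unix timestamp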
Example #12
def sendSubscriber(request):
    results= {}
    if request.method == 'POST':
        subName = request.POST.get('subName')

        subArrival = request.POST.get('subArrival')
        subArrival = datetime.strptime(subArrival, "%d.%m.%Y")
        subDeparture= request.POST.get('subDeparture')
        subDeparture = datetime.strptime(subDeparture, "%d.%m.%Y")
        subSwitch = request.POST.get('subSwitch')
        subOther = request.POST.get('subOther')

        s = Subscriber()
        s.name = subName
        s.arrival = subArrival
        s.departure = subDeparture
        s.switch = subSwitch
        s.other = subOther

        s.save()

        results['subName'] = subName
        results['subArrival'] = subArrival.strftime("%d.%m.%Y")
        results['subDeparture']=subDeparture.strftime("%d.%m.%Y")
        results['subSwitch'] = subSwitch
        results['subOther'] = subOther

    j = json.dumps(results)
    return HttpResponse(j, content_type='application/json')
Example #13
def testformat(s):
    """Parse a date and returns a dict.

    The dict contains the date string, format, and an error message
    if the date cannot be parsed.
    """
    error = ''
    try:
        d = datetime.strptime(s, '%Y-%m-%d')
        dateformat = 'date'
    except:
        try:
            d = datetime.strptime(s, '%Y-%m-%dT%H:%M:%SZ')
            dateformat = 'datetime'
        except:
            dateformat = 'unknown'
    if dateformat == 'unknown':
        try:
            d = dateutil.parser.parse(s)
            # s = d.strftime("%Y-%m-%dT%H:%M:%SZ")
            # dateformat = 'datetime'
            s = d.strftime("%Y-%m-%d")
            dateformat = 'date'
        except:
            error = 'Could not parse date "' + s + '" into a valid format.'
    if error == '':
        response = {'text': s, 'format': dateformat}
    else:
        response = {'text': s, 'format': 'unknown', 'error': error}
    return response
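A quick usage sketch of the helper above (assuming datetime and dateutil.parser are imported as in its original module):

print(testformat('2016-03-01'))            # {'text': '2016-03-01', 'format': 'date'}
print(testformat('2016-03-01T10:00:00Z'))  # {'text': '2016-03-01T10:00:00Z', 'format': 'datetime'}
print(testformat('March 1, 2016'))         # dateutil fallback -> {'text': '2016-03-01', 'format': 'date'}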
Example #14
    def get_params_from_request(self):
        """
        Get params from request GET
        """
        # designer
        designer = self.request.GET.get('designer')
        designer = designer if designer else 'all'

        # status
        status = self.request.GET.get('status')
        status = status if status else 'all'

        # style
        style = self.request.GET.get('style')
        style = style if style else 'all'

        # age group
        age_group = self.request.GET.get('age-group')
        age_group = age_group if age_group else 'all'

        # created time
        try:
            created_from = self.request.GET.get('created-from')
            created_from_dt = datetime.strptime(created_from, '%m/%d/%Y')
        except (TypeError, ValueError):
            created_from_dt = None
        try:
            created_to = self.request.GET.get('created-to')
            created_to_dt = datetime.strptime(created_to, '%m/%d/%Y')
        except (TypeError, ValueError):
            created_to_dt = None

        return {'status': status, 'style': style, 'age_group': age_group,
                'created_from': created_from_dt, 'created_to': created_to_dt,
                'designer': designer}
Example #15
def pesquisarFluxos(request):
    if request.method == 'POST':
        pessoaBusca = request.POST.get('pessoaBusca')
        dataBuscaInicio = datetime.strptime(request.POST.get('dataBuscaInicio', ''), '%d/%m/%Y %H:%M:%S')
        dataBuscaFim = datetime.strptime(request.POST.get('dataBuscaFinal', ''), '%d/%m/%Y %H:%M:%S')
        
        nome = Pessoa.objects.filter(id=pessoaBusca)
        pessoas = Pessoa.objects.all().order_by('nome')

        totalreceber = 0
        totalpagar = 0
        total = 0

        try:
            sql = "select * from caixas_conta where pessoa_id like %s and data >= %s and data <= %s"
            contas = Conta.objects.raw(sql, [pessoaBusca, dataBuscaInicio, dataBuscaFim])

            for item in contas:
                if item.tipo == 'E':
                    totalreceber = totalreceber + item.valor
                else:
                    totalpagar = totalpagar + item.valor

            total = totalreceber - totalpagar
        except:
            contas = [Conta(descricao='erro')]

        return render(request, 'fluxos/listarFluxos.html', {'contas': contas, 'nome':nome, 'pessoas': pessoas, 'totalreceber':totalreceber,'totalpagar':totalpagar, 'total':total})
Example #16
    def init(self, cr, uid=1):
        """ This view will be used in dashboard
        The reason writing this code here is, we need to check date range from today to first date of fiscal year.
        """
        pool_obj_fy = self.pool['account.fiscalyear']
        today = time.strftime('%Y-%m-%d')
        fy_id = pool_obj_fy.find(cr, uid, exception=False)
        LIST_RANGES = []
        if fy_id:
            fy_start_date = pool_obj_fy.read(cr, uid, fy_id, ['date_start'])['date_start']
            fy_start_date = datetime.strptime(fy_start_date, '%Y-%m-%d')
            last_month_date = datetime.strptime(today, '%Y-%m-%d') - relativedelta(months=1)

            while (last_month_date > fy_start_date):
                LIST_RANGES.append(today + " to " + last_month_date.strftime('%Y-%m-%d'))
                today = (last_month_date- relativedelta(days=1)).strftime('%Y-%m-%d')
                last_month_date = datetime.strptime(today, '%Y-%m-%d') - relativedelta(months=1)

            LIST_RANGES.append(today +" to " + fy_start_date.strftime('%Y-%m-%d'))
            cr.execute('delete from temp_range')

            for range_name in LIST_RANGES:
                self.pool['temp.range'].create(cr, uid, {'name': range_name})

        cr.execute("""
            create or replace view report_aged_receivable as (
                select id,name from temp_range
            )""")
Example #17
def calc_elapsed_time(params):
	f=params['inputfile']	
	o = '%s_elapsedTime.txt' %(params['inputfile'][:-4])
	o1 = open(o,'w')

	tot=len(open(f,'r').readlines())

	count=1

	while count <=tot-1:

		line = linecache.getline(f,count)
		linenext=linecache.getline(f,count+1)

		if params['debug'] is True:
			print line[:-2]
			print linenext[:-2]

		region1=line.split()[5]
		region2=linenext.split()[5]

		if region1 != region2:
			if params['debug'] is True:
				print '<--------------------------------->Next region..'
			count = count + 1
			continue
	
		t1=datetime.strptime(line.split()[2],'%Y-%m-%dT%H:%M:%S-0500')
		t2=datetime.strptime(linenext.split()[2],'%Y-%m-%dT%H:%M:%S-0500')
		price=line.split()[1]
		if params['debug'] is True:
			print (t1-t2).seconds
		o1.write('%s\t%s\t%f\n' %(region1,price,(t1-t2).seconds))

		count = count + 1

	o1.close()
Example #18
    def print_report(self, cr, uid, ids, context=None):
        """
         To get the date and print the report
         @param self: The object pointer.
         @param cr: A database cursor
         @param uid: ID of the user currently logged in
         @param context: A standard dictionary
         @return: the report action
        """
        if context is None:
            context = {}
        datas = {'ids': context.get('active_ids', [])}
        res = self.read(cr, uid, ids, ['date_start', 'date_end', 'location_ids','show_details'], context=context)
        res = res and res[0] or {}

        # get timedelta between user timezone and UTC
        utc_time = datetime.now()
        user_time = fields.datetime.context_timestamp(cr, uid, utc_time).replace(tzinfo=None)
        user_timedelta = utc_time - user_time
        res['date_start2'] = (datetime.strptime(res['date_start']+' 00:00:00',tools.DEFAULT_SERVER_DATETIME_FORMAT) + user_timedelta).strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
        res['date_end2'] = (datetime.strptime(res['date_end']+' 23:59:59',tools.DEFAULT_SERVER_DATETIME_FORMAT) + user_timedelta).strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)

        datas['form'] = res
        if res.get('id',False):
            datas['ids']=[res['id']]
        return self.pool['report'].get_action(cr, uid, [], 'pos_sales_summary.report_summaryofsales', data=datas, context=context)
Example #19
    def validate_config(self,config):
        print 'VALIDATE CONFIG'
        if not config:
            return config

        try:
            config_obj = json.loads(config)

            if 'api_version' in config_obj:
                try:
                    int(config_obj['api_version'])
                except ValueError:
                    raise ValueError('api_version must be an integer')

            if 'default_tags' in config_obj:
                if not isinstance(config_obj['default_tags'],list):
                    raise ValueError('default_tags must be a list')

            if 'default_groups' in config_obj:
                if not isinstance(config_obj['default_groups'],list):
                    raise ValueError('default_groups must be a list')

                # Check if default groups exist
                context = {'model':model,'user':c.user}
                for group_name in config_obj['default_groups']:
                    try:
                        group = get_action('group_show')(context,{'id':group_name})
                    except NotFound,e:
                        raise ValueError('Default group not found')

            if 'default_extras' in config_obj:
                if not isinstance(config_obj['default_extras'],dict):
                    raise ValueError('default_extras must be a dictionary')

            if 'from' in config_obj:
                try:
                    datetime.strptime(config_obj['from'], '%Y-%m-%dT%H:%M:%SZ')
                except ValueError:
                    raise ValueError("Incorrect date format, should be yyyy-mm-ddThh:mm:ssZ ")
                #    int(config_obj['from'])

            if 'until' in config_obj:
                try:
                    datetime.strptime(config_obj['until'], '%Y-%m-%dT%H:%M:%SZ')
                except ValueError:
                    raise ValueError("Incorrect date format, should be yyyy-mm-ddThh:mm:ssZ ")

            #if 'vocabulary' in config_obj:
            #    if config_obj['vocabulary'] != 'metashare' and config_obj['vocabulary'] != 'olac' and config_obj['vocabulary'] !='cmdi':
            #        raise ValueError("Incorrect vocabulary, please choose between metashare, olac and cmdi")
            #else:
            #    raise ValueError("Please provide a vocabulary, you can choose between metashare, olac and cmdi")

            if 'user' in config_obj:
                # Check if user exists
                context = {'model':model,'user':c.user}
                try:
                    user = get_action('user_show')(context,{'id':config_obj.get('user')})
                except NotFound,e:
                    raise ValueError('User not found')
Example #20
def event_adjecents(request, start_date, start_time, end_date, end_time, event=None, locations=None):
    """Returns the events which take place adjacent to the to-be-planned event."""

    start = datetime.strptime("%s %s" % (start_date, start_time), "%d-%m-%Y %H:%M") - timedelta(minutes=15)
    end = datetime.strptime("%s %s" % (end_date, end_time), "%d-%m-%Y %H:%M") + timedelta(minutes=15)
    realstart = datetime.strptime("%s %s" % (start_date, start_time), "%d-%m-%Y %H:%M")
    realend = datetime.strptime("%s %s" % (end_date, end_time), "%d-%m-%Y %H:%M")

    # Fetch all conflicting events with a quarter of an hour of slack on both
    # ends. Then remove the truly conflicting events, so that only the
    # adjacent events remain.
    if locations:
        locations = Location.objects.filter(pk__in=locations)
        events = Event.objects.none()
        adjevents = Event.objects.none()
        for location in locations:
            events |= Event.conflicting_events(realstart, realend, location)
            adjevents |= Event.conflicting_events(start, end, location)
    else:
        events = Event.conflicting_events(realstart, realend)
        adjevents = Event.conflicting_events(start, end)

    if event:
        events = events.exclude(pk=event)

    result = []
    for event in adjevents:
        if event not in events:
            result.append(model_to_dict(event))

    return result
Example #21
def load_movies():
    """Load movies from u.item into database."""

    print "Movie"

    Movie.query.delete()

    for row in open("seed_data/u.item"):
        row = row.rstrip().split('|')
        movie_id = row[0]
        #eliminate date at end of title
        title_temp = row[1].split("(")
        title = title_temp[0].rstrip()
        # make sure data doesn't suck
        try:
            released_at = datetime.strptime(row[2], "%d-%b-%Y")
        except (IndexError, ValueError):
            released_at = datetime.strptime('01-Jan-1900', "%d-%b-%Y")
        imdb_url = row[3]

        movie = Movie(movie_id=movie_id,
                      title=title,
                      released_at=released_at,
                      imdb_url=imdb_url)

        db.session.add(movie)

    db.session.commit()
Example #22
def hotspotsRange(start_time, stop_time, location, **kwargs):
    ''' Run over a range of timesteps at 5-minute intervals in between '''
    start = datetime.strptime(start_time, '%Y%m%d.%H%M%S')
    stop = datetime.strptime(stop_time, '%Y%m%d.%H%M%S')
    kwargs.update({'task_id': hotspotsRange.request.id})
    subtasks = [ send_task("cybercomq.gis.hotspotpysal.hotspots", args=(ts,location), kwargs=kwargs, queue="gis", track_started=True).task_id for ts in date_range(start,stop) ]
    return subtasks
Example #23
 def getConferenceSessionsByFilters(self, request):
     """Query for the sessions that are not specified type
     and are not running after specified time
     """
     # first get the sessions that don't have the specified type
     # then get the sessions that don't run past the specified time
     type_f = request.websafeType
     print type_f
     print '------------------------------'
     time_f = request.websafeTime
     print time_f
     print '------------------------------'
     eq_f = request.websafeOperator
     print eq_f
     print '------------------------------'
     q = ConferenceSession.query(ConferenceSession.type != type_f)
     print q
     sessions=[]
     for sess in q:
         if sess.start_time is not None:
             if eq_f == '<':
                 if sess.start_time < datetime.strptime(time_f, '%H:%M').time():
                     # add the session to the sessions
                     sessions.append(sess)
             elif eq_f == '=':
                 if sess.start_time == datetime.strptime(time_f, '%H:%M').time():
                     # add the session to the sessions
                     sessions.append(sess)
             elif eq_f == '>':
                 if sess.start_time > datetime.strptime(time_f, '%H:%M').time():
                     # add the session to the sessions
                     sessions.append(sess)
     return ConferenceSessionForms(
         items=[self._copySessionToForm(s) for s in sessions]
     )
Example #24
 def calc_period_prec(self, cr, uid, fiscal_year, context={}):
     fiscalyear_obj = self.pool['account.fiscalyear']
     date_start = (
         str(datetime.strptime(fiscal_year.date_start, '%Y-%m-%d') -
             relativedelta(years=1))[:10])
     date_stop = (
         str(datetime.strptime(fiscal_year.date_stop, '%Y-%m-%d') -
             relativedelta(years=1))[:10])
     id_fy = fiscalyear_obj.search(
         cr, uid,
         [('date_start', '=', date_start), ('date_stop', '=', date_stop)])
     if not id_fy:
         raise osv.except_osv(
             _('Error!'),
             _('Could not find the previous fiscal year for period %s - %s' % (
               date_start, date_stop)))
     # Next year
     date_start = (
         str(datetime.strptime(fiscal_year.date_start, '%Y-%m-%d') +
             relativedelta(years=1))[:10])
     date_stop = (
         str(datetime.strptime(fiscal_year.date_stop, '%Y-%m-%d') +
             relativedelta(years=1))[:10])
     id_fy1 = fiscalyear_obj.search(
         cr, uid,
         [('date_start', '=', date_start), ('date_stop', '=', date_stop)])
     if not id_fy1:
         raise osv.except_osv(
             _('Error!'),
             _('Could not find the next fiscal year for period %s - %s' % (
               date_start, date_stop)))
     return id_fy[0], id_fy1[0]
Example #25
def is_date(s, f):
    try:
        datetime.strptime(s, f)
    except ValueError:
        return False
    else:
        return True
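A quick usage sketch of the validator above:

print(is_date('2016-02-29', '%Y-%m-%d'))  # True  (2016 is a leap year)
print(is_date('2015-02-29', '%Y-%m-%d'))  # False (invalid date)
print(is_date('12:30', '%H:%M'))          # True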
Example #26
def __validateArgs():
#===============================================================================
  if len(sys.argv) < 5:
    print "python",sys.argv[0], "CIK ALIAS SINCE UNTIL"
    print "where CIK: one platform client key"
    print "    ALIAS: dataport alias"
    print "    SINCE: MM/DD/YYYY"
    print "    UNTIL: MM/DD/YYYY"
    sys.exit(1)
  cik, alias, since, until = sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]
  if len(cik) != 40:
    print "Invalid cik"
    sys.exit(1)
  since = since + " 00:00:00"
  until = until + " 23:59:59"
  try:
    start = datetime.strptime(since, "%m/%d/%Y %H:%M:%S")
    end   = datetime.strptime(until, "%m/%d/%Y %H:%M:%S")
  except ValueError as err:
    print "Invalid time format."
    sys.exit(1)
  start_timestamp = int(time.mktime(start.timetuple()))
  end_timestamp = int(time.mktime(end.timetuple()))
  if start_timestamp > end_timestamp:
    print "SINCE must not be greater than UNTIL"
    sys.exit(1)
  return cik, alias, start_timestamp, end_timestamp
Example #27
    def test_podcast_file_sync(self):
        # download only one podcast episode
        with utils.temp_podcast(self.client, archive_type='soundcloud', max_allowed=1) as podcast:
            url = urls.soundcloud_track_list(podcast['broadcast_id'],
                                             self.client.soundcloud_client_id)
            httpretty.register_uri(httpretty.GET, url, body=json.dumps(soundcloud_one_track.DATA))
            self.client.episode_sync()

            episode_list = self.client.episode_list(only_files=False)
            with utils.temp_audio_file() as mp3_body:
                utils.mock_mp3_download(episode_list[0]['download_url'], mp3_body)
                self.client.podcast_file_sync()
                episode_list = self.client.episode_list()
                self.assert_not_none(episode_list[0]['file_path'])
                first_episode_date = episode_list[0]['date']
                # add an additional, newer podcast, make sure things are deleted
                url = urls.soundcloud_track_list(podcast['broadcast_id'],
                                                 self.client.soundcloud_client_id)
                httpretty.register_uri(httpretty.GET, url, body=json.dumps(soundcloud_two_tracks.DATA))
                self.client.episode_sync()
                episode_list = self.client.episode_list(only_files=False)
                with utils.temp_audio_file() as mp3_body:
                    utils.mock_mp3_download(episode_list[1]['download_url'], mp3_body)
                    self.client.podcast_file_sync()

                    # make sure 2 episodes in db, but only 1 with a file path
                    episode_list = self.client.episode_list()
                    self.assert_not_none(episode_list[0]['file_path'])
                    all_episodes = self.client.episode_list(only_files=False)
                    self.assertNotEqual(len(episode_list), len(all_episodes))
                    second_episode_date = episode_list[0]['date']

                    self.assertTrue(datetime.strptime(second_episode_date, self.client.datetime_output_format) >
                                    datetime.strptime(first_episode_date, self.client.datetime_output_format))
Example #28
    def _process_validation_times(self, x509):
        """
        PyOpenSSL uses a kooky date format that *usually* parses out quite
        easily but on the off chance that it's not in UTC, a lot of work needs
        to be done.
        """

        valid_from  = x509.get_notBefore()
        valid_until = x509.get_notAfter()

        try:
            self.valid_from = pytz.UTC.localize(datetime.strptime(
                valid_from,
                self.TIME_FORMAT
            ))
        except ValueError:
            self.valid_from  = self._process_nonstandard_time(valid_from)

        try:
            self.valid_until = pytz.UTC.localize(datetime.strptime(
                valid_until,
                self.TIME_FORMAT
            ))
        except ValueError:
            self.valid_until = self._process_nonstandard_time(valid_until)
Example #29
    def onchange_fiscalyear(self, cr, uid, ids, fiscalyear_id=False, context=None):
        print_state = 'draft'
        date_move_line_from = date_move_line_from_view = False
        date_move_line_to = False
        if fiscalyear_id:
            print_state = 'print'
            fiscalyear_data = self._get_account_fiscalyear_data(cr, uid, ids, fiscalyear_id)
            #set values
            today_date = date.today()
            date_start = datetime.strptime(fiscalyear_data.date_start,"%Y-%m-%d").date() 
            date_stop = datetime.strptime(fiscalyear_data.date_stop,"%Y-%m-%d").date() 
            #set date_move_line_from
            if fiscalyear_data.date_last_print:
                date_last_print = datetime.strptime(fiscalyear_data.date_last_print,"%Y-%m-%d").date()
                date_move_line_from = date_move_line_from_view = str(date_last_print + timedelta(days=1))
                if date_last_print == date_stop:
                    date_move_line_from = date_move_line_from_view = str(date_start)
                    print_state = 'printed'
            else:
                date_move_line_from = date_move_line_from_view = str(date_start)
            #set date_move_line_to
            if today_date > date_stop:
                date_move_line_to = str(date_stop)
            else:
                date_move_line_to = str(today_date - timedelta(days=1))

        return {'value': {
                    'date_move_line_from': date_move_line_from,
                    'date_move_line_from_view': date_move_line_from_view,
                    'date_move_line_to': date_move_line_to,
                    'print_state': print_state,
                    }
                }
Example #30
 def _compute_board_amount(self, sequence, residual_amount, amount_to_depr, undone_dotation_number, posted_depreciation_line_ids, total_days, depreciation_date):
     amount = 0
     if sequence == undone_dotation_number:
         amount = residual_amount
     else:
         if self.method == 'linear':
             amount = amount_to_depr / (undone_dotation_number - len(posted_depreciation_line_ids))
             if self.prorata:
                 amount = amount_to_depr / self.method_number
                 if sequence == 1:
                     if self.method_period % 12 != 0:
                         date = datetime.strptime(self.date, '%Y-%m-%d')
                         month_days = calendar.monthrange(date.year, date.month)[1]
                         days = month_days - date.day + 1
                         amount = (amount_to_depr / self.method_number) / month_days * days
                     else:
                         days = (self.company_id.compute_fiscalyear_dates(depreciation_date)['date_to'] - depreciation_date).days + 1
                         amount = (amount_to_depr / self.method_number) / total_days * days
         elif self.method == 'degressive':
             amount = residual_amount * self.method_progress_factor
             if self.prorata:
                 if sequence == 1:
                     if self.method_period % 12 != 0:
                         date = datetime.strptime(self.date, '%Y-%m-%d')
                         month_days = calendar.monthrange(date.year, date.month)[1]
                         days = month_days - date.day + 1
                         amount = (residual_amount * self.method_progress_factor) / month_days * days
                     else:
                         days = (self.company_id.compute_fiscalyear_dates(depreciation_date)['date_to'] - depreciation_date).days + 1
                         amount = (residual_amount * self.method_progress_factor) / total_days * days
     return amount
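For the prorata branches above, the first-period amount is scaled by the number of days remaining in the starting month. A small arithmetic sketch of that calculation with invented values:

import calendar
from datetime import datetime

amount_to_depr, method_number = 1200.0, 12                  # e.g. twelve depreciation lines
date = datetime.strptime('2016-03-10', '%Y-%m-%d')
month_days = calendar.monthrange(date.year, date.month)[1]  # 31 days in March
days = month_days - date.day + 1                            # 22 days left, the 10th included
first_amount = (amount_to_depr / method_number) / month_days * days
print(round(first_amount, 2))  # 70.97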
Example #31
    def create_new_database(self, dbname=None, client_id=None, partner_id=None, user_id=None, notify_user=False, trial=False, support_team_id=None):
        self.ensure_one()
        db_count = self.env['saas_portal.client'].search_count([('partner_id', '=', partner_id),
                                                                ('state', '=', 'open'),
                                                                ('plan_id', '=', self.id)])
        if self.maximum_allowed_db_per_partner != 0 and db_count >= self.maximum_allowed_db_per_partner:
            raise MaximumDBException

        server = self.server_id
        if not server:
            server = self.env['saas_portal.server'].get_saas_server()

        server.action_sync_server()

        vals = {'name': dbname or self.generate_dbname()[0],
                'server_id': server.id,
                'plan_id': self.id,
                'partner_id': partner_id,
                'trial': trial,
                'support_team_id': support_team_id,
                }
        client = None
        if client_id:
            vals['client_id'] = client_id
            client = self.env['saas_portal.client'].search([('client_id', '=', client_id)])

        vals = self._new_database_vals(vals)[0]

        if client:
            client.write(vals)
        else:
            client = self.env['saas_portal.client'].create(vals)
        client_id = client.client_id

        scheme = server.request_scheme
        port = server.request_port
        if user_id:
            owner_user = self.env['res.users'].browse(user_id)
        else:
            owner_user = self.env.user
        owner_user_data = {
            'user_id': owner_user.id,
            'login': owner_user.login,
            'name': owner_user.name,
            'email': owner_user.email,
        }
        trial_expiration_datetime = datetime.strptime(client.create_date, DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(hours=self.expiration)  # for trial
        state = {
            'd': client.name,
            'e': trial and trial_expiration_datetime or client.create_date,
            'r': '%s://%s:%s/web' % (scheme, client.name, port),
            'owner_user': owner_user_data,
            't': client.trial,
        }
        if self.template_id:
            state.update({'db_template': self.template_id.name})
        scope = ['userinfo', 'force_login', 'trial', 'skiptheuse']
        url = server._request_server(path='/saas_server/new_database',
                              scheme=scheme,
                              port=port,
                              state=state,
                              client_id=client_id,
                              scope=scope,)[0]
        res = requests.get(url, verify=(self.server_id.request_scheme == 'https' and self.server_id.verify_ssl))
        if res.status_code != 200:
            # TODO /saas_server/new_database show more details here
            raise exceptions.Warning('Error %s' % res.status_code)
        data = simplejson.loads(res.text)
        params = {
            'state': data.get('state'),
            'access_token': client.oauth_application_id._get_access_token(user_id, create=True),
        }
        url = '{url}?{params}'.format(url=data.get('url'), params=werkzeug.url_encode(params))

        # send email
        if notify_user:
            template = self.env.ref('saas_portal.email_template_create_saas')
            client.message_post_with_template(template.id, composition_mode='comment')

        if trial:
            client.expiration_datetime = trial_expiration_datetime
        client.send_params_to_client_db()
        client.server_id.action_sync_server()

        return {'url': url, 'id': client.id, 'client_id': client_id}
Example #32
    def calculate_integrated_sliding2(self, integration_factor, output_images):
        file_temp = h5py.File(self.results_folder + "/mask.hdf5", 'r')
        dset = file_temp["Mask"]
        mask = dset[...]
        file_temp.close()

        new_clean_data_list = []

        for index, element in enumerate(self.clean_data_list):
            if index == len(self.clean_data_list) - 1:
                break
            new_clean_data_list.append(
                [self.clean_data_list[index], self.clean_data_list[index + 1]])

        n = len(new_clean_data_list) - (len(new_clean_data_list) %
                                        integration_factor)
        new_clean_data_list = new_clean_data_list[:n]
        groups_list = [
            new_clean_data_list[i:i + integration_factor]
            for i in range(0, len(new_clean_data_list), integration_factor)
        ]

        integrated_sliding2_results_folder = self.results_folder + "/integrated_sliding2_factor_%d" % (
            integration_factor)
        if not os.path.exists(integrated_sliding2_results_folder):
            os.makedirs(integrated_sliding2_results_folder)

        integrated_sliding2_len = len(groups_list) - 1
        mag_mean = np.zeros(integrated_sliding2_len)
        phase_mean = np.zeros(integrated_sliding2_len)
        phase_mean_sum = np.zeros(integrated_sliding2_len)
        phase_std_dev = np.zeros(integrated_sliding2_len)
        date_values = []

        vmax = np.pi * self.lambda_d
        vmin = -1 * vmax

        for enum, group in enumerate(groups_list):
            if enum == len(groups_list) - 1:
                break

            for index, element in enumerate(groups_list[enum]):
                data0 = self.sar_collection.find_one({'take': str(element[0])})
                file_temp0 = h5py.File(data0['route'], 'r')
                dset0 = file_temp0["Complex_image"]
                data1 = self.sar_collection.find_one({'take': str(element[1])})
                file_temp1 = h5py.File(data1['route'], 'r')
                dset1 = file_temp1["Complex_image"]

                if index == 0:
                    Imagen = np.sqrt(dset0[...] * np.conj(dset1[...]))
                    date_begin = data0['date']
                    time_begin = data0['time']
                else:
                    Imagen += np.sqrt(dset0[...] * np.conj(dset1[...]))
                file_temp0.close()
                file_temp1.close()
                if index == len(group) - 1:
                    date = data1['date']
                    time = data1['time']
            Imagen /= integration_factor

            phase = np.angle(Imagen)
            magnitude = np.absolute(Imagen)
            masked_angle = np.ma.masked_where(mask == 0, phase)
            masked_magnitude = np.ma.masked_where(mask == 0, magnitude)
            masked_plot = np.ma.masked_where(mask == 1, magnitude)

            mag_mean[enum] = masked_magnitude.mean()
            phase_mean[enum] = masked_angle.mean() * self.lambda_d
            phase_mean_sum[enum] = np.sum(phase_mean)
            phase_std_dev[enum] = np.std(masked_angle) * self.lambda_d
            date_values.append(
                datetime.strptime(''.join((date_begin, time_begin)), ''.join(
                    (date_format, time_format))))

            if output_images:
                fig = plt.figure(1)
                plt.title(
                    "Integration factor: {integration_factor} \n From {date_begin} {time_begin} \n to {date} {time} (UTC)"
                    .format(integration_factor=integration_factor,
                            date_begin=date_begin,
                            time_begin=time_begin,
                            date=date,
                            time=time),
                    fontsize=11)
                plt.ylabel('Range (m)', fontsize=12)
                plt.xlabel('Cross-range (m)', fontsize=12)

                if enum == 0:
                    im = plt.imshow(
                        self.lambda_d * phase,
                        cmap='jet',
                        aspect='auto',
                        extent=[self.xi, self.xf, self.yi, self.yf],
                        vmin=vmin,
                        vmax=vmax)
                    cbar = plt.colorbar(im,
                                        orientation='vertical',
                                        format='%.2f')
                    cbar.ax.set_title('Displacement \n (mm)', fontsize=10)

                    aux = int(
                        (self.yf - (self.xf * np.tan(48.0 * np.pi / 180.0))) *
                        Imagen.shape[0] / (self.yf - self.yi))
                    mask_aux = np.zeros(Imagen.shape)

                    count = 0
                    nposx = Imagen.shape[1]
                    nposy = Imagen.shape[0]

                    for k in range(nposy):
                        if k >= (aux + 1):
                            mask_aux[k, 0:count] = 1
                            mask_aux[k, nposx - count - 1:nposx - 1] = 1
                            count = count + 1
                    masked_values = np.ma.masked_where(mask_aux == 0, mask_aux)
                    plt.imshow(masked_values,
                               cmap='binary',
                               aspect='auto',
                               extent=[self.xi, self.xf, self.yi, self.yf])
                    plt.imshow(masked_plot,
                               cmap='binary',
                               aspect='auto',
                               extent=[self.xi, self.xf, self.yi, self.yf])

                if enum > 0:
                    im = plt.imshow(
                        self.lambda_d * phase,
                        cmap='jet',
                        aspect='auto',
                        extent=[self.xi, self.xf, self.yi, self.yf],
                        vmin=vmin,
                        vmax=vmax)
                    plt.imshow(masked_values,
                               cmap='binary',
                               aspect='auto',
                               extent=[self.xi, self.xf, self.yi, self.yf])
                    plt.imshow(masked_plot,
                               cmap='binary',
                               aspect='auto',
                               extent=[self.xi, self.xf, self.yi, self.yf])

                plt.savefig(integrated_sliding2_results_folder +
                            "/groups_%d_%d.png" % (enum, enum + 1))

        fig = plt.figure(1)
        fig.clear()
        fig = plt.figure(figsize=(10.0, 6.0))

        plt.subplot(221)
        plt.title(r'$\overline{\Delta r}\/\/vs\/\/time$', fontsize=16)
        plt.ylabel(r'$\overline{\Delta r}\/\/(mm)$', fontsize=16)
        plt.xlabel(r'$time$', fontsize=16)
        plt.plot(date_values, phase_mean)
        ax = plt.gca()
        ax.set_ylim(
            [-(np.amax(phase_mean) * 2.0), (np.amax(phase_mean) * 2.0)])
        ax.xaxis.set_major_formatter(DateFormatter('%d/%m %H:%M'))
        labels = ax.get_xticklabels()
        plt.setp(labels, rotation=30, fontsize=10)

        plt.subplot(222)
        plt.title(r'$\overline{\Delta r_{acc}}\/\/(mm)\/\/vs\/\/time$',
                  fontsize=16)
        plt.ylabel(r'$\overline{\Delta r}\/\/(acc)$', fontsize=16)
        plt.xlabel(r'$time$', fontsize=16)
        plt.plot(date_values, phase_mean_sum)
        ax = plt.gca()
        ax.xaxis.set_major_formatter(DateFormatter('%d/%m %H:%M'))
        labels = ax.get_xticklabels()
        plt.setp(labels, rotation=30, fontsize=10)

        sub = plt.subplot(223)
        plt.title(r'$\sigma_{\Delta r}\/\/vs\/\/time$', fontsize=16)
        plt.ylabel(r'$\sigma_{\Delta r}\/\/(mm)$', fontsize=16)
        plt.xlabel(r'$time$', fontsize=16)
        plt.plot(date_values, phase_std_dev)
        ax = plt.gca()
        ax.set_ylim([0.0, np.amax(phase_std_dev) * 1.2])
        ax.xaxis.set_major_formatter(DateFormatter('%d/%m %H:%M'))
        labels = ax.get_xticklabels()
        plt.setp(labels, rotation=30, fontsize=10)

        plt.subplot(224)
        plt.title(r'$\overline{mag}\/\/vs\/\/time$', fontsize=16)
        plt.ylabel(r'$\overline{mag}$', fontsize=16)
        plt.xlabel(r'$time$', fontsize=16)
        #plt.plot(date_values, mag_mean * 2 * 1e6)
        plt.plot(date_values, mag_mean)
        ax = plt.gca()
        ax.xaxis.set_major_formatter(DateFormatter('%d/%m %H:%M'))
        #ax.set_ylim([0.0 , np.amax(mag_mean * 2 * 1e6) * 1.2])
        ax.set_ylim([0.0, np.amax(mag_mean) * 1.2])
        labels = ax.get_xticklabels()
        plt.setp(labels, rotation=30, fontsize=10)

        plt.tight_layout()
        plt.savefig(integrated_sliding2_results_folder +
                    "/statistical_report.png")
        print "Done!"
Example #33
    def calculate_integrated_sliding(self, integration_factor):
        file_temp = h5py.File(self.results_folder + "/mask.hdf5", 'r')
        dset = file_temp["Mask"]
        mask = dset[...]
        file_temp.close()

        n = len(self.clean_data_list) - (len(self.clean_data_list) %
                                         integration_factor)
        self.clean_data_list = self.clean_data_list[:n]
        groups_list = [
            self.clean_data_list[i:i + integration_factor]
            for i in range(0, len(self.clean_data_list), integration_factor)
        ]

        integrated_sliding_results_folder = self.results_folder + "/integrated_sliding_factor_%d" % (
            integration_factor)
        if not os.path.exists(integrated_sliding_results_folder):
            os.makedirs(integrated_sliding_results_folder)

        integrated_sliding_len = len(groups_list) - 1
        mag_mean = np.zeros(integrated_sliding_len)
        phase_mean = np.zeros(integrated_sliding_len)
        phase_mean_sum = np.zeros(integrated_sliding_len)
        phase_std_dev = np.zeros(integrated_sliding_len)
        date_values = []

        fig = plt.figure(1)

        vmax = np.pi * self.lambda_d
        vmin = -1 * vmax

        for enum, group in enumerate(groups_list):
            if enum == len(groups_list) - 1:
                break
            for index, element in enumerate(groups_list[enum]):
                data = self.sar_collection.find_one({'take': str(element)})
                file_temp = h5py.File(data['route'], 'r')
                dset = file_temp["Complex_image"]
                if index == 0:
                    Imagen_master = dset[...]
                    file_temp.close()
                    continue
                Imagen_master += dset[...]
                file_temp.close()
            Imagen_master /= integration_factor

            for index, element in enumerate(groups_list[enum + 1]):
                data = self.sar_collection.find_one({'take': str(element)})
                file_temp = h5py.File(data['route'], 'r')
                dset = file_temp["Complex_image"]
                if index == 0:
                    Imagen_slave = dset[...]
                    file_temp.close()
                    continue
                Imagen_slave += dset[...]
                file_temp.close()
                if index == len(group) - 1:
                    date = (data['date'])
                    time = (data['time'])
            Imagen_slave /= integration_factor

            phase = np.angle(Imagen_master * np.conj(Imagen_slave))
            magnitude = np.absolute(Imagen_master)
            masked_angle = np.ma.masked_where(mask == 0, phase)
            masked_magnitude = np.ma.masked_where(mask == 0, magnitude)

            mag_mean[enum] = masked_magnitude.mean()
            phase_mean[enum] = masked_angle.mean() * self.lambda_d
            phase_mean_sum[enum] = np.sum(phase_mean)
            phase_std_dev[enum] = np.std(masked_angle) * self.lambda_d
            date_values.append(
                datetime.strptime(''.join((date, time)), ''.join(
                    (date_format, time_format))))

            #fig.suptitle("Image %d and %d" %(take, take + 1))
            plt.ylabel('Range (m)', fontsize=14)
            plt.xlabel('Cross-range (m)', fontsize=14)

            if enum == 0:
                im = plt.imshow(self.lambda_d * phase,
                                cmap='jet',
                                aspect='auto',
                                extent=[self.xi, self.xf, self.yi, self.yf],
                                vmin=vmin,
                                vmax=vmax)
                cbar = plt.colorbar(im, orientation='vertical', format='%.2f')
            im = plt.imshow(self.lambda_d * phase,
                            cmap='jet',
                            aspect='auto',
                            extent=[self.xi, self.xf, self.yi, self.yf],
                            vmin=vmin,
                            vmax=vmax)
            plt.savefig(integrated_sliding_results_folder +
                        "/groups_%d_%d_takes%s_%s.png" %
                        (enum, enum + 1, tuple(groups_list[enum]),
                         tuple(groups_list[enum + 1])))

        fig.clear()
        fig = plt.figure(figsize=(15.0, 8.0))

        plt.subplot(221)
        plt.title(r'$\overline{\Delta r}\/\/vs\/\/time$', fontsize=16)
        plt.ylabel(r'$\overline{\Delta r}\/\/(mm)$', fontsize=16)
        plt.xlabel(r'$time$', fontsize=16)
        plt.plot(date_values, phase_mean)
        ax = plt.gca()
        ax.set_ylim(
            [-(np.amax(phase_mean) * 2.0), (np.amax(phase_mean) * 2.0)])
        ax.xaxis.set_major_formatter(DateFormatter('%d/%m %H:%M'))
        labels = ax.get_xticklabels()
        plt.setp(labels, rotation=30, fontsize=10)

        plt.subplot(222)
        plt.title(r'$\overline{\Delta r}\/\/(acc)\/\/vs\/\/time$', fontsize=16)
        plt.ylabel(r'$\overline{\Delta r}\/\/(acc)$', fontsize=16)
        plt.xlabel(r'$time$', fontsize=16)
        plt.plot(date_values, phase_mean_sum)
        ax = plt.gca()
        ax.xaxis.set_major_formatter(DateFormatter('%d/%m %H:%M'))
        labels = ax.get_xticklabels()
        plt.setp(labels, rotation=30, fontsize=10)

        sub = plt.subplot(223)
        plt.title(r'$\sigma_{\Delta r}\/\/vs\/\/time$', fontsize=16)
        plt.ylabel(r'$\sigma_{\Delta r}\/\/(mm)$', fontsize=16)
        plt.xlabel(r'$time$', fontsize=16)
        plt.plot(date_values, phase_std_dev)
        ax = plt.gca()
        ax.set_ylim([0.0, np.amax(phase_std_dev) * 1.2])
        ax.xaxis.set_major_formatter(DateFormatter('%d/%m %H:%M'))
        labels = ax.get_xticklabels()
        plt.setp(labels, rotation=30, fontsize=10)

        plt.subplot(224)
        plt.title(r'$\overline{mag}\/\/vs\/\/time\/\/(normalized)$',
                  fontsize=16)
        plt.ylabel(r'$\overline{mag}$', fontsize=16)
        plt.xlabel(r'$time$', fontsize=16)
        plt.plot(date_values, mag_mean * 2)
        ax = plt.gca()
        ax.set_ylim([0.0, np.amax(mag_mean * 2) * 1.2])
        ax.xaxis.set_major_formatter(DateFormatter('%d/%m %H:%M'))
        labels = ax.get_xticklabels()
        plt.setp(labels, rotation=30, fontsize=10)

        plt.tight_layout()
        plt.savefig(integrated_sliding_results_folder +
                    "/statistical_report.png")
        print "Done!"
Example #34
    def clean_data_report(self, threshold):
        self.clean_data_list = []
        file_temp = h5py.File(self.results_folder + "/mask.hdf5", 'r')
        dset = file_temp["Mask"]
        mask = dset[...]
        file_temp.close()
        clean_data_set = set()

        for take in range(1, self.ntakes):
            data = self.sar_collection.find_one({'take': str(take)})
            file_temp = h5py.File(data['route'], 'r')
            dset = file_temp["Complex_image"]
            Imagen_master = dset[...]
            file_temp.close()

            data = self.sar_collection.find_one({'take': str(take + 1)})
            date = (data['date'])
            time = (data['time'])
            file_temp = h5py.File(data['route'], 'r')
            dset = file_temp["Complex_image"]
            Imagen_slave = dset[...]
            file_temp.close()

            phase = np.angle(Imagen_master * np.conj(Imagen_slave))
            masked_angle = np.ma.masked_where(mask == 0, phase)
            phase_std_dev = np.std(masked_angle) * self.lambda_d

            if phase_std_dev < threshold:
                #clean_data_list.append([take, take +1])
                clean_data_set.add(take)
                clean_data_set.add(take + 1)

        self.clean_data_list = list(clean_data_set)

        clean_results_folder = self.results_folder + "/clean_data"
        if not os.path.exists(clean_results_folder):
            os.makedirs(clean_results_folder)

        report_len = len(self.clean_data_list) - 1
        mag_mean = np.zeros(report_len)
        phase_mean = np.zeros(report_len)
        phase_mean_sum = np.zeros(report_len)
        phase_std_dev = np.zeros(report_len)
        date_values = []

        fig = plt.figure(1)

        vmax = np.pi * self.lambda_d
        vmin = -1 * vmax

        for index, element in enumerate(self.clean_data_list):
            if index == len(self.clean_data_list) - 1:
                break
            master = self.clean_data_list[index]
            slave = self.clean_data_list[index + 1]

            print "Processing take %d and %d." % (master, slave)

            data = self.sar_collection.find_one({'take': str(master)})
            file_temp = h5py.File(data['route'], 'r')
            dset = file_temp["Complex_image"]
            Imagen_master = dset[...]
            file_temp.close()

            data = self.sar_collection.find_one({'take': str(slave)})
            date = (data['date'])
            time = (data['time'])
            file_temp = h5py.File(data['route'], 'r')
            dset = file_temp["Complex_image"]
            Imagen_slave = dset[...]
            file_temp.close()

            phase = np.angle(Imagen_master * np.conj(Imagen_slave))
            magnitude = np.absolute(Imagen_master)
            masked_angle = np.ma.masked_where(mask == 0, phase)
            masked_magnitude = np.ma.masked_where(mask == 0, magnitude)

            mag_mean[index] = masked_magnitude.mean()
            phase_mean[index] = masked_angle.mean() * self.lambda_d
            phase_mean_sum[index] = np.sum(phase_mean)
            phase_std_dev[index] = np.std(masked_angle) * self.lambda_d
            date_values.append(
                datetime.strptime(''.join((date, time)), ''.join(
                    (date_format, time_format))))

        fig.clear()
        fig = plt.figure(figsize=(15.0, 8.0))

        plt.subplot(221)
        plt.title(r'$\overline{\Delta r}\/\/vs\/\/time$', fontsize=16)
        plt.ylabel(r'$\overline{\Delta r}\/\/(mm)$', fontsize=16)
        plt.xlabel(r'$time$', fontsize=16)
        plt.plot(date_values, phase_mean)
        ax = plt.gca()
        ax.set_ylim(
            [-(np.amax(phase_mean) * 2.0), (np.amax(phase_mean) * 2.0)])
        ax.xaxis.set_major_formatter(DateFormatter('%d/%m %H:%M'))
        labels = ax.get_xticklabels()
        plt.setp(labels, rotation=30, fontsize=10)

        plt.subplot(222)
        plt.title(r'$\overline{\Delta r}\/\/(acc)\/\/vs\/\/time$', fontsize=16)
        plt.ylabel(r'$\overline{\Delta r}\/\/(acc)$', fontsize=16)
        plt.xlabel(r'$time$', fontsize=16)
        plt.plot(date_values, phase_mean_sum)
        ax = plt.gca()
        ax.xaxis.set_major_formatter(DateFormatter('%d/%m %H:%M'))
        labels = ax.get_xticklabels()
        plt.setp(labels, rotation=30, fontsize=10)

        sub = plt.subplot(223)
        plt.title(r'$\sigma_{\Delta r}\/\/vs\/\/time$', fontsize=16)
        plt.ylabel(r'$\sigma_{\Delta r}\/\/(mm)$', fontsize=16)
        plt.xlabel(r'$time$', fontsize=16)
        plt.plot(date_values, phase_std_dev)
        ax = plt.gca()
        ax.set_ylim([0.0, np.amax(phase_std_dev) * 1.2])
        ax.xaxis.set_major_formatter(DateFormatter('%d/%m %H:%M'))
        labels = ax.get_xticklabels()
        plt.setp(labels, rotation=30, fontsize=10)

        plt.subplot(224)
        plt.title(r'$\overline{mag}\/\/vs\/\/time\/\/(normalized)$',
                  fontsize=16)
        plt.ylabel(r'$\overline{mag}$', fontsize=16)
        plt.xlabel(r'$time$', fontsize=16)
        plt.plot(date_values, mag_mean)
        ax = plt.gca()
        ax.xaxis.set_major_formatter(DateFormatter('%d/%m %H:%M'))
        labels = ax.get_xticklabels()
        plt.setp(labels, rotation=30, fontsize=10)

        plt.tight_layout()
        plt.savefig(clean_results_folder + "/statistical_report.png")

        print "Done!"
Example #35
0
    def process_data(self,
                     xi,
                     xf,
                     yi,
                     yf,
                     dx,
                     dy,
                     R0=0.0,
                     ifft_fact=8,
                     win=False):
        #grid extension: [(xi, xf), (yi, yf)]
        #grid resolution: dx and dy
        self.xi = xi
        self.xf = xf
        self.yi = yi
        self.yf = yf
        self.dx = dx
        self.dy = dy

        results_folder = "/home/andre/sar_processed_data/imaging/" + self.collection_name

        if not os.path.exists(results_folder):
            os.makedirs(results_folder)

        processed_data_db = self.db_client['sar_processed_data']
        sar_processed_data_collection = processed_data_db[self.collection_name]

        if self.algorithm == "range_migration":
            range_res = 2**11
            cross_range_res = 2**13
            rows = int((max(cross_range_res, self.nx) - self.nx) / 2)
            cols = int((max(range_res, self.nfre) - self.nfre) / 2)
            cols_left = cols
            cols_right = cols
            rows_up = rows
            rows_down = rows

            if not (self.nx % 2) == 0:
                rows_down = rows + 1

            if not (self.nfre % 2) == 0:
                cols_left = cols + 1

            rs = (self.yf + self.yi) / 2.0
            #rs = 100.0

            s0 = self.calculate_matched_filter(rs, range_res, cross_range_res)

            for take in range(1, self.ntakes + 1):
                print "Processing %d out of %d." % (take, self.ntakes)
                s21 = np.empty([self.nx, self.nfre], dtype=np.complex64)
                data = self.sar_collection.find({'take_number': str(take + 1)})

                for position in range(self.nx):
                    print "Take %d, position %d" % (take, position)
                    data_real = literal_eval(
                        data[position]['data']['data_real'])
                    data_imag = literal_eval(
                        data[position]['data']['data_imag'])
                    s21[position, :] = np.array(
                        data_real)[0] + 1j * np.array(data_imag)[0]

                if win:
                    s21 = self.hanning(s21)

                s21 = np.pad(s21,
                             [[rows_up, rows_down], [cols_left, cols_right]],
                             'constant',
                             constant_values=0)

                dt = json.loads(data[self.nx - 1]['datetime'],
                                object_hook=json_util.object_hook)
                date = str(dt.date())
                time = str(dt.time().strftime("%H:%M:%S"))

                self.rm_algorithm(s21, s0, take, date, time)

        if self.algorithm == "terrain_mapping":
            self.nposx = int(np.ceil(
                (xf - xi) / dx) + 1)  #number of positions axis x
            self.nposy = int(np.ceil(
                (yf - yi) / dy) + 1)  #number of positions axis y
            self.xf = self.xi + self.dx * (self.nposx - 1
                                           )  #recalculating x final position
            self.yf = self.yi + self.dy * (self.nposy - 1
                                           )  #recalculating y final position
            self.npos = self.nposx * self.nposy  #total of positions
            self.nr = 2**int(np.ceil(np.log2(
                self.nfre * ifft_fact)))  #calculate a number of ranges,
            #considering the zero padding
            n = np.arange(self.nr)  #final number of ranges
            B = self.df * self.nr  #final bandwidth
            dr = c0 / (2 * B)  #recalculate resolution, considering ifft_fact
            self.rn = dr * n  #range vector
            self.R = dr * self.nr  #range period used for the interpolation

            xa = self.xai + self.dax * np.arange(
                self.nx)  #antenna positions vector
            xn = self.xi + self.dx * np.arange(self.nposx)  #grid vector axis x
            yn = self.yi + self.dy * np.arange(self.nposy)  #grid vector axis y

            sar_processed_data_collection.delete_many({})

            Rnk = self.calculate_Rnk(
                xn, yn,
                xa)  #vector of distance from the antenna positions to the grid
            Rnk_folder = results_folder + "/Rnk.hdf5"
            f = h5py.File(Rnk_folder, 'w')
            dset = f.create_dataset("Rnk", (Rnk.shape), dtype=np.float32)
            dset[...] = Rnk
            f.close()
            post = {
                "type": "parameters",
                "xi": self.xi,
                "xf": self.xf,
                "yi": self.yi,
                "yf": self.yf,
                "dx": self.dx,
                "dy": self.dy,
                "fi": self.fre_min,
                "ff": self.fre_max,
                "nfre": self.nfre,
                "ifft_fact": ifft_fact,
                "window": win,
                "Rnk_folder": Rnk_folder
            }

            sar_processed_data_collection.insert(post)

            parameters = sar_processed_data_collection.find_one(
                {'type': 'parameters'})
            file_temp = h5py.File(parameters['Rnk_folder'], 'r')
            dset = file_temp["Rnk"]
            Rnk = dset[...]
            file_temp.close()

            starting_take = 1
            '''
			if sar_processed_data_collection.find().count() is not 0:
				parameters = sar_processed_data_collection.find_one({'type' : 'parameters'})

				if (self.xi == parameters['xi']) and (self.xf == parameters['xf']) and (self.yi == parameters['yi']) and (self.yf == parameters['yf']):
					file_temp = h5py.File(parameters['Rnk_folder'], 'r')
					dset = file_temp["Rnk"]
					Rnk = dset[...]
					file_temp.close()
					starting_take = len(self.sar_collection.find().distinct('take'))
				else:
					sar_processed_data_collection.delete_many({})

			if sar_processed_data_collection.find().count() is 0:
				Rnk = self.calculate_Rnk(xn, yn ,xa) #vector of distance from the antenna positions to the grid
				Rnk_folder = results_folder + "/Rnk.hdf5"
				f = h5py.File(Rnk_folder, 'w')
				dset = f.create_dataset("Rnk", (Rnk.shape), dtype = np.float32)
				dset[...] = Rnk
				f.close()
				post = {"type": "parameters",
						"xi": self.xi,
	                    "xf": self.xf,
	            	    "yi": self.yi,
						"yf": self.yf,
						"dx": self.dx,
						"dy": self.dy,
						"ifft_fact": ifft_fact,
						"window": win,
						"Rnk_folder": Rnk_folder}

				sar_processed_data_collection.insert(post)
				post = None
			'''
            for take in range(starting_take, self.ntakes + 1):
                print "Processing %d out of %d." % (take, self.ntakes)
                s21 = np.empty([self.nx, self.nfre], dtype=np.complex64)
                data = self.sar_collection.find({'take_number': str(take)})

                #if data.count() < self.nx:
                #	continue

                for position in range(self.nx):
                    data_real = literal_eval(
                        data[position]['data']['data_real'])
                    data_imag = literal_eval(
                        data[position]['data']['data_imag'])
                    s21[position, :] = np.array(
                        data_real)[0] + 1j * np.array(data_imag)[0]

                if win:
                    s21 = s21 * np.hanning(s21.shape[1])
                    s21 = s21 * np.hanning(s21.shape[0])[:, np.newaxis]

                dt = datetime.strptime(data[self.nx - 1]['datetime'],
                                       "%Y-%m-%d %H:%M:%S.%f")
                date = str(dt.date())
                time = str(dt.time().strftime("%H:%M:%S"))

                self.tm_algorithm(s21, Rnk, take, date, time, results_folder,
                                  sar_processed_data_collection)
Example #36
0
    def calculate_sliding(self, threshold, output_images):
        self.threshold = threshold

        print "Processing data..."

        correlation = self.calculate_correlation()

        aux = int((self.yf - (self.xf * np.tan(48.0 * np.pi / 180.0))) *
                  correlation.shape[0] / (self.yf - self.yi))

        mask_aux = np.zeros(correlation.shape)

        count = 0
        nposx = correlation.shape[1]
        nposy = correlation.shape[0]

        for k in range(nposy):
            if k >= (aux + 1):
                mask_aux[k, 0:count] = 1
                mask_aux[k, nposx - count - 1:nposx - 1] = 1
                count = count + 1
        masked_values = np.ma.masked_where(mask_aux == 0, mask_aux)

        fig = plt.figure(1)
        plt.title("Complex correlation magnitude", fontsize=11)
        im = plt.imshow(np.absolute(correlation),
                        cmap='jet',
                        aspect='auto',
                        extent=[self.xi, self.xf, self.yi, self.yf],
                        vmin=0,
                        vmax=1)
        plt.imshow(masked_values,
                   cmap='Greys',
                   aspect='auto',
                   extent=[self.xi, self.xf, self.yi, self.yf])
        plt.colorbar(im, orientation='vertical', format='%.1f')
        plt.savefig(self.results_folder + "/complex_correlation_mag.png")
        fig.clear()

        mask = np.absolute(correlation)
        mask[mask < threshold] = 0
        mask[mask >= threshold] = 1

        vmax = np.pi * self.lambda_d
        vmin = -1 * vmax

        mag_mean = np.zeros(self.ntakes - 1)
        phase_mean = np.zeros(self.ntakes - 1)
        phase_mean_sum = np.zeros(self.ntakes - 1)
        phase_std_dev = np.zeros(self.ntakes - 1)
        date_values = []

        fig = plt.figure(1)

        file_temp = h5py.File(self.results_folder + "/mask.hdf5", 'w')
        dset = file_temp.create_dataset("Mask", (mask.shape), dtype=np.uint8)
        dset[...] = mask
        file_temp.close()

        for take in range(1, self.ntakes):
            print "Processing take %d and %d." % (take, take + 1)
            data = self.sar_collection.find_one({'take': str(take)})
            file_temp = h5py.File(data['route'], 'r')
            dset = file_temp["Complex_image"]
            Imagen_master = dset[...]
            file_temp.close()

            data = self.sar_collection.find_one({'take': str(take + 1)})
            date = (data['date'])
            time = (data['time'])
            file_temp = h5py.File(data['route'], 'r')
            dset = file_temp["Complex_image"]
            Imagen_slave = dset[...]
            file_temp.close()

            phase = np.angle(Imagen_master * np.conj(Imagen_slave))
            magnitude = np.absolute(Imagen_master)
            masked_angle = np.ma.masked_where(mask == 0, phase)
            masked_magnitude = np.ma.masked_where(mask == 0, magnitude)

            mag_mean[take - 1] = masked_magnitude.mean()
            phase_mean[take - 1] = masked_angle.mean() * self.lambda_d
            phase_mean_sum[take - 1] = np.sum(phase_mean)
            phase_std_dev[take - 1] = np.std(masked_angle) * self.lambda_d
            date_values.append(
                datetime.strptime(''.join((date, time)), ''.join(
                    (date_format, time_format))))

            if output_images:
                fig.suptitle("Image %d and %d" % (take, take + 1))
                plt.ylabel('Range (m)', fontsize=14)
                plt.xlabel('Cross-range (m)', fontsize=14)

                if take == 1:
                    im = plt.imshow(
                        self.lambda_d * phase,
                        cmap='jet',
                        aspect='auto',
                        extent=[self.xi, self.xf, self.yi, self.yf],
                        vmin=vmin,
                        vmax=vmax)
                    cbar = plt.colorbar(im,
                                        orientation='vertical',
                                        format='%.2f')
                im = plt.imshow(self.lambda_d * phase,
                                cmap='jet',
                                aspect='auto',
                                extent=[self.xi, self.xf, self.yi, self.yf],
                                vmin=vmin,
                                vmax=vmax)
                plt.savefig(self.results_folder + "/take%d_%d.png" %
                            (take, (take + 1)))

        fig.clear()
        fig = plt.figure(figsize=(10.0, 6.0))

        plt.subplot(221)
        plt.title(r'$\overline{\Delta r}\/\/vs\/\/time$', fontsize=16)
        plt.ylabel(r'$\overline{\Delta r}\/\/(mm)$', fontsize=16)
        plt.xlabel(r'$time$', fontsize=16)
        plt.plot(date_values, phase_mean)
        ax = plt.gca()
        ax.set_ylim(
            [-(np.amax(phase_mean) * 2.0), (np.amax(phase_mean) * 2.0)])
        ax.xaxis.set_major_formatter(DateFormatter('%d/%m %H:%M'))
        labels = ax.get_xticklabels()
        plt.setp(labels, rotation=30, fontsize=10)

        plt.subplot(222)
        plt.title(r'$\overline{\Delta r_{acc}}\/\/(mm)\/\/vs\/\/time$',
                  fontsize=16)
        plt.ylabel(r'$\overline{\Delta r_{acc}}$', fontsize=16)
        plt.xlabel(r'$time$', fontsize=16)
        plt.plot(date_values, phase_mean_sum)
        ax = plt.gca()
        ax.xaxis.set_major_formatter(DateFormatter('%d/%m %H:%M'))
        labels = ax.get_xticklabels()
        plt.setp(labels, rotation=30, fontsize=10)

        sub = plt.subplot(223)
        plt.title(r'$\sigma_{\Delta r}\/\/vs\/\/time$', fontsize=16)
        plt.ylabel(r'$\sigma_{\Delta r}\/\/(mm)$', fontsize=16)
        plt.xlabel(r'$time$', fontsize=16)
        plt.plot(date_values, phase_std_dev)
        ax = plt.gca()
        ax.set_ylim([0.0, np.amax(phase_std_dev) * 1.2])
        ax.xaxis.set_major_formatter(DateFormatter('%d/%m %H:%M'))
        labels = ax.get_xticklabels()
        plt.setp(labels, rotation=30, fontsize=10)

        plt.subplot(224)
        plt.title(r'$\overline{mag}\/\/vs\/\/time$', fontsize=16)
        plt.ylabel(r'$\overline{mag}$', fontsize=16)
        plt.xlabel(r'$time$', fontsize=16)
        plt.plot(date_values, mag_mean)
        ax = plt.gca()
        ax.set_ylim([0.0, np.amax(mag_mean) * 1.2])
        ax.xaxis.set_major_formatter(DateFormatter('%d/%m %H:%M'))
        labels = ax.get_xticklabels()
        plt.setp(labels, rotation=30, fontsize=10)

        plt.tight_layout()
        plt.savefig(self.results_folder + "/statistical_report.png")

        print "Done!"
Example #37
0
def date(value):
    """Parse a valid looking date in the format YYYY-mm-dd"""
    date = datetime.strptime(value, "%Y-%m-%d")
    if date.year < 1900:
        raise ValueError(u"Year must be >= 1900")
    return date
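
A minimal usage sketch for the validator above, assuming the date() function just defined is in scope; the import and the sample inputs are illustrative, not part of the original snippet.

from datetime import datetime

print(date("2023-07-14"))       # -> 2023-07-14 00:00:00
try:
    date("1850-01-01")
except ValueError as err:
    print(err)                  # -> Year must be >= 1900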
Example #38
0
def convert_timestamp(folder: str, ext: str = ".log") -> None:
    for file in listdir(folder):
        if path.splitext(file)[1] != ext:
            continue
        date_time = datetime.strptime(file, "%Y%m%d-%H%M%S{0}".format(ext))
        rename(path.join(folder, file), path.join(folder, str(date_time.timestamp()) + ext))
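
For illustration, the rename above applied to a single made-up file name; the epoch value depends on the local timezone, so treat the printed result as an example only.

from datetime import datetime
from os import path

name = "20190704-153000.log"        # hypothetical log file name
ext = path.splitext(name)[1]
stamp = datetime.strptime(name, "%Y%m%d-%H%M%S" + ext).timestamp()
print(str(stamp) + ext)             # e.g. "1562254200.0.log"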
Example #39
0
def _dbs_dob_matches(record, applicant_date_of_birth):
    return datetime.strptime(applicant_date_of_birth,
                             '%Y-%m-%d') == datetime.strptime(
                                 record['date_of_birth'], '%Y-%m-%d')
Example #40
0
 def _get_purchase_schedule_date(self):
     procurement_date_planned = datetime.strptime(
         self.date_planned, DEFAULT_SERVER_DATETIME_FORMAT)
     schedule_date = (procurement_date_planned -
                      relativedelta(days=self.company_id.po_lead))
     return schedule_date
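
A standalone sketch of the same computation outside Odoo, assuming DEFAULT_SERVER_DATETIME_FORMAT is Odoo's usual '%Y-%m-%d %H:%M:%S'; the planned date and the 3-day purchase lead time are made-up values.

from datetime import datetime
from dateutil.relativedelta import relativedelta

DEFAULT_SERVER_DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
date_planned = "2021-06-15 08:00:00"    # illustrative procurement date
po_lead = 3                             # illustrative purchase lead time (days)

procurement_date_planned = datetime.strptime(date_planned, DEFAULT_SERVER_DATETIME_FORMAT)
schedule_date = procurement_date_planned - relativedelta(days=po_lead)
print(schedule_date)                    # 2021-06-12 08:00:00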
Example #41
0
    def parse(self, response):
        self.driver.get(response.url)
        time.sleep(3)
        name = self.driver.find_element_by_css_selector('.startup_name').text
        profile_url = str(response.url)
        company_website = self.driver.find_element_by_css_selector('.startup_website').get_attribute("href")
        try:
            location = self.driver.find_element_by_css_selector('.startup_location').text
        except:
            location = ''
        tags = self.driver.find_element_by_css_selector('.startup_market').text
        founding_day = 1
        try:
            founding_month = self.driver.find_element_by_css_selector('.startup_found_month').text
            founding_year = self.driver.find_element_by_css_selector('.startup_found_year').text
            datetime_string = "{0} {1}, {2}".format(founding_day, founding_month, founding_year)
            founding_date = datetime.strptime(datetime_string, '%d %B, %Y')
            founding_date = founding_date.strftime('%Y-%m-%d')
        except:
            founding_date = ''

        social_urls_list = []
        try:
            social_urls = self.driver.find_element_by_xpath("//div[@class='col-md-5 socials pdt text-right']")
            social_urls = social_urls.find_elements_by_tag_name('a')

            for social_url in social_urls:
                social_urls_list.append(social_url.get_attribute("href"))

            social_urls_string = ", ".join(social_urls_list)
        except Exception as e:
            print(e)
            social_urls_string = ''
        try:
            main_description = self.driver.find_element_by_css_selector('.profile-desc-text')
            description = main_description.text
            html_description = main_description.get_attribute('innerHTML')
            soup = BeautifulSoup(html_description, 'lxml')
            try:
                p_tag = soup.select('p')
                raw_p_tag = str(p_tag[0]).split('<br/>')[0]
                cleanr = re.compile('<.*?>')
                short_description = re.sub(cleanr, '', raw_p_tag)

            except:
                short_description = ''
        except:
            description, short_description = '', ''

        try:
            #founder_part = self.driver.find_element_by_xpath("//div[@class='row team team-member-parent']")
            team = self.driver.find_elements_by_xpath("//div[@class='desc']")
            founders = []
            for i in range(0, len(team)):
                # ".//" keeps the lookup relative to this team member instead of the whole page
                statement = team[i].find_element_by_xpath(".//span[@class='profile-text block role']").text.lower()
                if 'founder' in statement:
                    founder = team[i].find_element_by_xpath(".//span[@class='item-label bold member-name']").text
                    founders.append(founder)
            founders = ''.join(founders)
        except Exception as e:
            founders = ''
        emails = is_email_phone(description, 'email')
        phones = is_email_phone(description, 'phone')

        emp_r = ''
        for lin in social_urls_list:
            reg = re.compile(r'\d+')
            if 'linkedin.com' in lin:
                self.driver.get(lin)
                try:
                    data_for_login = yaml_loader('settings.yaml')
                    log_email = data_for_login['Info']['LinkedIn_email']
                    log_pass = data_for_login['Info']['LinkedIn_password']
                    log =self.driver.find_element_by_link_text('Sign in')
                    log.click()
                    email =self.driver.find_element_by_id('login-email')
                    email.send_keys(log_email)
                    password =self.driver.find_element_by_id('login-password')
                    password.send_keys(log_pass)
                    password.send_keys(Keys.ENTER)
                except Exception as e:
                    print(e)

                try:
                    employee_range = self.driver.find_element_by_css_selector(
                            '.link-without-visited-state.inline-block.ember-view').text
                    emp_r = reg.findall(employee_range)
                    if len(emp_r) > 1:
                        emp_r= str(emp_r[0]) + str(emp_r[1])
                    else:
                        emp_r = emp_r[0]
                except:
                    pass
        yield {"company_name":name,"company_url":profile_url, "company_website_url":company_website,
                "location":location,"tags":tags, "founding_date":founding_date,
                "founders": founders,"employee_range":emp_r, "urls":social_urls_string,"emails":emails,
                "phones":phones,"description_short":short_description,"description":description}
Example #42
0
 def ymd_to_jd(self, time_str, fmt='%Y%m%d'):
     dt = datetime.strptime(time_str, fmt)
     tt = dt.timetuple()
     return tt.tm_yday
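
For illustration, a module-level equivalent of the method above with a sample call; the date string is made up.

from datetime import datetime

def ymd_to_jd(time_str, fmt='%Y%m%d'):
    # day of year, loosely called "Julian day" above
    return datetime.strptime(time_str, fmt).timetuple().tm_yday

print(ymd_to_jd("20200301"))    # -> 61 (2020 is a leap year)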
Example #43
0
    def parse(self, proxy=False):
        result = []

        # avoiding blocks
        headers = {
            'user-agent': feed.UserAgent_random().lstrip(),
            'referer': 'https://www.google.com/search?newwindow=1&q='+self.href
        }
        if proxy != False:
            proxyDict = {
                "http": "http://" + proxy, 
                "https": "https://" + proxy,
            }
        else:
            proxyDict = {}

        # custom ранобэ.рф API import
        if self.href.find('http://xn--80ac9aeh6f.xn--p1ai/') != -1:
            request = f"https://xn--80ac9aeh6f.xn--p1ai/api/v2/books/{ self.href[31:-1] }/chapters"
            request = requests.get(request).json()  # (request, headers=headers, proxies=proxyDict)

            for each in request['items']:
                # ignoring payed chapters
                if each['availabilityStatus'] == 'free':
                    result.append(feedUpdate(
                        name=each["title"],
                        href="http://xn--80ac9aeh6f.xn--p1ai"+each["url"],
                        datetime=datetime.strptime(each["publishTime"], '%Y-%m-%d %H:%M:%S'),
                        title=self.title))

        # custom instagram import
        if self.href.find('https://www.instagram.com/') != -1:
            if not randint(0, 100) == 0:
                return []
            try:
                request = requests.get(self.href, headers=headers, proxies=proxyDict)
                request = BeautifulSoup(request.text, "html.parser")

                for each in request.find_all('script'):
                    data = 'window._sharedData = '
                    if each.text.find(data) != -1:
                        # preparing JSON
                        data = each.text.find(data) + len(data)  # data start position
                        data = each.text[data:-1]  # -1 is for removing ; in the end
                        data = json.loads(data)

                        # selecting data from JSON
                        data = data['entry_data']['ProfilePage'][0]['graphql']
                        data = data['user']['edge_owner_to_timeline_media']['edges']

                        # parsing data from JSON
                        for each in data:
                            # avoiding errors caused by empty titles
                            try:
                                result_name = each['node']['edge_media_to_caption']['edges'][0]['node']['text']
                            except IndexError:
                                result_name = 'no title'

                            result.append(feedUpdate(
                                name=result_name,
                                href="http://instragram.com/p/"+each['node']['shortcode'],
                                datetime=datetime.fromtimestamp(each['node']['taken_at_timestamp']),
                                title=self.title))
            except (KeyError, requests.exceptions.ProxyError, requests.exceptions.SSLError) as err:
                return []

        # custom RSS YouTube converter (link to feed has to be converted manually)
        elif self.href.find('https://www.youtube.com/channel/') != -1:
            self.href_title = self.href[:]
            # 32 = len('https://www.youtube.com/channel/')
            # 7 = len('/videos')
            self.href = "https://www.youtube.com/feeds/videos.xml?channel_id=" + self.href[32:-7]
            result = feed.parse(self)

        # custom RSS readmanga converter (link to feed has to be converted manually to simplify feed object creation)
        elif self.href.find('http://readmanga.me/') != -1 and self.href.find('readmanga.me/rss/manga') == -1 and self.href_title == None:
            # 20 = len('http://readmanga.me/')
            self.href = "feed://readmanga.me/rss/manga?name=" + self.href[20:]
            result = feed.parse(self)

        # custom RSS mintmanga converter (link to feed has to be converted manually to simplify feed object creation)
        elif self.href.find('http://mintmanga.com/') != -1 and self.href.find('mintmanga.com/rss/manga') == -1 and self.href_title == None:
            # 21 = len('http://mintmanga.com/')
            self.href = "feed://mintmanga.com/rss/manga?name=" + self.href[21:]
            result = feed.parse(self)

        # custom RSS deviantart converter (link to feed has to be converted manually to simplify feed object creation)
        elif self.href.find('https://www.deviantart.com/') != -1:
            self.href_title = self.href[:]
            # 27 = len('https://www.deviantart.com/')
            # 9 = len('/gallery/')
            self.href = self.href[27:-9]
            self.href = "http://backend.deviantart.com/rss.xml?q=gallery%3A" + self.href
            result = feed.parse(self)

        # custom fantasy-worlds.org loader
        elif self.href.find('https://fantasy-worlds.org/series/') != -1:
            strainer = SoupStrainer('div', attrs={'class': 'rightBlock'})

            request = requests.get(self.href, headers=headers, proxies=proxyDict)
            request = BeautifulSoup(request.text, "html.parser", parse_only=strainer)

            for each in request.find('ul').find('li').find('ul').find('li').find('ul').find_all('li'):
                result.append(feedUpdate(
                    name=f"{self.title} {each.text[:each.text.find(' // ')]}",
                    href=each.find('a')['href'],
                    datetime=datetime.now(),  # <=== fake date
                    title=self.title))

        # custom pikabu import
        elif self.href.find('pikabu.ru/@') != -1:
            # try:
            strainer = SoupStrainer('div', attrs={'class': 'stories-feed__container'})

            request = requests.get(self.href, headers=headers, proxies=proxyDict)
            request = BeautifulSoup(request.text, "html.parser", parse_only=strainer)

            for each in request.find_all('article'):
                try:
                    # turn the trailing "+03:00"-style UTC offset into "+0300" so strptime's %z accepts it
                    result_datetime = each.find('time')['datetime'][:-3]+"00"
                    result_datetime = datetime.strptime(result_datetime, '%Y-%m-%dT%H:%M:%S%z')

                    result.append(feedUpdate(
                        name=each.find('h2', {'class': "story__title"}).find('a').getText(),
                        href=each.find('h2', {'class': "story__title"}).find('a')['href'],
                        datetime=result_datetime,
                        title=self.title))

                except (TypeError, AttributeError) as err:
                    # advertisement, passing as no need to save it
                    pass
            # except (requests.exceptions.ConnectionError, requests.exceptions.SSLError) as err:
            #     # failed connection, hope it works from time to time
            #     return []

        # # custom fanserials parser
        # elif self.href.find('http://fanserial.net/') != -1 and self.filter is not None:
        #     strainer = SoupStrainer('ul', attrs={'id': 'episode_list'})
        #
        #     request = requests.get(self.href, headers=headers, proxies=proxyDict)
        #     request = BeautifulSoup(request.text, "html.parser", parse_only=strainer)
        #     print(request)
        #
        #     for each in request.find_all('li'):
        #         print(each)
        #         result_href = ''
        #         for each_span in each.find('div').find('div', attrs={'class': 'serial-translate'}).find_all('span'):
        #             result_href = 'http://fanserial.tv' + each_span.find('a').get('href')
        #
        #         result.append(feedUpdate(
        #             name=each.find('div', attrs={'class': 'field-description'}).find('a').text,
        #             href=result_href,
        #             datetime=datetime.now(),  # <=== fake date
        #             title=self.title))

        # default RSS import
        else:
            proxyDict = urllib.request.ProxyHandler(proxyDict)

            request = feedparser.parse(self.href, request_headers=headers, handlers=[proxyDict])

            for each in request["items"]:
                # HREF RESULT
                if self.title == "Expresso":
                    result_href = each["summary"]

                    start = result_href.find('https://expres.co/')
                    end = result_href.find('"')

                    result_href = result_href[start:end]
                else:
                    result_href = each["links"][0]["href"]

                # DATE RESULT: parsing dates
                if "published" in each:
                    result_datetime = each["published"]
                elif "updated" in each:
                    result_datetime = each["updated"]
                else:
                    print(f"result_datetime broke for { self.title }")
                    continue
                
                tzinfos = {'PDT': gettz("America/Los_Angeles"), 'PST': gettz("America/Juneau")}
                result_datetime = parser.parse(result_datetime, tzinfos=tzinfos)

                # APPEND RESULT
                result.append(feedUpdate(
                    name=each["title_detail"]["value"],
                    href=result_href,
                    datetime=result_datetime,
                    title=self.title))

        # universal postfixes
        result_filtered = []
        for each in result:
            # FILTERING: passing item cycle if filter does not match
            if self.filter is not None:
                if each.name.find(self.filter) == -1 or each.href.find(self.filter) == -1:
                    continue

            # DATETIME fixes
            # fix timezone unaware
            # if each.datetime.tzinfo is not None and each.datetime.tzinfo.utcoffset(each.datetime) is not None:
            #     each_dt = localtime(each.datetime)
            #     each.datetime = datetime(each_dt.year, each_dt.month, each_dt.day,
            #          each_dt.hour, each_dt.minute, each_dt.second)
                     
            # if each.datetime.tzinfo is not None and each.datetime.tzinfo.utcoffset(each.datetime) is not None:
            #     print("!!!! WARNING !!!!")
            # # add DELAY
            # if type(self.delay) is not type(None):
            #     each.datetime += timedelta(hours=self.delay)

            # NAME fixes
            each.name = ' '.join(each.name.split())
            each.name = each.name[:140]  # SQLite does not support max-length
            # extra symbols
            if each.title == 'Shadman':
                each.name = each.name[:each.name.find('(')-1]
            elif each.title == 'Apple' and each.name[-len('Apple'):] == 'Apple':
                # - symbol can be a variety of different symbols
                # 8 = len(' - Apple')
                each.name = each.name[:-8]
            elif each.title == 'LastWeekTonight':
                end = each.name.find(': Last Week Tonight with John Oliver (HBO)')
                if end != -1:
                    each.name = each.name[:end]

            result_filtered.append(each)

        return result_filtered
Example #44
0
        usage()
    except getopt.GetoptError as error:
        usage("ERROR: " + str(error))

    command_offset = 3
    if "-p" in sys.argv[:4]:
        command_offset += 2

    command = " ".join(sys.argv[command_offset:])

    if command:
        input.test_params["command"] = command

    if input.param("parallel", True):
        # workaround for a python2.6 bug of using strptime with threads
        datetime.strptime("30 Nov 00", "%d %b %y")
        RemoteJob().parallel_remote(input)
    else:
        RemoteJob().sequential_remote(input)


if __name__ == "__main__":
    main()


def create_log_file(log_config_file_name, log_file_name, level):
    tmpl_log_file = open("jython.logging.conf")
    log_file = open(log_config_file_name, "w")
    log_file.truncate()
    for line in tmpl_log_file:
        newline = line.replace("@@LEVEL@@", level)
Example #45
0
def listaFila(request):
    if has_permission(request.user, 'permissao_unidade'):
        unidade = UnidadeSaude.objects.filter(users=request.user)
        filas = [None]
        aux_filas = [None]
        hoje = date.today()
        consultas = None
        autorizacoes = None
        exames = None
        objetos_aux = None

        if unidade:

            if request.method == 'POST':
                data = request.POST.get('data', None)
                dataFinal = request.POST.get('dataFinal', None)
                objeto = request.POST.get('objeto', None)

                if objeto == 'Consultas':
                    obj_aux = 'Consultas'
                    consultas = unidade[0].consultas.all()
                    objetos = unidade[0].consultas.filter(
                        data=hoje, create_fila=True).order_by('hora', 'data')
                elif objeto == 'Autorizacoes':
                    obj_aux = 'Autorizacoes'
                    autorizacoes = unidade[0].autorizacoes.all()
                    objetos = unidade[0].autorizacoes.filter(
                        data=hoje, create_fila=True).order_by('hora', 'data')
                elif objeto == 'Exames':
                    obj_aux = 'Exames'
                    exames = unidade[0].exames.all()
                    objetos = unidade[0].exames.filter(
                        data=hoje, create_fila=True).order_by('hora', 'data')

                context = {'objeto': obj_aux}

                if objetos:
                    context['objetos'] = objetos

                else:
                    context['msg_alert'] = 'Não possui Filas para hoje'

                if data:
                    try:
                        if consultas:
                            objetos_aux = unidade[0].consultas.filter(
                                data__range=[
                                    datetime.strptime(data, '%Y-%m-%d'),
                                    datetime.strptime(dataFinal, '%Y-%m-%d')
                                ],
                                create_fila=True).order_by('hora', 'data')
                        elif autorizacoes:
                            objetos_aux = unidade[0].autorizacoes.filter(
                                data__range=[
                                    datetime.strptime(data, '%Y-%m-%d'),
                                    datetime.strptime(dataFinal, '%Y-%m-%d')
                                ],
                                create_fila=True).order_by('hora', 'data')
                        elif exames:
                            objetos_aux = unidade[0].exames.filter(
                                data__range=[
                                    datetime.strptime(data, '%Y-%m-%d'),
                                    datetime.strptime(dataFinal, '%Y-%m-%d')
                                ],
                                create_fila=True).order_by('hora', 'data')

                        if objetos_aux:
                            context['objetos'] = objetos_aux
                            context['msg_alert'] = None

                        else:
                            context[
                                'msg_busca'] = 'Nada encontrado para esses parâmetros!'

                    except ValueError:
                        context['msg_busca'] = 'Informe uma data válida!'

                return render(request, 'lista_fila.html', {'context': context})

            return render(request, 'lista_fila.html')

        else:
            context = {'msg_error': 'Indisponivel Acessar Essa Área'}

        return render(request, 'home.html', {'context': context})
    else:
        context = {'msg_error': 'Impossivel Acessar Essa Área'}
        return render(request, 'home_usuario.html', {'context': context})
Example #46
0
def main():

    logging.config.fileConfig(fname=os.path.join('config', 'log.config'), disable_existing_loggers=False)

    # Get the logger specified in the file
    f_handler = logging.FileHandler(os.path.join('logs', 'generate_fred.log'))
    f_handler.setLevel(logging.DEBUG)
    log = logging.getLogger(__name__)
    f_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    f_handler.setFormatter(f_format)
    log.addHandler(f_handler)

    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--prob-minimum', default=0.75, type=float, dest='prob_minimum',
                        help='Minimum probability to be considered as fire grids')
    parser.add_argument('-s', '--start-date', dest='start_date_str', help='Start date')
    parser.add_argument('-e', '--end-date', dest='end_date_str', help='End date')
    parser.add_argument('--max-sunglint-angle', dest='max_sunglint_angle', default=12.0, type=float,
                        help='Sun glint angle is defined as angle between reflected solar path \
                             and satellite view angle. If hotspot has sun glint angle less than the max, \
                             the hotspot is rejected.')
    parser.add_argument('-a', '--alpha', dest='alpha', default=0.5, type=float,
                        help='Alpha weight for polar orbiting satellites')
    parser.add_argument('--ahi-hotspot-folder', dest='ahi_folder', default=os.path.join('..', 'data', 'raw',
                        'hotspots', 'ahi'), help='AHI hotspot folder')
    parser.add_argument('--viirs-hotspot-folder', dest='viirs_folder', default=os.path.join('..', 'data', 'raw',
                        'hotspots', 'viirs'), help='VIIRS hotspot folder')
    parser.add_argument('--modis-hotspot-folder', dest='modis_folder', default=os.path.join('..', 'data', 'raw',
                        'hotspots', 'modis'), help='MODIS hotspot folder')
    parser.add_argument('-o', '--output-folder', dest="out_file_path", default=os.path.join('..', 'data'),
                        help="Specify output folder")
    parser.add_argument('-g', '--grid', dest="grid_shp", default=os.path.join('..', 'references', 'shapefile',
                        '2km_grid_ASEAN_land_clipped.shp'), help="Specify grid .shp file")
    parser.add_argument('-n', '--name', dest="prefix_name", default='LATE_2km_', help="Prefix for output file names")

    args = parser.parse_args()
    log.debug(args)
    alpha = args.alpha
    prob_minimum = args.prob_minimum
    max_sunglint_angle = args.max_sunglint_angle

    with open(os.path.join('config', 'config.json'), "r") as read_file:
        json_config = json.load(read_file)

    clipping_box = json_config['parameters']['clipping_box']
    gamma = json_config['parameters']['gamma']
    sat_resolution_meter = json_config['sat_resolution_meter']
    shapefile_path = json_config['shapefile']['path']
    bounding_box = json_config['plotting']['bounding_box']
    dpi = json_config['plotting']['dpi']
    model_fpath = json_config['model']['path']
    h8_ml_model_fpath = json_config['h8_ml_model']['path']
    h8_predict_features = json_config['h8_ml_model']['predict_features']
    low_prob_thres = json_config['parameters']['low_prob_thres']
    med_prob_thres = json_config['parameters']['med_prob_thres']

    #read in grid shapefile
    try:
        df_grid = geopandas.read_file(args.grid_shp)
        df_grid.crs = {'init': 'epsg:3857'}
        log.debug(args.grid_shp + ' loaded successfully!')
    except Exception as e:
        log.error(args.grid_shp + ' cannot be loaded !')
        exit()

    start_date = datetime.strptime(args.start_date_str, "%Y-%m-%d %H:%M")
    date_process = start_date
    end_date = datetime.strptime(args.end_date_str, "%Y-%m-%d %H:%M")

    for days in range(0, 31):
        geo_hs = geohotspot.GeoHotspot()

        log.info('Reading hotspot .txt files')

        jp1_files = '../data/raw/hotspots/JP1_LATE_APR_SEP_DAY_2019.txt'
        geo_hs.get_hs_from_csv_db(file_path=jp1_files, sat_name='JP1_LATE')

        npp_files = '../data/raw/hotspots/NPP_LATE_APR_SEP_DAY_2019.csv'
        geo_hs.get_hs_from_csv_db(file_path=npp_files, sat_name='NPP_LATE')

        while date_process <= (end_date + timedelta(days=1)):
            #h8_files = os.path.join(args.ahi_folder, "H08_*" + date_process.strftime('%Y%m%d_01')
            #                        + "*_L2WLFbet_FLDK.06001_06001.csv")
            h8_files = os.path.join(args.ahi_folder, "H08_*" + date_process.strftime('%Y%m%d_0')
                                    + "*_L2WLFbet_FLDK.06001_06001.csv")
            geo_hs.parse_jaxa_hotspot_txt(file_path=h8_files)

            modis_files = os.path.join(args.modis_folder, "*14." + date_process.strftime('%y%j') + "*.txt")
            geo_hs.parse_modis_mod14_txt(file_path=modis_files)

            date_process = date_process + timedelta(days=1)

        # remove hotspots outside of clipping area
        geo_hs.clip_hotspot(clipping_box)
        # reject hotspots due to sun glint
        #geo_hs.reject_sunglint_hs('Himawari-8/9', max_sunglint_angle)

        geo_df = geo_hs.hs_df.copy()
        geo_df['polar_weight'] = 0.0
        geo_df['aqua_weight'] = 0.0
        geo_df['terra_weight'] = 0.0
        geo_df['n20_weight'] = 0.0
        geo_df['npp_weight'] = 0.0

        geo_df['geo_weight'] = 0.0
        geo_df['weight'] = 0.0
        geo_df['confidence'] = geo_df['confidence'].fillna(0)
        geo_df.astype({'geo_weight': 'float64', 'polar_weight': 'float64', 'weight': 'float64', 'confidence': 'float64'})

        geo_df['date'] = pd.to_datetime(geo_df['date'], format="%d/%m/%Y %H:%M:%S")
        # selects period of interest
        geo_df = geo_df[(geo_df['date'] >= start_date) & (geo_df['date'] <= end_date)]
        log.debug(geo_df['date'].unique())
        log.debug(geo_df[['satellite', 'date']].groupby(['satellite']).count())

        try:
            h8_ml_model = load(h8_ml_model_fpath)
            log.debug('Loaded trained H8 ML model from ' + h8_ml_model_fpath)
            log.debug(f'Model pipeline: {h8_ml_model}')
            geo_df.loc[geo_df['satellite'] == 'Himawari-8/9', 'geo_weight'] = h8_ml_model.predict_proba(
                geo_df.loc[geo_df['satellite'] == 'Himawari-8/9', h8_predict_features])[:, 1]
            log.info('Added in probabilities using H8 Gradient Boosting Model.')
        except Exception as e:
            log.exception(e)

        geo_df.loc[geo_df['satellite'] == 'TERRA', 'terra_weight'] = \
            geo_df.loc[geo_df['satellite'] == 'TERRA', 'confidence'] / 100.0
        geo_df.loc[geo_df['satellite'] == 'AQUA', 'aqua_weight'] = \
            geo_df.loc[geo_df['satellite'] == 'AQUA', 'confidence'] / 100.0
        geo_df.loc[geo_df['satellite'] == 'JP1_LATE', 'n20_weight'] = \
            geo_df.loc[geo_df['satellite'] == 'JP1_LATE', 'confidence'] / 100.0
        geo_df.loc[geo_df['satellite'] == 'NPP_LATE', 'npp_weight'] = \
            geo_df.loc[geo_df['satellite'] == 'NPP_LATE', 'confidence'] / 100.0

        # count number of Himawari observations (one scene every 10 minutes)
        geo_obs_count = int((end_date - start_date).total_seconds() / 600)
        # normalize the weight for Himawari
        geo_df['geo_weight'] = geo_df['geo_weight'] / geo_obs_count

        # assign zero weight for hotspots due to sun glint
        #sun_glint_flag = (geo_df['sunglint_angle'] <= max_sunglint_angle) & (geo_df['satellite'] == "Himawari-8/9")
        #geo_df.loc[sun_glint_flag, 'geo_weight'] = 0.0

        num_polar_sat = len(geo_df.loc[geo_df['satellite'] != 'Himawari-8/9', 'satellite'].unique())

        if num_polar_sat >= 1:
            geo_df['polar_weight'] = (1.0 / num_polar_sat) * (geo_df['confidence'] / 100.0)

        geo_df['weight'] = ((1 - alpha) * geo_df['geo_weight']) + (alpha * geo_df['polar_weight'])

        #round to 8 decimals to save storage
        geo_df = geo_df.round(8)


        try:
            gdf = geopandas.GeoDataFrame(geo_df, geometry=geopandas.points_from_xy(geo_df.lon, geo_df.lat))
            log.debug('Created geopandas DataFrame')
        except Exception as e:
            log.exception(e)

        # transform to mercator epsg 3857
        gdf.crs = {'init': 'epsg:4326'}
        gdf_merc = gdf.to_crs({'init': 'epsg:3857'})

        gdf_merc.reset_index(inplace=True, drop=True)

        gdf_merc['x'] = gdf_merc['geometry'].x
        gdf_merc['y'] = gdf_merc['geometry'].y

        for key, value in sat_resolution_meter.items():
            gdf_merc.loc[gdf_merc['satellite'] == key, 'resolution_meter'] = value

        try:
            interim_file_path = os.path.join(args.out_file_path, 'interim')
            os.makedirs(interim_file_path, exist_ok=True)
        except Exception as e:
            log.exception(e)
            log.warning(interim_file_path + ' directory cannot be created!')

        try:
            processed_file_path = os.path.join(args.out_file_path, 'processed')
            os.makedirs(processed_file_path, exist_ok=True)
        except Exception as e:
            log.exception(e)
            log.warning(processed_file_path + ' directory cannot be created!')

        try:
            hotspot_json = os.path.join(interim_file_path, args.prefix_name + 'hotspot_'
                                        + end_date.strftime('%Y%m%d') + '.geojson')
            gdf_merc.to_file(hotspot_json, driver='GeoJSON')
            log.info(hotspot_json + ' is saved successfully.')
        except Exception as e:
            log.exception(e)
            log.warning(hotspot_json + ' export warning!')

        # create polygon
        for index, row in gdf_merc.iterrows():
            gdf_merc['geometry'].iloc[index] = get_poly_box(row['x'], row['y'], row['resolution_meter'])

        try:
            hotspot_polygon_json = os.path.join(interim_file_path, args.prefix_name +
                                                'hotspot_polygon_' + end_date.strftime('%Y%m%d') + '.geojson')
            gdf_merc = gdf_merc.round(4)
            gdf_merc.to_file(hotspot_polygon_json, driver='GeoJSON')
            log.info(hotspot_polygon_json + ' is saved successfully.')
        except Exception as e:
            log.exception(e)
            log.warning(hotspot_polygon_json + ' export warning!')

        try:
            log.debug('Processing grid sjoin...')
            df_grid_joined = geopandas.sjoin(df_grid, gdf_merc, op='intersects')
            grid_weight_total = df_grid_joined[['id', 'polar_weight', 'geo_weight', 'weight', 'terra_weight',
                                                'aqua_weight', 'n20_weight', 'npp_weight']].groupby(['id']).sum()
#           grid_weight_total = df_grid_joined[['id', 'polar_weight', 'geo_weight', 'weight']].groupby(['id']).sum()
            grid_geometry = df_grid_joined[['id', 'geometry']].groupby(['id']).first()
            processed_grid = pd.merge(grid_weight_total, grid_geometry, on='id')
            processed_grid_gpd = geopandas.GeoDataFrame(processed_grid)
            processed_grid_gpd.crs = {'init': 'epsg:3857'}
            log.debug('Processing grid completed.')
        except Exception as e:
            log.exception(e)
            log.error('Unable to process grid sjoin!')

        # try:
        #     trained_model = pickle.load(open(model_fpath, 'rb'))
        #     log.debug('Loaded trained model from ' + model_fpath)
        #     processed_grid_gpd['prob'] = trained_model.predict_proba(processed_grid_gpd['weight'].
        #                                                              values.astype(float).reshape(-1,1))[:, 1]
        #     print (processed_grid_gpd.head())
        #     log.info('Probabilities filled using trained model.')
        # except Exception as e:
        #     log.exception(e)
        #
        try:
            hotspot_grid_json = os.path.join(processed_file_path, args.prefix_name + 'hotspot_grid_'
                                             + end_date.strftime('%Y%m%d') + '.geojson')
            processed_grid_gpd.to_file(hotspot_grid_json, driver='GeoJSON')
            log.info(hotspot_grid_json + ' is saved successfully.')
        except Exception as e:
            log.warning(hotspot_grid_json + ' export warning!')
        #
        # try:
        #     ann_file = os.path.join(processed_file_path, args.prefix_name + 'hotspot_grid_'
        #                             + end_date.strftime('%Y%m%d') + '.ann')
        #     save_fred_grid_meteor_ann(processed_grid_gpd, ann_file, low_prob_thres, med_prob_thres)
        #     log.info(ann_file + ' is saved successfully.')
        # except Exception as e:
        #     log.exception(e)
        #     log.warning(ann_file + ' cannot be saved!')

        start_date = start_date + timedelta(days=1)
        date_process = start_date
        end_date = end_date + timedelta(days=1)
Example #47
0
    def _parseMember(self, text):
        data = np.array(text.split('\r\n'))
        data_idxs = []
        new_record = False
        begin_idx = 0
        member_name = data[0]
        dates = []
        # Figure out the indices for the data chunks
        for i in range(len(data)):
            if "STID" in data[i]:
                # Here is information about the record
                spl = data[i].split()

                if spl[2].strip() == "STNM":
                    station = "" # The bufkit file has a blank space for the station name
                    wmo_id = spl[4]
                    dates.append(datetime.strptime(spl[7], '%y%m%d/%H%M'))
                else:
                    station = spl[2]
                    wmo_id = spl[5]
                    dates.append(datetime.strptime(spl[8], '%y%m%d/%H%M'))

                slat = float(data[i+1].split()[2])
                slon = float(data[i+1].split()[5])
                selv = float(data[i+1].split()[8])
                stim = float(data[i+2].split()[2])

            if data[i].find('HGHT') >= 0 and new_record == False:
                # we've found a new data chunk
                new_record = True
                begin_idx = i+1
            elif 'STID' in data[i] and new_record == True:
                # We've found the end of the data chunk
                new_record = False
                data_idxs.append((begin_idx, i-1))
            elif 'STN' in data[i] and new_record == True:
                # We've found the end of the last data chunk of the file
                new_record = False
                data_idxs.append((begin_idx, i))
            elif new_record == True:
                continue
                    ##print data[i]
        
        data_idxs = data_idxs[1:]
        # Make arrays to store the data
        profiles = []        

        # Parse out the profiles
        for i in range(len(data_idxs)):
            data_stuff = data[data_idxs[i][0]: data_idxs[i][1]]
            profile_length = len(data[data_idxs[i][0]: data_idxs[i][1]])//2

            hght = np.zeros((profile_length,), dtype=float)
            pres = np.zeros((profile_length,), dtype=float)
            tmpc = np.zeros((profile_length,), dtype=float)
            dwpc = np.zeros((profile_length,), dtype=float)
            wdir = np.zeros((profile_length,), dtype=float)
            wspd = np.zeros((profile_length,), dtype=float)
            omeg = np.zeros((profile_length,), dtype=float)

            for j in np.arange(0, profile_length * 2, 2):
                if len(data_stuff[j+1].split()) == 1:
                    hght[j // 2] = float(data_stuff[j+1].split()[0])
                else:
                    hght[j // 2] = float(data_stuff[j+1].split()[1])
                tmpc[j // 2] = float(data_stuff[j].split()[1])
                dwpc[j // 2] = float(data_stuff[j].split()[3])
                pres[j // 2] = float(data_stuff[j].split()[0])
                wspd[j // 2] = float(data_stuff[j].split()[6])
                wdir[j // 2] = float(data_stuff[j].split()[5])
                omeg[j // 2] = float(data_stuff[j].split()[7])

            prof = profile.create_profile(profile='raw', pres=pres, hght=hght, tmpc=tmpc, dwpc=dwpc, 
                wdir=wdir, wspd=wspd, omeg=omeg, location=station, date=dates[i], latitude=slat)

            profiles.append(prof)

        return member_name, profiles, dates
Example #48
0
def extract_date(s, loader_context):
    date_format = loader_context['date_format']
    d = datetime.strptime(s, date_format)
    return d.strftime('%d/%m/%Y')
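
A hedged usage sketch: in a real Scrapy item loader the context is supplied by the loader itself, but the processor can be exercised directly with a plain dict. The function is repeated here only so the sketch runs standalone, and the input values are illustrative.

from datetime import datetime

def extract_date(s, loader_context):
    date_format = loader_context['date_format']
    return datetime.strptime(s, date_format).strftime('%d/%m/%Y')

print(extract_date("2019-08-02 10:15", {'date_format': '%Y-%m-%d %H:%M'}))  # -> 02/08/2019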
Example #49
0
from datetime import datetime
from datetime import timedelta
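# Advent of Code 2018 day 4: each input line looks like
#   [1518-11-01 00:00] Guard #10 begins shift
#   [1518-11-01 00:05] falls asleep
#   [1518-11-01 00:25] wakes up
# so stripping '[' and splitting on '] ' yields {timestamp: event} pairs.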

with open("2018day4.txt", 'r') as file:
    data = {y : z for y,z in [x.strip('[').split('] ') for x in file.read().splitlines()]}
    guards = {}
    for k,v in sorted(data.items()):
        if 'Guard' in v:
            guard = int(v.split(' ')[1].strip('#'))
            if guard not in guards:
                guards[guard] = []
        elif 'falls' in v:
            sleep_date = datetime.strptime(k, '%Y-%m-%d %H:%M')
        elif 'wakes' in v:
            wake_date = datetime.strptime(k, '%Y-%m-%d %H:%M')
            while sleep_date != wake_date:
                guards[guard].append(int(str(sleep_date).split(':')[1]))
                sleep_date += timedelta(minutes=1)
    mx = max(guards.items(), key = lambda x: len(x[1]))
    print(mx[0] * max(mx[1], key = lambda x: mx[1].count(x)))
    nmx = max(guards.items(), key = lambda x: max(x[1].count(i) for i in range(60)))
    print(nmx[0] * max(nmx[1], key = lambda x: nmx[1].count(x)))
Example #50
0
    def check_model(self, datas, model, model_name=''):
        """
        Keys description in model
        - fields with 'required' key :
            * not required are fill with '' if they are not given
            * required doesn't accept '' value
        - 'type' key : if you provide a type, this type is check first before
                                                                    other keys
        - 'date' key : accepted type for date : str or datetime
                                                        (convert one to other)
        - 'min_size' and 'max_size' : this is used for str or unicode
        - 'min_number' and 'max_number' : this is used for int or float
        keys examples:
        model = {
            'my_field':     {'max_size': 35, 'required': True},
            'my_date':      {'date': '%d/%m/%Y'},
            "weight":       {'max_number': 50, 'type': float},
        }

        Carefull cases considered in this script (search each 'case' string
        in source code below):
            Case 1/ key in model with not in datas :
                    => no check but with a default value '' (only for display)
            Case 2/ key in datas but with a False value (string with no value :
                empty or Null value according to database) :
                    => no check but with a default value ''
            Case 3/ data == 0.0 or 0 which is considered like False but is not :
                    => check but convert in string
        """
        if model_name:
            model_name = '(model: ' + model_name + ')'
        for field, definition in model.items():
            #check type before all other checks if requested in model
            if 'type' in definition and field in datas:
                self.check_type(field, [definition['type']], datas[field])
            to_check = self.must_be_checked(datas, field)
            for key, val in definition.items():
                if to_check:
                    data = datas[field]
                    size = self.evaluate_size_according_to_type(data)
                    if key == 'max_size':
                        self.check_type(field, [str, unicode], data)
                        if size > val:
                            raise InvalidSize(
                                "Max size for field '%s' is "
                                "%s :  %s given\nCurrent field value : %s" %
                                (field, val, size, data))
                    elif key == 'min_size':
                        self.check_type(field, [str, unicode], data)
                        if size < val:
                            raise InvalidSize(
                                "Min size for field '%s' is "
                                "%s :  %s given\nCurrent field value : %s" %
                                (field, val, size, data))
                    elif key == 'min_number':
                        self.check_type(field, [int, float], data)
                        if size < val:
                            raise InvalidSize("Min number for field '%s' is "
                                              "%s :  %s given" %
                                              (field, val, size))
                    elif key == 'max_number':
                        self.check_type(field, [int, float], data)
                        if size > val:
                            raise InvalidSize("Max number for field '%s' is "
                                              "%s :  %s given" %
                                              (field, val, size))
                    elif key == 'in' and data not in val:
                        raise InvalidValueNotInList(
                            "field '%s' with value '%s' must belong "
                            "to this list %s" % (field, data, val))
                    elif key == 'date':
                        self.check_type(field, [str, datetime], data)
                        if isinstance(data, datetime):
                            try:
                                datas[field] = datetime.strftime(data, val)
                            except:
                                raise InvalidType(
                                    "The date '%s' must be in the format '%s'"
                                    % (data, val))
                        elif isinstance(data, str):
                            try:
                                datas[field] = \
                                    datetime.strptime(data, val).strftime(val)
                            except:
                                raise InvalidType(
                                    "The date '%s' must be in the format '%s'"
                                    % (data, val))
                    elif key == 'numeric':
                        #TODO : to end
                        self.check_type(field, [int, float], data)
                        datas[field] = val % data
                    data = ''
                else:
                    if key == 'required' and val is True:
                        raise InvalidMissingField(
                            "Required field '%s' is missing %s" %
                            (field, model_name))
                    else:
                        # must have an empty value to be called
                        # in python template (mako, jinja2, etc)
                        if field not in datas:
                            #case 1/
                            datas[field] = ''
                        elif type(datas[field]) == bool:
                            #case 2/
                            datas[field] = ''
            # case 3/
            if type(datas[field]) in [int, float]:
                datas[field] = str(datas[field])
        return datas
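A hedged sketch of the model/datas contract described in the docstring above; the validator instance, its check_type helper and the Invalid* exception classes come from the surrounding project and are only assumed here:

from datetime import datetime

# Hypothetical data; field names mirror the docstring example.
model = {
    'my_field': {'max_size': 35, 'required': True},
    'my_date':  {'date': '%d/%m/%Y'},
    'weight':   {'max_number': 50, 'type': float},
}
datas = {'my_field': 'ACME', 'my_date': datetime(2015, 10, 1), 'weight': 12.5}
# validator.check_model(datas, model, model_name='shipment') would return datas with
# 'my_date' rendered as '01/10/2015' and 'weight' converted to the string '12.5' (case 3).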
Example #51
0
# Imports required by this snippet.
import csv
import json
import math
import sys
from datetime import datetime, date

import numpy


def main():
    if len(sys.argv) >= 2:
        input_filename = sys.argv[1]
    else:
        print('You have to specify an input filename as a first argument.')
        return

    if len(sys.argv) >= 3:
        input_id = sys.argv[2]
    else:
        print('You have to specify student id as a second argument.')
        return

    with open(input_filename, newline='') as csvfile:
        reader = csv.DictReader(csvfile)

        # for row in reader:
        #     print(row)

        input_data = None

        if input_id == 'average':
            points_sum = {}
            count = {}
            for row in reader:
                for key, value in row.items():
                    if key == 'student':
                        continue

                    if key not in points_sum:
                        points_sum[key] = 0.0
                    points_sum[key] += float(value)

                    if key not in count:
                        count[key] = 0
                    count[key] += 1

            input_data = {}
            for key, value in points_sum.items():
                input_data[key] = value/count[key]
            input_data['student'] = 'average'

            #print(input_data)
        else:
            for row in reader:
                if row['student'] != input_id:
                    continue
                else:
                    input_data = row

        raw_points = []
        points_by_date = []

        raw_data_exercise = {}
        raw_points_per_date = {}

        for key, value in input_data.items():
            if key == 'student':
                continue
            else:
                parsed = key.strip().replace(' ', '').split('/')
                # print(parsed)

                output_key = parsed[1]

                if output_key not in raw_data_exercise:
                    raw_data_exercise[output_key] = 0

                raw_data_exercise[output_key] += float(value)

                if parsed[0] not in raw_points_per_date:
                    raw_points_per_date[parsed[0]] = 0

                raw_points_per_date[parsed[0]] += float(value)

        for key, value in raw_data_exercise.items():
            raw_points.append(value)

        for key, points in raw_points_per_date.items():
            points_by_date.append((key, points))

        # print(points_by_date)

        dates = []
        points = []
        for key, value in sorted(points_by_date, key=lambda k: k[0]):
            dates.append(datetime.strptime(key, '%Y-%m-%d').date().toordinal())
            points.append(value)

        for i in range(1, len(points)):
            points[i] = points[i - 1] + points[i]

        start_date = datetime.strptime('2018-9-17', '%Y-%m-%d').date().toordinal()
        dates = numpy.array([date - start_date for date in dates])

        # print(dates, points)

        regression_slope = numpy.linalg.lstsq([[date] for date in dates], points, rcond=-1)[0][0]

        output = {
            'passed': numpy.count_nonzero(numpy.array(raw_points)),
            'median': numpy.median(numpy.array(raw_points)),
            'mean': numpy.mean(numpy.array(raw_points)),
            'total': sum(raw_points),
            'regression slope': regression_slope,

        }
        if regression_slope != 0:
            output['date 16'] = str(date.fromordinal(math.floor((16.0 / regression_slope) + start_date)))
            output['date 20'] = str(date.fromordinal(math.floor((20.0 / regression_slope) + start_date)))

        print(json.dumps(output, indent=4))
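The regression slope above is a least-squares fit through the origin (cumulative points ≈ slope * days since the start date). A minimal standalone sketch of that step with made-up numbers:

import numpy

days = numpy.array([1, 3, 7, 10])
points = numpy.array([2.0, 6.5, 13.0, 21.0])
slope = numpy.linalg.lstsq(days.reshape(-1, 1), points, rcond=None)[0][0]
print(slope)  # about 2.03 points per day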
Example #52
0
def myparser(x):
    return datetime.strptime(x, '%b %d, %Y @ %H:%M:%S.%f')
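A hedged usage example (assuming the usual datetime import); the input string is hypothetical but matches the Kibana-style format the parser expects:

print(myparser('Sep 17, 2018 @ 14:05:36.250'))  # -> 2018-09-17 14:05:36.250000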
Example #53
0
 def get_unix_time(time_str):
     # Return a Unix epoch timestamp; the naive datetime is interpreted as local time.
     return datetime.strptime(time_str, '%Y-%m-%d %H:%M:%S').timestamp()
Example #54
0
def get_date_time(date_time):
    datetime_object = datetime.strptime(date_time, '%b %d %Y %I:%M%p')
    return datetime_object
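A hedged usage example (assuming the usual datetime import); the input string is made up but matches the '%b %d %Y %I:%M%p' format:

print(get_date_time('Jun 1 2005 1:33PM'))  # -> 2005-06-01 13:33:00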
Example #55
0
 def return_modify_date(self, platform, file):
     if platform not in self.platforms:
         print('Platform for the game Streets Of Rage 4 was not found.')
     else:
         createDate = datetime.strptime(ctime(stat(file).st_mtime), '%a %b %d %H:%M:%S %Y').date()
         return createDate
Example #56
0
def day_range(start_date_string, end_date_string):
    start_date = datetime.strptime(start_date_string, ISODATE).date()
    end_date = datetime.strptime(end_date_string, ISODATE).date()
    delta = end_date - start_date
    for i in range(delta.days + 1):
        yield (start_date + timedelta(i)).isoformat()
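A hedged usage sketch, assuming the usual datetime/timedelta imports and a module-level ISODATE = '%Y-%m-%d':

print(list(day_range('2018-09-17', '2018-09-19')))
# -> ['2018-09-17', '2018-09-18', '2018-09-19']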
Example #57
0
    def retrieve(self, request, pk=None, format=None):
        """Get all metrics for a pod

        Arguments:
            request {[Django request]} -- The request object

        Keyword Arguments:
            pk {string} -- Name of the pod
            format {string} -- Output format to use (default: {None})

        Returns:
            Json -- Object containing all metrics for the pod
        """
        q = Q()
        since = self.request.query_params.get("since", None)

        if since is not None:
            since = datetime.strptime(since, "%Y-%m-%dT%H:%M:%S.%fZ")
            since = pytz.utc.localize(since)
            q &= Q(date__gte=since)

        summarize = self.request.query_params.get("summarize", None)

        if summarize is not None:
            summarize = int(summarize)

        metric_filter = self.request.query_params.get("metric_filter", None)

        if metric_filter:
            q &= Q(name=metric_filter)

        last_n = self.request.query_params.get("last_n", None)

        if last_n:
            last_n = int(last_n)

        metric_type = self.request.query_params.get("metric_type", "pod")

        if metric_type == "pod":
            pod = KubePod.objects.filter(name=pk).first()
            metrics = pod.metrics
        else:
            run = ModelRun.objects.get(pk=pk)
            metrics = run.metrics

        if request.accepted_renderer.format != "zip":
            # generate json
            result = self.__format_result(metrics, q, summarize, last_n)

            return Response(result, status=status.HTTP_200_OK)

        result_file = io.BytesIO()

        with zipfile.ZipFile(result_file,
                             mode="w",
                             compression=zipfile.ZIP_DEFLATED) as zf:

            if metric_type == "run":
                run = ModelRun.objects.get(pk=pk)
                pods = run.pods.all()

                filename = secure_filename(run.name)

                since = run.created_at
                until = run.finished_at

                q &= Q(date__gte=since)

                if until:
                    q &= Q(date__lte=until)

                zf = self.__format_zip_result(metrics, q, summarize, last_n,
                                              "result", zf)
                try:
                    task_result = metrics.get(name="TaskResult @ 0")

                    with io.StringIO() as task_result_file:
                        task_result_file.write(task_result.value)

                        zf.writestr("official_result.txt",
                                    task_result_file.getvalue())
                except ObjectDoesNotExist:
                    pass

                for pod in pods:
                    pod_metrics = pod.metrics
                    zf = self.__format_zip_result(pod_metrics, q, summarize,
                                                  last_n, pod.name, zf)

            else:
                zf = self.__format_zip_result(metrics, q, summarize, last_n,
                                              "result", zf)
                pod = KubePod.objects.filter(name=pk).first()
                filename = secure_filename(pod.name)

            zf.close()

            response = Response(result_file.getvalue(),
                                status=status.HTTP_200_OK)

            response["Content-Disposition"] = \
                "attachment; filename=metrics_{}.zip".format(filename)
            return response
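A hedged client-side sketch of the query parameters this endpoint reads (since, metric_filter, summarize, last_n); the route prefix is an assumption and depends on the project's URL configuration:

import requests

resp = requests.get(
    'http://localhost:8000/api/pods/my-pod/metrics/',  # assumed path
    params={
        'since': '2019-01-01T00:00:00.000000Z',   # parsed with %Y-%m-%dT%H:%M:%S.%fZ
        'metric_filter': 'cpu_usage',             # hypothetical metric name, maps to Q(name=...)
        'summarize': 60,
        'last_n': 100,
    },
)
print(resp.json())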
Example #58
0
import os
from datetime import datetime
from dateutil.rrule import rrule, DAILY
from pymongo import MongoClient
from dotenv import load_dotenv

load_dotenv()

MONGO_URL = os.environ.get("MONGO_URL")
SINCE_DATE = os.environ.get("SINCE_DATE")
UNTIL_DATE = os.environ.get("UNTIL_DATE")

mongo_client = MongoClient(MONGO_URL)
db = mongo_client.get_default_database()

if SINCE_DATE:
    since_date = datetime.strptime(SINCE_DATE, "%Y-%m-%d")
else:
    latest_msg = db.company_daily_messages.find_one({}, sort=[("date", -1)])
    if latest_msg:
        since_date = latest_msg["date"]
    else:
        since_date = datetime.now()

if UNTIL_DATE:
    until_date = datetime.strptime(UNTIL_DATE, "%Y-%m-%d")
else:
    until_date = datetime.now()

print(f"since_date: {since_date}")
print(f"until_date: {until_date}")
Example #59
0
 def mtime(self):
     # assert self.exists()
     stat = self._stat()
     mtime = self.mtime_re.search(stat).group(1)
     date = datetime.strptime(mtime, "%Y-%m-%d %H:%M:%S.%f")
     return date.timestamp()
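A self-contained sketch of the regex-plus-strptime step above; the pattern and the stat output are assumptions, and the real mtime_re lives elsewhere in the class:

import re
from datetime import datetime

mtime_re = re.compile(r'Modify: (\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+)')  # assumed pattern
stat_output = 'Modify: 2018-09-17 14:05:36.250000 +0000'                      # made-up stat line
mtime = mtime_re.search(stat_output).group(1)
print(datetime.strptime(mtime, '%Y-%m-%d %H:%M:%S.%f').timestamp())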
Example #60
0
 def extract_date_available_market(self, 
                                   start_, 
                                   end_, 
                                   trd_cal_=trading_calendar()):
     """
     Match NYSE trading calendar days with input days.
     """
     #define start and endDate as datetime
     startDate=datetime.strptime(start_,'%Y-%m-%d')
     endDate=datetime.strptime(end_,'%Y-%m-%d')
     
     #check if both are the same dates
     if startDate == endDate:
         
         # use the single start date (as a date object)
         list_pre = [startDate.date()]
         
         # find the nearest calendar date to use as a benchmark
         date = min(
             trd_cal_, key= lambda x: abs(x - list_pre[0])
         )
         
         #check if nearest is the same as input date
         if date == list_pre[0]:
             idx = [trd_cal_.index(date)]
             return [trd_cal_[idx[0]].strftime('%Y-%m-%d')]         
         # otherwise there is no trading session on that date
         else:
             print("No trading days at {}".format(
                 startDate.date())
                  )
             sys.exit()
     #if input dates are different 
     else:
         #check nearest date to start date
         date = min(
             trd_cal_, key=lambda x: abs(x - startDate.date())
         )
         idx_1 = trd_cal_.index(date)
         
         #check nearest date to end date
         date = min(
             trd_cal_, key=lambda x: abs(x - endDate.date())
         )
         idx_2 = trd_cal_.index(date)
         
         #make range of dates
         resulted_dates_range = trd_cal_[idx_1:idx_2+1]
         
         #check if range of dates is less than 1 (no dates)
         if len(resulted_dates_range)<1:
             print("No trading days in {} to {}".format(
                 startDate.date(), 
                 endDate.date())
                  )
             sys.exit()
             
         #if there is at least one trading day
         else:
             return [result_date_.strftime('%Y-%m-%d') 
                     for result_date_ in resulted_dates_range]
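The core of the lookup above is a nearest-date search against the trading calendar. A minimal standalone sketch of that step, with a made-up mini calendar standing in for trading_calendar():

from datetime import date, datetime

trd_cal = [date(2018, 9, 17), date(2018, 9, 18), date(2018, 9, 19)]  # hypothetical sessions
target = datetime.strptime('2018-09-18', '%Y-%m-%d').date()
nearest = min(trd_cal, key=lambda x: abs(x - target))
print(nearest, trd_cal.index(nearest))  # 2018-09-18 1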