def _fix_ctrlday(ctrldate):
    return (
        (ctrldate + _timedelta(days=2))
        if ctrldate.weekday() == 5
        else (ctrldate + _timedelta(days=1))
        if ctrldate.weekday() == 6
        else ctrldate
    )
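
# The helper above pushes weekend control dates onto the following Monday
# (Saturday +2 days, Sunday +1 day). A minimal standalone sketch of the same
# adjustment using only the standard library:
from datetime import date, timedelta

def shift_weekend_to_monday(d):
    # Saturday (weekday 5) -> +2 days, Sunday (weekday 6) -> +1 day, weekdays unchanged
    return d + timedelta(days={5: 2, 6: 1}.get(d.weekday(), 0))

print(shift_weekend_to_monday(date(2021, 1, 2)))  # Saturday -> 2021-01-04 (Monday)
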
 def fix_ctrlday(self, week):
     t_day = _utc.now().date() + _timedelta(days=week * 7)
     return (
         t_day - _timedelta(days=1)
         if t_day.weekday() == 5
         else t_day + _timedelta(days=1)
         if t_day.weekday() == 6
         else t_day
     )
	def update(cls, id_emb, parto_prob):
		try:
			parto_prob = _to_yymmdd(parto_prob)
			pp = _to_date(parto_prob) - _timedelta(days=280)
			ctrls = _PreNatal(pp.year, pp.month, pp.day)
			if not ctrls.check_range():
				return (False, list())
			promos = _PrePromotional(pp.year, pp.month, pp.day).controls_dates()
			agendas_ids = list()
			with _db_session:
				pregnancy = cls.get_byId(id_emb=id_emb)
				pregnancy.set(parto_prob=parto_prob)
				_controlsCrt.delete_controls(pregnancy)
				agendas_ids = _agendasCrt.delete_agendas(pregnancy.embarazada)
				pregnancy.controles += [_Control(embarazo=pregnancy, nro_con=ctrl[0], fecha_con=ctrl[1]) for ctrl in ctrls.controls_dates()]
				_flush()
				for cn in pregnancy.controles:
					msg = _messagesCrt.get_byNumbControl(nro_control=cn.nro_con)
					_agendasCrt.save(persona=pregnancy.embarazada, mensaje=msg, fecha_con=cn.fecha_con, days=7)
				for cn in promos:
					msg = _messagesCrt.get_byNumbControl(nro_control=cn[0], tipo=3)
					_agendasCrt.save(persona=pregnancy.embarazada, mensaje=msg, fecha_con=cn[1])
				_commit()
			return (True, agendas_ids)
		except Exception as e:
			#raise e
			print('Error: {}'.format(e))
			return (False, list())
Example #4
def list_files(datastreams, startdate, enddate=None, pattern=None):
    """
    List the files in one or more datastreams in the ARM Archive.

    Parameters
    ----------
    datastreams : str or list
        Datastream or list of datastreams
    startdate : str
        Starting date for listing, formatted as YYYYMMDD.
    enddate : str or None, optional
        Ending date for listing, formatted as YYYYMMDD. None will select the
        next day after startdate.
    pattern : str, optional
        Regular expression used to filter the returned file names.
        None will return all files.

    Returns
    -------
    files : list
        List of files in the datastreams.

    """
    client = _init_client()
    if enddate is None:
        dt = _datetime.strptime(startdate, '%Y%m%d')
        enddate = (dt + _timedelta(days=1)).strftime('%Y%m%d')
    files = client.service.getFiles(datastreams, startdate, enddate)
    if len(files) == 1 and files[0] == 'No data files found':
        return []
    return sorted(_regex_filter(files, pattern))
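
# Usage sketch for list_files above: when enddate is None, the function picks the
# day after startdate, as computed here (the datastream name in the commented call
# is hypothetical and a real call needs the ARM Archive client):
from datetime import datetime, timedelta

startdate = '20190101'
enddate = (datetime.strptime(startdate, '%Y%m%d') + timedelta(days=1)).strftime('%Y%m%d')
print(enddate)  # 20190102
# files = list_files('sgpmetE13.b1', startdate, enddate, pattern=r'\.cdf$')
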
Example #5
    def plot_total_activity(self, start_time, end_time, dt, activity_type='both', return_vals=False):
        start_ms = _datetime_to_tstamp(start_time)
        end_ms = _datetime_to_tstamp(end_time)
        dt_ms = dt * 1000
        time_ax = _np.arange(start_ms, end_ms, dt_ms)
        
        timefmt = _mdates.DateFormatter('%H:%M')
        fig = _plt.figure()
        ax = fig.add_subplot(111)
        ax.xaxis_date()
        ax.xaxis.set_major_formatter(timefmt)

        tot_hist = _np.zeros_like(time_ax[:-1])
        for s in self.stations:
            try:
                tot_hist += self.stations[s].activity_histogram(time_ax, activity_type=activity_type)
            except Exception:
                print("Getting activity failed for station",
                      "%d with error:\n  %s" %
                      (self.stations[s].station_id, _sys.exc_info()[1]))

        times = [_tstamp_to_datetime(t) for t in time_ax[:-1]]
        ax.plot(times, tot_hist, c="0.3", lw=2)

        curr_line = _datetime(start_time.year, start_time.month, start_time.day)
        while curr_line < end_time:
            ax.axvline(curr_line, color='0.5', ls='--')
            curr_line += _timedelta(days=1)

        ax.set_xlim(start_time, end_time)
        ax.set_xlabel("Time")
        ax.set_ylabel("Total activity across city")

        if return_vals:
            return times, time_ax, tot_hist
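
# The method above bins activity into dt-second windows between start_time and
# end_time: time_ax holds bin edges in milliseconds and the histogram has one entry
# fewer. A standalone sketch of that edge construction (assuming _datetime_to_tstamp
# is a milliseconds-since-epoch conversion):
import numpy as np
from datetime import datetime

start, end, dt = datetime(2021, 1, 1), datetime(2021, 1, 2), 3600  # dt in seconds
start_ms = int(start.timestamp() * 1000)
end_ms = int(end.timestamp() * 1000)
time_ax = np.arange(start_ms, end_ms, dt * 1000)  # bin edges, hourly here
counts = np.zeros_like(time_ax[:-1])              # one bin between each pair of edges
print(len(time_ax), len(counts))  # 24 23
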
def add_completed_entry(request, proj_pk, role_id):
    role = Role.objects.get(id=role_id)
    questionnaire = role.get_questionnaire()
    questions = questionnaire.get_questions()
    QuestionFormset = formset_factory(QuestionForm,
            extra=5,
            max_num=len(questions))
    QuestionFormset.form = staticmethod(curry(QuestionForm, role))
    if request.method == 'POST':
        d = request.POST.get('date').split('-')
        date = _date(year=int(d[0]), month=int(d[1]), day=int(d[2]))
        uid_form = UIDForm(role, date, request.POST, request.FILES)
        formset = QuestionFormset()

        if request.is_ajax():
            if uid_form.is_valid():
                uid_status=uid_form.save()
            else:
                return HttpResponse('Error')
            QuestionFormset.form = staticmethod(curry(QuestionForm, role,
                uid_status))
            formset = QuestionFormset(request.POST, request.FILES)
            if formset.is_valid():
                for form in formset:
                    form.save()
                return HttpResponse('Success')
            else:
                return HttpResponse('Error')

        if uid_form.is_valid():
            uid_status = uid_form.save()
        else:
            return render(request, 'main/add_completed_entry.html',
                    {'uid_form':uid_form, 'formset':formset,
                        'date':date, 'role':role})

        QuestionFormset.form = staticmethod(curry(QuestionForm, role,
            uid_status))

        formset = QuestionFormset(request.POST, request.FILES)
        if formset.is_valid():
            for form in formset:
                form.save()
            return HttpResponseRedirect(reverse('add-completed-entry-done',
                kwargs={'role_id':role.id}))
        else:
            return render(request, 'main/add_completed_entry.html',
                    {'uid_form':uid_form, 'formset':formset,
                        'date':date, 'role':role})
    else:
        date = _date.today() - _timedelta(days=2)
        uid_form = UIDForm(role, date)
        formset = QuestionFormset()
        return render(request, 'main/add_completed_entry.html',
                {'uid_form':uid_form, 'formset':formset,
                    'date':date, 'role':role})
Example #7
def tzset():

    """
    Reset the local time conversion rules per environment variable
    :envvar:`TZ`

    .. seealso:: :func:`time.tzset`

    """

    _tzset()

    global _TIMEDELTA_DST, _TIMEDELTA_DSTSTD, _TIMEDELTA_STD
    _TIMEDELTA_STD = _timedelta(seconds=-_localtime.timezone)
    if _localtime.daylight:
        _TIMEDELTA_DST = _timedelta(seconds=-_localtime.altzone)
    else:
        _TIMEDELTA_DST = _TIMEDELTA_STD
    _TIMEDELTA_DSTSTD = _TIMEDELTA_DST - _TIMEDELTA_STD
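
# Usage sketch of the tzset() wrapper above; 'Europe/Berlin' is an arbitrary example
# zone and time.tzset() is POSIX-only:
import os
import time
from datetime import timedelta

os.environ['TZ'] = 'Europe/Berlin'
time.tzset()
std = timedelta(seconds=-time.timezone)
dst = timedelta(seconds=-time.altzone) if time.daylight else std
print(std, dst)  # 1:00:00 2:00:00
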
	def save(self, form, user_id):
		try:
			#print _to_yymmdd(form.parto_prob)
			form.parto_prob = _to_yymmdd(form.parto_prob)
			form.f_nac = _to_yymmdd(form.f_nac)
			pp = _to_date(form.parto_prob) - _timedelta(days=280)
			#print 'origin: {}'.format(pp)
			ctrls = _PreNatal(pp.year, pp.month, pp.day)
			if not ctrls.check_range():
				return False
			promos = _PrePromotional(pp.year, pp.month, pp.day).controls_dates()
			check_pregnant = lambda: len([i for i in form.keys() if not i.startswith('c_')]) 
			check_contact = lambda: len([i for i in form.keys() if i.startswith('c_')])
			#print check_pregnant()
			#print check_contact()
			if check_pregnant()>5:
				em_fields = {k:v for k,v in form.iteritems() if not k.startswith('c_') and k not in ['id_com','id_etn','parto_prob']}
				#print em_fields
			cn_fields = lambda: {k.replace('c_',''):v for k,v in form.iteritems() if k.startswith('c_')}
			#print cn_fields()
			with _db_session:
				tipos = [_Tipo.get(id_tip=1), _Tipo.get(id_tip=2)]
				com = _Comunidad.get(id_com=form.id_com)
				em_etn = _Etnia.get(id_etn=form.id_etn)
				if check_pregnant()>5:
					em = _Persona(comunidad=com, etnia=em_etn, tipos=[tipos[0]], **em_fields)
					_flush()
				else:
					em = _Persona.get(telf=form.telf)
					em.set(f_nac=form.f_nac, comunidad=com, etnia=em_etn)
					em.tipos += [tipos[0]]
					_flush()
				embarazo = _Embarazo(embarazada=em, parto_prob=form.parto_prob)
				embarazo.controles += [_Control(embarazo=embarazo, nro_con=ctrl[0], fecha_con=ctrl[1]) for ctrl in ctrls.controls_dates()]
				_flush()
				for cn in embarazo.controles:
					msg = _messagesCrt.get_byNumbControl(nro_control=cn.nro_con)
					_agendasCrt.save(persona=em, mensaje=msg, fecha_con=cn.fecha_con, days=7)
				for cn in promos:
					msg = _messagesCrt.get_byNumbControl(nro_control=cn[0], tipo=3)
					_agendasCrt.save(persona=em, mensaje=msg, fecha_con=cn[1])
				if check_contact()==1:
					contacto = _Persona.get(telf=form.c_telf)
					contacto.embarazadas += [em]
					if tipos[1] not in contacto.tipos:
						contacto.tipos += [tipos[1]]
					_flush()
				elif check_contact()>=5:
					cnt = _Persona(comunidad=com, tipos=[tipos[1]], **cn_fields())
					_flush()
					cnt.embarazadas += [em]
				_commit()
			return True
		except Exception as e:
			#raise e
			print(e)
			return False
Example #9
def _get_local_tzinfo():
    """

    get local tzinfo

    @return:
    @rtype:
    """
    dst = time.localtime().tm_isdst
    tz_offset = time.altzone
    if dst:
        tz_offset += 3600
    td = _timedelta(seconds=-tz_offset)
    return _timezone(td)
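
# A standalone variant of the helper above, building the fixed-offset tzinfo with
# datetime.timezone directly (time.altzone when DST is in effect, time.timezone
# otherwise):
import time
from datetime import datetime, timedelta, timezone

offset_s = -time.altzone if time.localtime().tm_isdst else -time.timezone
local_tz = timezone(timedelta(seconds=offset_s))
print(datetime.now(local_tz).isoformat())
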
Example #10
 def _setExpiration(self, value):
     if value is None:
         value = DEFAULT_EXPIRATION_TIME
     try:
         value = int(value)
     except Exception as e:
         raise TypeError("Expiration time assignment must be an integer")
     try:
         value = _timedelta(0, value, 0)
     except Exception as e:
         raise ValueError("Value cannot be understood as a delta time")
     if value > UPPER_LIMIT_EXPIRATION_TIME:
         raise OverflowError("Too big expiration time")
     else:
         self._expiration = value
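
# Minimal sketch of the validation above; DEFAULT_EXPIRATION_TIME and
# UPPER_LIMIT_EXPIRATION_TIME are module constants not shown, so the values here
# are made up:
from datetime import timedelta

DEFAULT_EXPIRATION_TIME = 300                     # seconds (hypothetical default)
UPPER_LIMIT_EXPIRATION_TIME = timedelta(hours=1)  # hypothetical cap

def to_expiration(value=None):
    value = DEFAULT_EXPIRATION_TIME if value is None else int(value)
    delta = timedelta(0, value, 0)  # (days, seconds, microseconds), as in the setter
    if delta > UPPER_LIMIT_EXPIRATION_TIME:
        raise OverflowError("Too big expiration time")
    return delta

print(to_expiration(60))  # 0:01:00
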
Example #11
def _timedelta_from_rdf_duration(literal):

    match = _sdt.ISO8601_DURATION_RE.match(literal)

    if not match:
        raise ValueError('invalid RDF interval literal {!r}: expecting a'
                          ' literal that matches the format {!r}'
                          .format(literal, _sdt.ISO8601_DURATION_RE.pattern))

    return _timedelta(years=int(match.group('years') or 0),
                      months=int(match.group('months') or 0),
                      days=int(match.group('days') or 0),
                      hours=int(match.group('hours') or 0),
                      minutes=int(match.group('minutes') or 0),
                      seconds=int(match.group('seconds') or 0),
                      microseconds=(int(match.group('frac_seconds') or 0)
                                    * 1000000))
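
# Note: datetime.timedelta has no 'years'/'months' arguments, so _timedelta above is
# presumably a richer duration type (e.g. dateutil's relativedelta). A stdlib-only
# sketch covering the day-and-smaller components, with a simplified pattern (the real
# _sdt.ISO8601_DURATION_RE may differ):
import re
from datetime import timedelta

DURATION_RE = re.compile(
    r'^P(?:(?P<days>\d+)D)?'
    r'(?:T(?:(?P<hours>\d+)H)?(?:(?P<minutes>\d+)M)?(?:(?P<seconds>\d+)S)?)?$'
)

m = DURATION_RE.match('P1DT2H30M')
print(timedelta(days=int(m.group('days') or 0),
                hours=int(m.group('hours') or 0),
                minutes=int(m.group('minutes') or 0),
                seconds=int(m.group('seconds') or 0)))  # 1 day, 2:30:00
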
Example #12
    def __init__(self, organization, application=None, subsystem=None, format="conf", base_scope="user"):

        if format not in self._formats:
            raise ValueError("unregistered format {!r}".format(format))
        elif not organization:
            raise ValueError("invalid organization {!r}".format(organization))
        if application is None:
            if subsystem is not None:
                raise ValueError("application is required if subsystem is" " given")
        elif not application:
            raise ValueError("invalid application {!r}".format(application))
        if subsystem is not None and not subsystem:
            raise ValueError("invalid subsystem {!r}".format(subsystem))
        if base_scope not in ["user", "system"]:
            raise ValueError("invalid base_scope {!r}".format(base_scope))

        self._application = application
        self._base_scope_fallback = True
        self._base_scope = base_scope
        self._cache_ = {}
        self._cache_creationtime = None
        self._cache_lifespan = _timedelta(seconds=6)
        self._component_fallback_enabled = {
            ("subsystem", "application"): True,
            ("subsystem", "organization"): True,
            ("application", "organization"): True,
        }
        self._defaults = self.__class__._Defaults(self)
        self._deleting = False
        self._format = format
        self._group = None
        self._isopen = False
        self._keys_in_primarylocation = set()
        self._keystowrite = set()
        self._locations = []
        self._organization = organization
        self._previous_groups = []
        self._subsystem = subsystem

        if subsystem is not None:
            self._component_scope = "subsystem"
        elif application is not None:
            self._component_scope = "application"
        else:
            self._component_scope = "organization"
def add_uncompleted_entry(request, proj_pk, role_id):
    role = Role.objects.get(id=role_id)
    ErrorFormset = formset_factory(ErrorForm, extra=1,
            max_num = len(ErrorType.objects.all().filter(level=0)))
    ErrorFormset.form = staticmethod(curry(ErrorForm, role))
    if request.method == 'POST':
        d = request.POST.get('date').split('-')
        date = _date(year=int(d[0]), month=int(d[1]), day=int(d[2]))
        uid_form = UIDForm(role, date, request.POST, request.FILES)
        error_formset = ErrorFormset()

        if uid_form.is_valid():
            uid_status = uid_form.save()
        else:
            return render(request, 'main/add_uncompleted_entry.html',
                    {'uid_form':uid_form, 'error_formset':error_formset,
                        'date':date, 'role':role})

        ErrorFormset.form = staticmethod(curry(ErrorForm, role, uid_status))
        error_formset = ErrorFormset(request.POST, request.FILES)
        if error_formset.is_valid():
            for form in error_formset:
                form.save()
            return redirect(reverse('update-uids',
                            kwargs={'proj_pk':proj_pk}))
        else:
            return render(request, 'main/add_uncompleted_entry.html',
                    {'uid_form':uid_form, 'error_formset':error_formset,
                        'date':date, 'role':role})
    else:
        date = _date.today() - _timedelta(days=2)
        uid_form = UIDForm(role, date)
        error_formset = ErrorFormset()
        return render(request, 'main/add_uncompleted_entry.html',
                {'uid_form':uid_form, 'error_formset':error_formset,
                    'date':date, 'role':role})
Example #14
    def plot_total_empty_docks(self, start_time, end_time, dt, return_vals=False):
        start_ms = _datetime_to_tstamp(start_time)
        end_ms = _datetime_to_tstamp(end_time)
        dt_ms = dt * 1000
        time_ax = _np.arange(start_ms, end_ms, dt_ms)
        
        timefmt = _mdates.DateFormatter('%H:%M')
        fig = _plt.figure()
        ax = fig.add_subplot(111)
        ax.xaxis_date()
        ax.xaxis.set_major_formatter(timefmt)

        tot_nempty = _np.zeros_like(time_ax)
        for s in self.stations:
            try:
                tot_nempty += self.stations[s].ndocks -\
                  self.stations[s].nbikes_timeseries(time_ax)
            except Exception:
                print("Getting total nbikes failed for station",
                      "%d with error:\n  %s" %
                      (self.stations[s].station_id, _sys.exc_info()[1]))

        times = [_tstamp_to_datetime(t) for t in time_ax]
        ax.plot(times, tot_nempty, c="0.3", lw=2)

        curr_line = _datetime(start_time.year, start_time.month, start_time.day)
        while curr_line < end_time:
            ax.axvline(curr_line, color='0.5', ls='--')
            curr_line += _timedelta(days=1)

        ax.set_xlim(start_time, end_time)
        ax.set_xlabel("Time")
        ax.set_ylabel("Number of empty docks across city")

        if return_vals:
            return times, time_ax, tot_nempty
Example #15
 def __init__(self, *args, **kargs):
     super(Locker, self).__init__(*args, **kargs)
     self._owner = None
     self._when = None
     self._expiration = _timedelta(0, DEFAULT_EXPIRATION_TIME, 0)
Example #16
def on_cron_every_min():
    """Send weekly mail digest
    """
    # Check if the models is specified
    models = _reg.get('content_digest.models')
    if not models:
        return

    # Check for the current day and time
    weekdays = _reg.get('content_digest.days_of_week', [])  # type: list
    time_of_day = _reg.get('content_digest.day_time', '00:00')
    if isinstance(time_of_day, _datetime):
        time_of_day = time_of_day.time()
    else:
        time_of_day = _util.parse_date_time(time_of_day).time()
    now = _datetime.now()
    now_weekday = now.weekday()

    if now.weekday() not in weekdays or not (time_of_day.hour == now.hour and
                                             time_of_day.minute == now.minute):
        return

    # Calculate days number to query collections
    prev_weekday = weekdays[weekdays.index(now_weekday) - 1]
    if prev_weekday < now_weekday:
        days_diff = (now_weekday + 1) - (prev_weekday + 1)
    else:
        days_diff = 8 - (prev_weekday + 1) + now_weekday
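    # Worked example: weekdays=[0, 3] (Mon, Thu). If today is Thu (3), prev_weekday is
    # Mon (0) and days_diff = 4 - 1 = 3; if today is Mon (0), prev_weekday wraps to
    # Thu (3) and days_diff = 8 - 4 + 0 = 4, i.e. the days since the previous digest day.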

    # Get entities of each model
    entities = []
    entities_num = _reg.get('content_digest.entities_number', 10)
    pub_period = _datetime.now() - _timedelta(days_diff)
    for model in models:
        f = _content.find(model,
                          language='*').gte('publish_time', pub_period).sort([
                              ('views_count', _odm.I_DESC)
                          ])
        entities += list(f.get(entities_num))

    # Nothing to send
    if not entities:
        return

    # Sort all entities and cut top
    entities = sorted(entities, key=lambda e: e.views_count, reverse=True)[:entities_num]

    for subscriber in _odm.find('content_digest_subscriber').eq(
            'enabled', True).get():
        _logger.info('Preparing content digest for {}'.format(
            subscriber.f_get('email')))

        lng = subscriber.f_get('language')
        default_m_subject = _lang.t('content_digest@default_mail_subject',
                                    language=lng)
        m_subject = _reg.get('content_digest.mail_subject_{}'.format(lng),
                             default_m_subject)
        m_body = _tpl.render(
            _reg.get('content_digest.tpl', 'content_digest@digest'), {
                'entities': entities,
                'subscriber': subscriber,
                'language': lng,
            })
        _mail.Message(subscriber.f_get('email'), m_subject, m_body).send()
Example #17
 def __init__(self, offset):
     self.__offset = _timedelta(minutes=offset)
	def now(self):
		return _datetime.utcnow() + _timedelta(hours=self.utc)
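
# The same offset-now idea with an aware datetime; the offset value is arbitrary
# for illustration:
from datetime import datetime, timedelta, timezone

utc_offset_hours = -5
print(datetime.now(timezone(timedelta(hours=utc_offset_hours))).isoformat())
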
Example #19
def main():
    import argparse as _argparse
    import json as _json
    import os as _os
    import re as _re
    import shutil as _shutil
    import hashlib as _hashlib
    import functools as _functools
    from collections import defaultdict as  _defaultdict
    from datetime import datetime as _datetime
    from datetime import timedelta as _timedelta
    from pathlib import Path as Path

    try:
        from google_photos_takeout_helper.__version__ import __version__
    except ModuleNotFoundError:
        from __version__ import __version__

    import piexif as _piexif
    from fractions import Fraction  # piexif requires some values to be stored as rationals
    import math
    if _os.name == 'nt':
        import win32_setctime as _windoza_setctime

    parser = _argparse.ArgumentParser(
        prog='Google Photos Takeout Helper',
        usage='google-photos-takeout-helper -i [INPUT TAKEOUT FOLDER] -o [OUTPUT FOLDER]',
        description=
        """This script takes all of your photos from Google Photos takeout, 
        fixes their exif DateTime data (when they were taken) and file creation date,
        and then copies it all to one folder.
        """,
    )
    parser.add_argument('--version', action='version', version=f"%(prog)s {__version__}")
    parser.add_argument(
        '-i', '--input-folder',
        type=str,
        required=True,
        help='Input folder with all stuff from Google Photos takeout zip(s)'
    )
    parser.add_argument(
        '-o', '--output-folder',
        type=str,
        required=False,
        default='ALL_PHOTOS',
        help='Output folder in which all photos will be placed'
    )
    parser.add_argument(
        '--skip-extras',
        action='store_true',
        help='EXPERIMENTAL: Skips extra photos such as those ending in "edited" or "EFFECTS".'
    )
    parser.add_argument(
        '--skip-extras-harder',  # Oh yeah, skip my extras harder daddy
        action='store_true',
        help='EXPERIMENTAL: Skips extra photos such as pic(1). Also implies --skip-extras.'
    )
    parser.add_argument(
        "--divide-to-dates",
        action='store_true',
        help="Create folders and subfolders based on the date the photos were taken"
    )
    parser.add_argument(
        '--albums',
        type=str,
        help="EXPERIMENTAL, MAY NOT WORK FOR EVERYONE: What kind of 'albums solution' you would like:\n"
             "'json' - written in a json file\n"
    )
    args = parser.parse_args()

    logger.info('Heeeere we go!')

    PHOTOS_DIR = Path(args.input_folder)
    FIXED_DIR = Path(args.output_folder)

    TAG_DATE_TIME_ORIGINAL = _piexif.ExifIFD.DateTimeOriginal
    TAG_DATE_TIME_DIGITIZED = _piexif.ExifIFD.DateTimeDigitized
    TAG_DATE_TIME = 306
    TAG_PREVIEW_DATE_TIME = 50971

    photo_formats = ['.jpg', '.jpeg', '.png', '.webp', '.bmp', '.tif', '.tiff', '.svg', '.heic']
    video_formats = ['.mp4', '.gif', '.mov', '.webm', '.avi', '.wmv', '.rm', '.mpg', '.mpe', '.mpeg', '.mkv', '.m4v',
                     '.mts', '.m2ts']
    extra_formats = [
        '-edited', '-effects', '-smile', '-mix',  # EN/US
        '-edytowane',  # PL
        # Add more "edited" flags in more languages if you want. They need to be lowercase.
    ]

    # Album Multimap
    album_mmap = _defaultdict(list)

    # Duplicate by full hash multimap
    files_by_full_hash = _defaultdict(list)

    # holds all the renamed files that clashed from their original name
    rename_map = dict()

    _all_jsons_dict = _defaultdict(dict)

    # Statistics:
    s_removed_duplicates_count = 0
    s_copied_files = 0
    s_cant_insert_exif_files = []  # List of files where inserting exif failed
    s_date_from_folder_files = []  # List of files where date was set from folder name
    s_skipped_extra_files = []  # List of extra files ("-edited" etc) which were skipped
    s_no_json_found = []  # List of files where we couldn't find json
    s_no_date_at_all = []  # List of files where there was absolutely no option to set correct date

    FIXED_DIR.mkdir(parents=True, exist_ok=True)

    def for_all_files_recursive(
      dir: Path,
      file_function=lambda fi: True,
      folder_function=lambda fo: True,
      filter_fun=lambda file: True
    ):
        for file in dir.rglob("*"):
            if file.is_dir():
                folder_function(file)
                continue
            elif file.is_file():
                if filter_fun(file):
                    file_function(file)
            else:
                logger.debug(f'Found something weird... {file}')

    # This is required, because windoza crashes when timestamp is negative
    # https://github.com/joke2k/faker/issues/460#issuecomment-308897287
    # This (dynamic assigning a function) mayyy be a little faster than comparing it every time (?)
    datetime_from_timestamp = (lambda t: _datetime(1970, 1, 1) + _timedelta(seconds=int(t))) \
        if _os.name == 'nt' \
        else _datetime.fromtimestamp
    timestamp_from_datetime = (lambda dt: (dt - _datetime(1970, 1, 1)).total_seconds()) \
        if _os.name == 'nt' \
        else _datetime.timestamp

    def is_photo(file: Path):
        if file.suffix.lower() not in photo_formats:
            return False
        # skips the extra photo file, like edited or effects. They're kinda useless.
        nonlocal s_skipped_extra_files
        if args.skip_extras or args.skip_extras_harder:  # if the file name includes something under the extra_formats, it skips it.
            for extra in extra_formats:
                if extra in file.name.lower():
                    s_skipped_extra_files.append(str(file.resolve()))
                    return False
        if args.skip_extras_harder:
            search = r"\(\d+\)\."  # we leave the period in so it doesn't catch folders.
            if bool(_re.search(search, file.name)):
                # PICT0003(5).jpg -> PICT0003.jpg      The regex would match "(5).", and replace it with a "."
                plain_file = file.with_name(_re.sub(search, '.', str(file)))
                # if the original exists, it will ignore the (1) file, ensuring there is only one copy of each file.
                if plain_file.is_file():
                    s_skipped_extra_files.append(str(file.resolve()))
                    return False
        return True

    def is_video(file: Path):
        if file.suffix.lower() not in video_formats:
            return False
        return True

    def chunk_reader(fobj, chunk_size=1024):
        """ Generator that reads a file in chunks of bytes """
        while True:
            chunk = fobj.read(chunk_size)
            if not chunk:
                return
            yield chunk

    def get_hash(file: Path, first_chunk_only=False, hash_algo=_hashlib.sha1):
        hashobj = hash_algo()
        with open(file, "rb") as f:
            if first_chunk_only:
                hashobj.update(f.read(1024))
            else:
                for chunk in chunk_reader(f):
                    hashobj.update(chunk)
        return hashobj.digest()

    def populate_album_map(path: Path, filter_fun=lambda f: (is_photo(f) or is_video(f))):
        if not path.is_dir():
            raise NotADirectoryError('populate_album_map only handles directories not files')

        meta_file_exists = find_album_meta_json_file(path)
        if meta_file_exists is None or not meta_file_exists.exists():
            return False

        # means that we are processing an album so process
        for file in path.rglob("*"):
            if not (file.is_file() and filter_fun(file)):
                continue
            file_name = file.name
            # If it's not in the output folder
            if not (FIXED_DIR / file.name).is_file():
                full_hash = None
                try:
                    full_hash = get_hash(file, first_chunk_only=False)
                except Exception as e:
                    logger.debug(e)
                    logger.debug(f"populate_album_map - couldn't get hash of {file}")
                if full_hash is not None and full_hash in files_by_full_hash:
                    full_hash_files = files_by_full_hash[full_hash]
                    if len(full_hash_files) != 1:
                        logger.error("full_hash_files list should only be one after duplication removal, bad state")
                        exit(-5)
                        return False
                    file_name = full_hash_files[0].name

            # check rename map in case there was an overlap namechange
            if str(file) in rename_map:
                file_name = rename_map[str(file)].name

            album_mmap[file.parent.name].append(file_name)

    # PART 3: removing duplicates

    # THIS IS PARTLY COPIED FROM STACKOVERFLOW
    # https://stackoverflow.com/questions/748675/finding-duplicate-files-and-removing-them
    #
    # We now use an optimized version linked from tfeldmann
    # https://gist.github.com/tfeldmann/fc875e6630d11f2256e746f67a09c1ae
    #
    # THANK YOU Todor Minakov (https://github.com/tminakov) and Thomas Feldmann (https://github.com/tfeldmann)
    #
    # NOTE: defaultdict(list) is a multimap, all init array handling is done internally 
    # See: https://en.wikipedia.org/wiki/Multimap#Python
    #
    def find_duplicates(path: Path, filter_fun=lambda file: True):
        files_by_size = _defaultdict(list)
        files_by_small_hash = _defaultdict(list)

        for file in path.rglob("*"):
            if file.is_file() and filter_fun(file):
                try:
                    file_size = file.stat().st_size
                except (OSError, FileNotFoundError):
                    # not accessible (permissions, etc) - pass on
                    continue
                files_by_size[file_size].append(file)

        # For all files with the same file size, get their hash on the first 1024 bytes
        logger.info('Calculating small hashes...')
        for file_size, files in _tqdm(files_by_size.items(), unit='files-by-size'):
            if len(files) < 2:
                continue  # this file size is unique, no need to spend cpu cycles on it

            for file in files:
                try:
                    small_hash = get_hash(file, first_chunk_only=True)
                except OSError:
                    # the file access might've changed till the exec point got here
                    continue
                files_by_small_hash[(file_size, small_hash)].append(file)

        # For all files with the hash on the first 1024 bytes, get their hash on the full
        # file - if more than one file is inserted on a hash here they are certainly duplicates
        logger.info('Calculating full hashes...')
        for files in _tqdm(files_by_small_hash.values(), unit='files-by-small-hash'):
            if len(files) < 2:
                # the hash of the first 1k bytes is unique -> skip this file
                continue

            for file in files:
                try:
                    full_hash = get_hash(file, first_chunk_only=False)
                except OSError:
                    # the file access might've changed till the exec point got here
                    continue

                files_by_full_hash[full_hash].append(file)

    # Removes all duplicates in folder
    # ONLY RUN AFTER RUNNING find_duplicates()
    def remove_duplicates():
        nonlocal s_removed_duplicates_count
        # Now we have populated the final multimap of absolute dups, We now can attempt to find the original file
        # and remove all the other duplicates
        for files in _tqdm(files_by_full_hash.values(), unit='duplicates'):
            if len(files) < 2:
                continue  # this file size is unique, no need to spend cpu cycles on it

            s_removed_duplicates_count += len(files) - 1
            for file in files:
                # TODO reconsider which dup we delete these now that we're searching globally?
                if len(files) > 1:
                    file.unlink()
                    files.remove(file)
        return True

    # PART 1: Fixing metadata and date-related stuff

    # Returns json dict
    def find_json_for_file(file: Path):
        parenthesis_regexp = r'\([0-9]+\)'
        parenthesis = _re.findall(parenthesis_regexp, file.name)
        if len(parenthesis) == 1:
            # Fix for files that have as image/video IMG_1234(1).JPG with a json IMG_1234.JPG(1).json
            stripped_filename = _re.sub(parenthesis_regexp, '', file.name)
            potential_json = file.with_name(stripped_filename + parenthesis[0] + '.json')
        else:
            potential_json = file.with_name(file.name + '.json')

        if potential_json.is_file():
            try:
                with open(potential_json, 'r') as f:
                    json_dict = _json.load(f)
                return json_dict
            except:
                raise FileNotFoundError(f"Couldn't find json for file: {file}")

        nonlocal _all_jsons_dict
        # Check if we need to load this folder
        if file.parent not in _all_jsons_dict:
            for json_file in file.parent.rglob("*.json"):
                try:
                    with json_file.open('r') as f:
                        json_dict = _json.load(f)
                        if "title" in json_dict:
                            # We found a JSON file with a proper title, store the file name
                            _all_jsons_dict[file.parent][json_dict["title"]] = json_dict
                except:
                    logger.debug(f"Couldn't open json file {json_file}")

        # Check if we have found the JSON file among all the loaded ones in the folder
        if file.parent in _all_jsons_dict and file.name in _all_jsons_dict[file.parent]:
            # Great we found a valid JSON file in this folder corresponding to this file
            return _all_jsons_dict[file.parent][file.name]
        else:
            nonlocal s_no_json_found
            s_no_json_found.append(str(file.resolve()))
            raise FileNotFoundError(f"Couldn't find json for file: {file}")

    # Returns date in 2019:01:01 23:59:59 format
    def get_date_from_folder_meta(dir: Path):
        file = find_album_meta_json_file(dir)
        if not file:
            logger.debug("Couldn't pull datetime from album meta")
            return None
        try:
            with open(str(file), 'r') as fi:
                album_dict = _json.load(fi)
                # find_album_meta_json_file *should* give us "safe" file
                time = int(album_dict["albumData"]["date"]["timestamp"])
                return datetime_from_timestamp(time).strftime('%Y:%m:%d %H:%M:%S')
        except KeyError:
            logger.error(
                "get_date_from_folder_meta - json doesn't have required stuff "
                "- that probably means that either google f****d us again, or find_album_meta_json_file"
                "is seriously broken"
            )

        return None

    @_functools.lru_cache(maxsize=None)
    def find_album_meta_json_file(dir: Path):
        for file in dir.rglob("*.json"):
            try:
                with open(str(file), 'r') as f:
                    dict = _json.load(f)
                    if "albumData" in dict:
                        return file
            except Exception as e:
                logger.debug(e)
                logger.debug(f"find_album_meta_json_file - Error opening file: {file}")

        return None

    def set_creation_date_from_str(file: Path, str_datetime):
        try:
            # Turns out exif can have different formats - YYYY:MM:DD, YYYY/..., YYYY-... etc
            # God wish that americans won't have something like MM-DD-YYYY
            # The replace ': ' to ':0' fixes issues when it reads the string as 2006:11:09 10:54: 1.
            # It replaces the extra whitespace with a 0 for proper parsing
            str_datetime = str_datetime.replace('-', ':').replace('/', ':').replace('.', ':') \
                               .replace('\\', ':').replace(': ', ':0')[:19]
            timestamp = timestamp_from_datetime(
                _datetime.strptime(
                    str_datetime,
                    '%Y:%m:%d %H:%M:%S'
                )
            )
            _os.utime(file, (timestamp, timestamp))
            if _os.name == 'nt':
                _windoza_setctime.setctime(str(file), timestamp)
        except Exception as e:
            raise ValueError(f"Error setting creation date from string: {str_datetime}")

    def set_creation_date_from_exif(file: Path):
        try:
            # Why do you need to be like that, Piexif...
            exif_dict = _piexif.load(str(file))
        except Exception as e:
            raise IOError("Can't read file's exif!")
        tags = [['0th', TAG_DATE_TIME], ['Exif', TAG_DATE_TIME_ORIGINAL], ['Exif', TAG_DATE_TIME_DIGITIZED]]
        datetime_str = ''
        date_set_success = False
        for tag in tags:
            try:
                datetime_str = exif_dict[tag[0]][tag[1]].decode('UTF-8')
                set_creation_date_from_str(file, datetime_str)
                date_set_success = True
                break
            except KeyError:
                pass  # No such tag - continue searching :/
            except ValueError:
                logger.debug("Wrong date format in exif!")
                logger.debug(datetime_str)
                logger.debug("does not match '%Y:%m:%d %H:%M:%S'")
        if not date_set_success:
            raise IOError('No correct DateTime in given exif')

    def set_file_exif_date(file: Path, creation_date):
        try:
            exif_dict = _piexif.load(str(file))
        except:  # Sorry but Piexif is too unpredictable
            exif_dict = {'0th': {}, 'Exif': {}}

        creation_date = creation_date.encode('UTF-8')
        exif_dict['0th'][TAG_DATE_TIME] = creation_date
        exif_dict['Exif'][TAG_DATE_TIME_ORIGINAL] = creation_date
        exif_dict['Exif'][TAG_DATE_TIME_DIGITIZED] = creation_date

        try:
            _piexif.insert(_piexif.dump(exif_dict), str(file))
        except Exception as e:
            logger.debug("Couldn't insert exif!")
            logger.debug(e)
            nonlocal s_cant_insert_exif_files
            s_cant_insert_exif_files.append(str(file.resolve()))

    def get_date_str_from_json(json):
        return datetime_from_timestamp(
            int(json['photoTakenTime']['timestamp'])
        ).strftime('%Y:%m:%d %H:%M:%S')

    # ========= THIS IS ALL GPS STUFF =========

    def change_to_rational(number):
        """convert a number to rantional
        Keyword arguments: number
        return: tuple like (1, 2), (numerator, denominator)
        """
        f = Fraction(str(number))
        return f.numerator, f.denominator

    # got this here https://github.com/hMatoba/piexifjs/issues/1#issuecomment-260176317
    def degToDmsRational(degFloat):
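        # e.g. 50.213143 deg -> deg=50, deg_min=12, sec=round(47.3148 * 100)=4731,
        # giving [(50, 1), (12, 1), (4731, 100)], i.e. 50 deg 12' 47.31"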
        min_float = degFloat % 1 * 60
        sec_float = min_float % 1 * 60
        deg = math.floor(degFloat)
        deg_min = math.floor(min_float)
        sec = round(sec_float * 100)

        return [(deg, 1), (deg_min, 1), (sec, 100)]

    def set_file_geo_data(file: Path, json):
        """
        Reads the geoData from google and saves it to the EXIF. This assumes the geoData looks something like -100.12093, 50.213143.

        Written by DalenW.
        :param file:
        :param json:
        :return:
        """

        # prevents crashes
        try:
            exif_dict = _piexif.load(str(file))
        except:
            exif_dict = {'0th': {}, 'Exif': {}}

        # returns 0.0 if the value is a string (i.e. missing/unusable), otherwise converts it to a float
        def _str_to_float(num):
            if type(num) == str:
                return 0.0
            else:
                return float(num)

        # fallbacks to GeoData Exif if it wasn't set in the photos editor.
        # https://github.com/TheLastGimbus/GooglePhotosTakeoutHelper/pull/5#discussion_r531792314
        longitude = _str_to_float(json['geoData']['longitude'])
        latitude = _str_to_float(json['geoData']['latitude'])
        altitude = _str_to_float(json['geoData']['altitude'])

        # Prioritise geoData set from GPhotos editor. If it's blank, fall back to geoDataExif
        if longitude == 0 and latitude == 0:
            longitude = _str_to_float(json['geoDataExif']['longitude'])
            latitude = _str_to_float(json['geoDataExif']['latitude'])
            altitude = _str_to_float(json['geoDataExif']['altitude'])

        # latitude >= 0: North latitude -> "N"
        # latitude < 0: South latitude -> "S"
        # longitude >= 0: East longitude -> "E"
        # longitude < 0: West longitude -> "W"

        if longitude >= 0:
            longitude_ref = 'E'
        else:
            longitude_ref = 'W'
            longitude = longitude * -1

        if latitude >= 0:
            latitude_ref = 'N'
        else:
            latitude_ref = 'S'
            latitude = latitude * -1

        # referenced from https://gist.github.com/c060604/8a51f8999be12fc2be498e9ca56adc72
        gps_ifd = {
            _piexif.GPSIFD.GPSVersionID: (2, 0, 0, 0)
        }

        # skips it if it's empty
        if latitude != 0 or longitude != 0:
            gps_ifd.update({
                _piexif.GPSIFD.GPSLatitudeRef: latitude_ref,
                _piexif.GPSIFD.GPSLatitude: degToDmsRational(latitude),

                _piexif.GPSIFD.GPSLongitudeRef: longitude_ref,
                _piexif.GPSIFD.GPSLongitude: degToDmsRational(longitude)
            })

        if altitude != 0:
            gps_ifd.update({
                _piexif.GPSIFD.GPSAltitudeRef: 1,
                _piexif.GPSIFD.GPSAltitude: change_to_rational(round(altitude))
            })

        gps_exif = {"GPS": gps_ifd}
        exif_dict.update(gps_exif)

        try:
            _piexif.insert(_piexif.dump(exif_dict), str(file))
        except Exception as e:
            logger.debug("Couldn't insert geo exif!")
            # local variable 'new_value' referenced before assignment means that one of the GPS values is incorrect
            logger.debug(e)

    # ============ END OF GPS STUFF ============

    # Fixes ALL metadata, takes just file and dir and figures it out
    def fix_metadata(file: Path):
        # logger.info(file)

        has_nice_date = False
        try:
            set_creation_date_from_exif(file)
            has_nice_date = True
        except (_piexif.InvalidImageDataError, ValueError, IOError) as e:
            logger.debug(e)
            logger.debug(f'No exif for {file}')
        except IOError:
            logger.debug('No creation date found in exif!')

        try:
            google_json = find_json_for_file(file)
            date = get_date_str_from_json(google_json)
            set_file_geo_data(file, google_json)
            set_file_exif_date(file, date)
            set_creation_date_from_str(file, date)
            has_nice_date = True
            return
        except FileNotFoundError as e:
            logger.debug(e)

        if has_nice_date:
            return True

        logger.debug(f'Last option, copying folder meta as date for {file}')
        date = get_date_from_folder_meta(file.parent)
        if date is not None:
            set_file_exif_date(file, date)
            set_creation_date_from_str(file, date)
            nonlocal s_date_from_folder_files
            s_date_from_folder_files.append(str(file.resolve()))
            return True
        else:
            logger.warning(f'There was literally no option to set date on {file}')
            nonlocal s_no_date_at_all
            s_no_date_at_all.append(str(file.resolve()))

        return False

    # PART 2: Copy all photos and videos to target folder

    # Makes a new name like 'photo(1).jpg'
    def new_name_if_exists(file: Path):
        new_name = file
        i = 1
        while True:
            if not new_name.is_file():
                return new_name
            else:
                new_name = file.with_name(f"{file.stem}({i}){file.suffix}")
                rename_map[str(file)] = new_name
                i += 1

    def copy_to_target(file: Path):
        if is_photo(file) or is_video(file):
            new_file = new_name_if_exists(FIXED_DIR / file.name)
            _shutil.copy2(file, new_file)
            nonlocal s_copied_files
            s_copied_files += 1
        return True

    def copy_to_target_and_divide(file: Path):
        creation_date = file.stat().st_mtime
        date = datetime_from_timestamp(creation_date)

        new_path = FIXED_DIR / f"{date.year}/{date.month:02}/"
        new_path.mkdir(parents=True, exist_ok=True)

        new_file = new_name_if_exists(new_path / file.name)
        _shutil.copy2(file, new_file)
        nonlocal s_copied_files
        s_copied_files += 1
        return True

    # xD python lambdas are shit - this is only because we can't do 2 commands, so we do them in arguments
    def _walk_with_tqdm(res, bar: _tqdm):
        bar.update()
        return res

    # Count *all* photo and video files - this is hacky, and we should use .rglob altogether instead of is_photo
    logger.info("Counting how many input files we have ahead...")
    _input_files_count = 0
    for ext in _tqdm(photo_formats + video_formats, unit='formats'):
        _input_files_count += len(list(PHOTOS_DIR.rglob(f'**/*{ext}')))
    logger.info(f'Input files: {_input_files_count}')

    logger.info('=====================')
    logger.info('Fixing files metadata and creation dates...')
    # tqdm progress bar stuff
    _metadata_bar = _tqdm(total=_input_files_count, unit='files')

    for_all_files_recursive(
        dir=PHOTOS_DIR,
        file_function=lambda f: _walk_with_tqdm(fix_metadata(f), _metadata_bar),
        # TODO (probably never, but should): Change this maybe to path.rglob
        filter_fun=lambda f: (is_photo(f) or is_video(f))
    )
    _metadata_bar.close()
    logger.info('=====================')

    logger.info('=====================')
    _copy_bar = _tqdm(total=_input_files_count, unit='files')
    if args.divide_to_dates:
        logger.info('Creating subfolders and dividing files based on date...')
        for_all_files_recursive(
            dir=PHOTOS_DIR,
            file_function=lambda f: _walk_with_tqdm(copy_to_target_and_divide(f), _copy_bar),
            filter_fun=lambda f: (is_photo(f) or is_video(f))
        )
    else:
        logger.info('Copying all files to one folder...')
        logger.info('(If you want, you can get them organized in folders based on year and month.'
                    ' Run with --divide-to-dates to do this)')
        for_all_files_recursive(
            dir=PHOTOS_DIR,
            file_function=lambda f: _walk_with_tqdm(copy_to_target(f), _copy_bar),
            filter_fun=lambda f: (is_photo(f) or is_video(f))
        )
    _copy_bar.close()
    logger.info('=====================')
    logger.info('=====================')
    logger.info('Finding duplicates...')
    find_duplicates(FIXED_DIR, lambda f: (is_photo(f) or is_video(f)))
    logger.info('Removing duplicates...')
    remove_duplicates()
    logger.info('=====================')
    if args.albums is not None:
        if args.albums.lower() == 'json':
            logger.info('=====================')
            logger.info('Populate json file with albums...')
            logger.info('=====================')
            for_all_files_recursive(
                dir=PHOTOS_DIR,
                folder_function=populate_album_map
            )
            file = PHOTOS_DIR / 'albums.json'
            with open(file, 'w', encoding="utf-8") as outfile:
                _json.dump(album_mmap, outfile)
            logger.info(str(file))

    logger.info('')
    logger.info('DONE! FREEEEEDOOOOM!!!')
    logger.info('')
    logger.info("Final statistics:")
    logger.info(f"Files copied to target folder: {s_copied_files}")
    logger.info(f"Removed duplicates: {s_removed_duplicates_count}")
    logger.info(f"Files for which we couldn't find json: {len(s_no_json_found)}")
    if len(s_no_json_found) > 0:
        with open(PHOTOS_DIR / 'no_json_found.txt', 'w', encoding="utf-8") as f:
            f.write("# This file contains list of files for which there was no corresponding .json file found\n")
            f.write("# You might find it useful, but you can safely delete this :)\n")
            f.write("\n".join(s_no_json_found))
            logger.info(f" - you have full list in {f.name}")
    logger.info(f"Files where inserting new exif failed: {len(s_cant_insert_exif_files)}")
    if len(s_cant_insert_exif_files) > 0:
        logger.info("(This is not necessary bad thing - pretty much all videos fail, "
                    "and your photos probably have their original exif already")
        with open(PHOTOS_DIR / 'failed_inserting_exif.txt', 'w', encoding="utf-8") as f:
            f.write("# This file contains list of files where setting right exif date failed\n")
            f.write("# You might find it useful, but you can safely delete this :)\n")
            f.write("\n".join(s_cant_insert_exif_files))
            logger.info(f" - you have full list in {f.name}")
    logger.info(f"Files where date was set from name of the folder: {len(s_date_from_folder_files)}")
    if len(s_date_from_folder_files) > 0:
        with open(PHOTOS_DIR / 'date_from_folder_name.txt', 'w', encoding="utf-8") as f:
            f.write("# This file contains list of files where date was set from name of the folder\n")
            f.write("# You might find it useful, but you can safely delete this :)\n")
            f.write("\n".join(s_date_from_folder_files))
            logger.info(f" - you have full list in {f.name}")
    if args.skip_extras or args.skip_extras_harder:
        # Remove duplicates: https://www.w3schools.com/python/python_howto_remove_duplicates.asp
        s_skipped_extra_files = list(dict.fromkeys(s_skipped_extra_files))
        logger.info(f"Extra files that were skipped: {len(s_skipped_extra_files)}")
        with open(PHOTOS_DIR / 'skipped_extra_files.txt', 'w', encoding="utf-8") as f:
            f.write("# This file contains list of extra files (ending with '-edited' etc) which were skipped because "
                    "you've used either --skip-extras or --skip-extras-harder\n")
            f.write("# You might find it useful, but you can safely delete this :)\n")
            f.write("\n".join(s_skipped_extra_files))
            logger.info(f" - you have full list in {f.name}")
    if len(s_no_date_at_all) > 0:
        logger.info('')
        logger.info(f"!!! There were {len(s_no_date_at_all)} files where there was absolutely no way to set "
                    f"a correct date! They will probably appear at the top of the others, as their 'last modified' "
                    f"value is set to moment of downloading your takeout :/")
        with open(PHOTOS_DIR / 'unsorted.txt', 'w', encoding="utf-8") as f:
            f.write("# This file contains list of files where there was no way to set correct date!\n")
            f.write("# You probably want to set their dates manually - but you can delete this if you want\n")
            f.write("\n".join(s_no_date_at_all))
            logger.info(f" - you have full list in {f.name}")

    logger.info('')
    logger.info('Sooo... what now? You can see README.md for what nice G Photos alternatives I found and recommend')
    logger.info('')
    logger.info('If I helped you, you can consider donating me: https://www.paypal.me/TheLastGimbus')
    logger.info('Have a nice day!')
Example #20
for other time zones.

"""

__copyright__ = "Copyright (C) 2014 Ivan D Vasin"
__docformat__ = "restructuredtext"

from datetime import timedelta as _timedelta, tzinfo as _tzinfo
import time as _localtime
from time \
    import mktime as _unixtime_from_localtime_timestruct, \
           localtime as _localtime_timestruct_from_unixtime, \
           tzset as _tzset


_TIMEDELTA_ZERO = _timedelta(0)

_TIMEDELTA_DST = _TIMEDELTA_ZERO

_TIMEDELTA_DSTSTD = _TIMEDELTA_ZERO

_TIMEDELTA_STD = _TIMEDELTA_ZERO


def tzset():

    """
    Reset the local time conversion rules per environment variable
    :envvar:`TZ`

    .. seealso:: :func:`time.tzset`
Example #21
 def __init__(self, minutes):
     if abs(minutes) > 1439:
         raise ValueError("Time-zone offset is too large,", minutes)
     self.__minutes = minutes
     self.__offset = _timedelta(minutes=minutes)
Example #22
def _get_local_tzinfo():
    """

    get local tzinfo

    @return:
    @rtype:
    """
    dst = time.localtime().tm_isdst
    tz_offset = time.altzone
    if dst:
        tz_offset += 3600
    td = _timedelta(seconds=-tz_offset)
    return _timezone(td)

__tz_info = _timezone(_timedelta(0))


class DateStringError(BatchError, ValueError):
    pass


class HandlerMismatch(DateStringError):
    pass


# Begin list of handler functions for known formats.
# We assume that the LabView software doesn't suck
# at recording timestamps (though it kind of does, which
# is why they don't all have the same format), and that
# we don't need to check for errors, such as passing
Example #23
class AlpacaDataSource(DataSourceBase):
    DELTA = _timedelta(days=1)
    def __init__(self, alpaca_key: str, alpaca_secret: str, config: _Config,
                 live: bool):
        self._alpaca_key = alpaca_key
        self._alpaca_secret = alpaca_secret
        self._api = _tradeapi.REST(self._alpaca_key, self._alpaca_secret,
                                   api_version='v2')

        self._conn = _StreamConn(self._alpaca_key, self._alpaca_secret)

        self.buffer_bar = []
        self.cursor = 0
        self.data_callback = None

        super().__init__(config, live)
        
        if not self.live:
            self._get_ticker_array()

    def _daylight_savings_offset(self, date):
        tz = pytz.timezone('America/New_York')
        offset_seconds = tz.utcoffset(date).seconds
        offset_hours = offset_seconds / 3600.0
        offset_hours -= 24
        if (offset_hours == -4.00):
            return "-04:00"
        else:
            return "-05:00"
        # return ("{:+02d}:{:02d}".format(int(offset_hours), int((offset_hours % 1) * 60)))

    def _create_bar_backtest(self, bar, ticker):
        return _Bar(bar.t, bar.o, bar.c, bar.h, bar.l, bar.v, ticker)

    def _create_bar_live(self, bar):
        return _Bar(bar.timestamp, bar.open, bar.close, bar.high, bar.low, bar.volume, bar.symbol)

    def _get_ticker_array(self):
        self.start_time = self.config.start_time
        self.end_time = self.config.end_time

        current_day = self.start_time
        while current_day != self.end_time:
            current_day_str = current_day.strftime("%Y-%m-%d")
            if _np.is_busday(current_day_str):
                start_day = current_day + _timedelta(hours=9, minutes=30)
                end_day = current_day + _timedelta(hours=16)

                start_day_str = start_day.strftime("%Y-%m-%dT%H:%M:%S" + self._daylight_savings_offset(current_day))
                end_day_str = end_day.strftime("%Y-%m-%dT%H:%M:%S" + self._daylight_savings_offset(current_day))

                for t in self.config.tickers:
                    barset = self._api.get_barset(t, 'minute',
                                                  start=start_day_str,
                                                  end=end_day_str)
                    bars = barset[t]
                    bar_set = [ self._create_bar_backtest(b, t) for b in bars]
                    self.buffer_bar.extend(bar_set)
                    _time.sleep(0.5)

            current_day += AlpacaDataSource.DELTA
        self.buffer_bar.sort(key=lambda x: x.timestamp)

    def reset(self) -> bool:
        self.data_callback = None
        if not self.live:
            self.cursor = 0

        return True

    def has_next(self) -> bool:
        if not self.live:
            return self.cursor < len(self.buffer_bar)
        else:
            return True

    def next(self) -> BarList:
        if not self.live:
            tickers = [ self.buffer_bar[self.cursor] ]
            self.cursor += 1
            return tickers

    def current(self) -> BarList:
        return [ self.buffer_bar[self.cursor] ]

    def set_callback(self, data_callback):
        self.data_callback = data_callback
        if self.live:
            @self._conn.on(r'^AM.*$', self.config.tickers)
            async def on_bar(conn, channel, bar):
                processed_bar = self._create_bar_live(bar)
                self.data_callback([processed_bar])

    def start_feed(self):
        if self.data_callback != None:
            if not self.live:
                while(self.has_next()):
                    datum = self.next()
                    self.data_callback(datum)
            else:
                ticker_array = [ 'AM.' + t for t in self.config.tickers ]
                self._conn.run(ticker_array)
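
# Sketch of the per-day market-hours strings built in _get_ticker_array above; the
# "-05:00"/"-04:00" suffix is what _daylight_savings_offset returns for EST/EDT
# (2021-03-01 falls in EST):
from datetime import datetime, timedelta

day = datetime(2021, 3, 1)
start_day = day + timedelta(hours=9, minutes=30)
end_day = day + timedelta(hours=16)
print(start_day.strftime('%Y-%m-%dT%H:%M:%S') + '-05:00')  # 2021-03-01T09:30:00-05:00
print(end_day.strftime('%Y-%m-%dT%H:%M:%S') + '-05:00')    # 2021-03-01T16:00:00-05:00
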
def __add_days(inc, date):
    # timedelta handles days, even negatives.
    retdate = date + _timedelta(days=inc)
    return retdate
Example #25
 def save(self, form, user_id):
     try:
         #print _to_yymmdd(form.parto_prob)
         form.parto_prob = _to_yymmdd(form.parto_prob)
         form.f_nac = _to_yymmdd(form.f_nac)
         pp = _to_date(form.parto_prob) - _timedelta(days=280)
         #print 'origin: {}'.format(pp)
         ctrls = _PreNatal(pp.year, pp.month, pp.day)
         if not ctrls.check_range():
             return False
         promos = _PrePromotional(pp.year, pp.month,
                                  pp.day).controls_dates()
         check_pregnant = lambda: len(
             [i for i in form.keys() if not i.startswith('c_')])
         check_contact = lambda: len(
             [i for i in form.keys() if i.startswith('c_')])
         #print check_pregnant()
         #print check_contact()
         if check_pregnant() > 5:
             em_fields = {
                 k: v
                 for k, v in form.iteritems() if not k.startswith('c_')
                 and k not in ['id_com', 'id_etn', 'parto_prob']
             }
             #print em_fields
         cn_fields = lambda: {
             k.replace('c_', ''): v
             for k, v in form.iteritems() if k.startswith('c_')
         }
         #print cn_fields()
         with _db_session:
             tipos = [_Tipo.get(id_tip=1), _Tipo.get(id_tip=2)]
             com = _Comunidad.get(id_com=form.id_com)
             em_etn = _Etnia.get(id_etn=form.id_etn)
             if check_pregnant() > 5:
                 em = _Persona(comunidad=com,
                               etnia=em_etn,
                               tipos=[tipos[0]],
                               **em_fields)
                 _flush()
             else:
                 em = _Persona.get(telf=form.telf)
                 em.set(f_nac=form.f_nac, comunidad=com, etnia=em_etn)
                 em.tipos += [tipos[0]]
                 _flush()
             embarazo = _Embarazo(embarazada=em, parto_prob=form.parto_prob)
             embarazo.controles += [
                 _Control(embarazo=embarazo,
                          nro_con=ctrl[0],
                          fecha_con=ctrl[1])
                 for ctrl in ctrls.controls_dates()
             ]
             _flush()
             for cn in embarazo.controles:
                 msg = _messagesCrt.get_byNumbControl(
                     nro_control=cn.nro_con)
                 _agendasCrt.save(persona=em,
                                  mensaje=msg,
                                  fecha_con=cn.fecha_con,
                                  days=7)
             for cn in promos:
                 msg = _messagesCrt.get_byNumbControl(nro_control=cn[0],
                                                      tipo=3)
                 _agendasCrt.save(persona=em, mensaje=msg, fecha_con=cn[1])
             if check_contact() == 1:
                 contacto = _Persona.get(telf=form.c_telf)
                 contacto.embarazadas += [em]
                 if tipos[1] not in contacto.tipos:
                     contacto.tipos += [tipos[1]]
                 _flush()
             elif check_contact() >= 5:
                 cnt = _Persona(comunidad=com,
                                tipos=[tipos[1]],
                                **cn_fields())
                 _flush()
                 cnt.embarazadas += [em]
             _commit()
         return True
     except Exception, e:
         #raise e
         print e
         return False
Exemple #26
0
def fetch_xml(instrument, dt_from=None, dt_to=None):
    """
    Get the XML responses from the Nexus Sharepoint calendar for one,
    multiple, or all instruments.

    Parameters
    ----------
    instrument : :py:class:`~nexusLIMS.instruments.Instrument`
        As defined in :py:func:`~.get_events`,
        one of the NexusLIMS instruments contained in the
        :py:attr:`~nexusLIMS.instruments.instrument_db` database.
        Controls what instrument calendar is used to get events
    dt_from : :py:class:`~datetime.datetime` or None
        A :py:class:`~datetime.datetime` object representing the start of a
        calendar event to search for.
        If ``dt_from`` and ``dt_to`` are `None`, no date filtering will be done.
        If just ``dt_from`` is `None`, all events from the beginning of the
        calendar record will be returned up until ``dt_to``.
    dt_to : :py:class:`~datetime.datetime` or None
        A :py:class:`~datetime.datetime` object representing the end of
        calendar event to search for.
        If ``dt_from`` and ``dt_to`` are `None`, no date filtering will be done.
        If just ``dt_to`` is `None`, all events from the ``dt_from`` to the
        present will be returned.

    Returns
    -------
    api_response : str
        A string containing the XML calendar information for each
        instrument requested, stripped of the empty default namespace. If
        ``dt_from`` and ``dt_to`` are provided, it will contain just one
        `"entry"` representing a single event on the calendar

    Notes
    -----
    To find the right event, an API request to the Sharepoint Calendar will
    be made for all events starting on the same day as ``dt_from``. This
    could result in multiple events being returned if there is more than one
    session scheduled on that microscope for that day. To find the right one,
    the timespan between each event's ``StartTime`` and ``EndTime`` returned
    from the calendar will be compared with the timespan between ``dt_from`` and
    ``dt_to``. The event with the greatest overlap will be taken as the
    correct one. This approach should allow for some flexibility in terms of
    non-exact matching between the reserved timespans and those recorded by
    the session logger.
    """

    # Paths for Nexus Instruments that can be booked through sharepoint
    # Instrument names can be found at
    # https://**REMOVED**/**REMOVED**/_vti_bin/ListData.svc
    # and
    # https://gitlab.nist.gov/gitlab/nexuslims/NexusMicroscopyLIMS/wikis/Sharepoint-Calendar-Information

    # Parse instrument parameter input, leaving inst_to_fetch as list of
    # nexuslims.instruments.Instrument objects
    if isinstance(instrument, str):
        # try to convert from instrument PID string to actual instrument
        try:
            instrument = _instr_db[instrument]
        except KeyError:
            raise KeyError('Entered instrument string "{}" could not be '
                           'parsed'.format(instrument))
    elif isinstance(instrument, _Instrument):
        pass
    else:
        raise ValueError('Entered instrument '
                         '"{}" could not be parsed'.format(instrument))

    api_response = ''

    instr_url = instrument.api_url + '?$expand=CreatedBy,UserName'

    # build the date filtering string depending on datetime input
    if dt_from is None and dt_to is None:
        pass
    elif dt_from is None:
        # for API, we need to add a day to dt_to so we can use "lt" as filter
        to_str = (dt_to + _timedelta(days=1)).strftime('%Y-%m-%d')
        instr_url += f"&$filter=StartTime lt DateTime'{to_str}'"
    elif dt_to is None:
        # for API, we subtract day from dt_from to ensure we don't miss any
        # sessions close to the UTC offset (mostly for sessions scheduled at
        # midnight)
        from_str = (dt_from - _timedelta(days=1)).strftime('%Y-%m-%d')
        instr_url += f"&$filter=StartTime ge DateTime'{from_str}'"
    else:
        # we ask the API for all events that start on same day as dt_from
        from_str = (dt_from - _timedelta(days=1)).strftime('%Y-%m-%d')
        to_str = (dt_from + _timedelta(days=1)).strftime('%Y-%m-%d')
        instr_url += f"&$filter=StartTime ge DateTime'{from_str}' and " \
                     f"StartTime lt DateTime'{to_str}'"

    _logger.info("Fetching Nexus calendar events from {}".format(instr_url))
    r = _nexus_req(instr_url, _requests.get)
    _logger.info("  {} -- {} -- response: {}".format(instrument.name,
                                                     instr_url, r.status_code))

    if r.status_code == 401:
        # Authentication did not succeed and we received an *Unauthorized*
        # response from the server
        raise AuthenticationError('Could not authenticate to the Nexus '
                                  'SharePoint Calendar. Please check the '
                                  'credentials and try again.')

    if r.status_code == 200:
        # XML elements have a default namespace prefix (Atom format),
        # but lxml does not like an empty prefix, so it is easiest to
        # just sanitize the input and remove the namespaces as in
        # https://stackoverflow.com/a/18160164/1435788:
        xml = _re.sub(r'\sxmlns="[^"]+"', '', r.text, count=1)

        # API returns utf-8 encoding, so encode correctly
        xml = bytes(xml, encoding='utf-8')
        api_response = xml
    else:
        raise _requests.exceptions.\
            ConnectionError('Could not access Nexus SharePoint Calendar '
                            'API at "{}"'.format(instr_url))

    # identify which event matches the one we searched for (if there's more
    # than one, and we supplied both dt_from and dt_to) and remove the other
    # events from the api response as needed
    if dt_from is not None and dt_to is not None:
        doc = _etree.fromstring(api_response)
        entries = doc.findall('entry')
        # more than one calendar event was found for this date
        if len(entries) > 1:
            starts, ends = [], []
            for e in entries:
                ns = _etree.fromstring(xml).nsmap
                starts.append(e.find('.//d:StartTime', namespaces=ns).text)
                ends.append(e.find('.//d:EndTime', namespaces=ns).text)
            starts = [_datetime.fromisoformat(s) for s in starts]
            ends = [_datetime.fromisoformat(e) for e in ends]

            # starts and ends are lists of datetimes representing the start and
            # end of each event returned by the API, so get how much each
            # range overlaps with the range dt_from to dt_to
            overlaps = [
                _get_timespan_overlap((dt_from, dt_to), (s, e))
                for s, e in zip(starts, ends)
            ]

            # find which 'entry' is the one that matches our timespan
            max_overlap = overlaps.index(max(overlaps))
            # create a list of entry indices to remove by excluding the one
            # with maximal overlap
            to_remove = list(range(len(overlaps)))
            del to_remove[max_overlap]

            # loop through in reverse order so we don't mess up the numbering
            # of the entry elements
            for idx in to_remove[::-1]:
                # XPath numbering starts at 1, so add one to idx
                doc.remove(doc.find(f'entry[{idx + 1}]'))

            # api_response will now have non-relevant entry items removed
            api_response = _etree.tostring(doc)

    return api_response
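
# The overlap-matching step above relies on a _get_timespan_overlap helper
# that is not shown here; the following is an assumed, minimal sketch of
# that idea: the overlap of two (start, end) spans is the length of their
# intersection, clamped at zero.
def _example_timespan_overlap(span1, span2):
    latest_start = max(span1[0], span2[0])
    earliest_end = min(span1[1], span2[1])
    return max(earliest_end - latest_start, _timedelta(0))
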
def register_grids(title, folder, gridlist, time=None, initial=None,
                   dtime=None, namefmt=None, fill_method=0, fill_value=0,
                   extremes=0, extreme_value=0, description='', units=''):
    # Check inputs and create time vector
    op = -1  # type of input date

    if type(time) in [list, tuple]:
        time = list(time)
        if type(time[0]) is not _datetime:
            raise TypeError('Parameter time must have datetime elements')
        op = 0  # input time

    elif initial is not None and dtime is not None:
        if type(initial) is not _datetime:
            raise TypeError('Parameter initial must be a datetime')
        if type(dtime) not in [_timedelta, int]:
            raise TypeError('Parameter dtime must be a timedelta or an int')
        elif type(dtime) is int:
            dtime = _timedelta(dtime)
        op = 1

    elif namefmt is not None:
        if type(namefmt) is not str:
            raise TypeError('Parameter namefmt must be a string')
        op = 2

    if op == -1:
        raise IOError('Time values missing (time, initial, dtime, namefmt)')

    # Get time list
    if op == 1:  # use initial and dtime
        nl = len(gridlist)
        time = [initial + dtime * cnt for cnt in range(nl)]
    elif op == 2:
        time = []
        for filename in gridlist:
            basename = _os.path.basename(_os.path.splitext(filename)[0])
            time.append(_datetime.strptime(basename, namefmt))

    # Check time and grid list
    if len(time) != len(gridlist):
        raise IOError('time and gridlist must have the same number of elements')

    # Register time grids
    df = _pd.DataFrame(data=gridlist, columns=['grids'])
    df.index = _pd.to_datetime(time)  # register time index
    df.index.name = 'date'
    # sort by date
    df.sort_index(ascending=True, inplace=True)

    # Create object
    title = _os.path.basename(_os.path.splitext(title)[0])
    tgo = TimeGridObj()                # create temporal object
    tgo.dataset = df                   # save pandas frame
    tgo.folder = folder                # set folder data base
    tgo.title = title                  # set title
    tgo.fill_method = fill_method      # set filling method
    tgo.fill_value = fill_value        # set filling value
    tgo.extremes = extremes            # set extremes method
    tgo.extreme_value = extreme_value  # set extreme value
    tgo.description = description      # set description
    tgo.units = units                  # set units

    # Register file
    tgo.register()
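
# Hedged usage sketches for the three ways register_grids accepts time
# information (explicit datetime list, initial + dtime, or a file-name
# format). File names and the folder are illustrative only.
def _example_register_grids():
    from datetime import datetime, timedelta
    grids = ['rain_20200101.tif', 'rain_20200102.tif']
    # 1) explicit list of datetimes, one per grid
    register_grids('rain', '/data/grids', grids,
                   time=[datetime(2020, 1, 1), datetime(2020, 1, 2)])
    # 2) initial date plus a fixed step (an int is taken as days)
    register_grids('rain', '/data/grids', grids,
                   initial=datetime(2020, 1, 1), dtime=timedelta(days=1))
    # 3) parse the dates from the file names themselves
    register_grids('rain', '/data/grids', grids, namefmt='rain_%Y%m%d')
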
 def _sweep(self):
     cutoff_time = _datetime.now() - _timedelta(minutes=self.max_minutes)
     self._list = [x for x in self._list if x[0] >= cutoff_time]
Exemple #29
0
 def func(timestamp):
     for key in _TIME_KEYS[index + 1:]:
         timestamp -= _timedelta(**{f'{key}s': getattr(timestamp, key)})
     return timestamp
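
# A self-contained sketch of the truncation idea above, with an assumed
# coarse-to-fine key ordering: subtracting each finer component zeroes it,
# rounding the timestamp down to the chosen resolution (here, the hour).
def _example_truncate_to_hour(timestamp):
    from datetime import timedelta
    keys = ('hour', 'minute', 'second', 'microsecond')  # assumed _TIME_KEYS tail
    for key in keys[1:]:
        timestamp -= timedelta(**{f'{key}s': getattr(timestamp, key)})
    return timestamp
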
Exemple #30
0
    'x33s!hGv',
    'wc6s[<P=',
    '*rQk$w7J',
    '?}RzK3ks'
]

INVALID_PASSWORDS = [
    {'password': '******', 'missingRequirements': ['lengthError', 'digitError', 'symbolError']},
    {'password': '******', 'missingRequirements': ['digitError', 'symbolError']},
    {'password': '******', 'missingRequirements': ['symbolError']},
    {'password': '******', 'missingRequirements': ['lengthError', 'digitError']},
    {'password': '******', 'missingRequirements': ['lowercaseError', 'digitError', 'symbolError']},
    {'password': '******', 'missingRequirements': ['lowercaseError', 'uppercaseError', 'symbolError']},
]

ONE_DAY = _timedelta(days=1)
def _utctoday():  # noqa:E302
    return datetime.utcnow().replace(tzinfo=timezone.utc).date()


EXISTING_BILLS = [
    {
        'id': 11, 'type': BillType.Monthly, 'name': 'Cellphone',
        'cost': 39.94, 'date': _utctoday().replace(day=9)
    },
    {
        'id': 12, 'type': BillType.Monthly, 'name': 'Rent',
        'cost': 1492.66, 'date': _utctoday().replace(day=1)
    },
    {
        'id': 13, 'type': BillType.Yearly, 'name': 'Domain Name',
Exemple #31
0
jupiter_mu = jupiter_mass * constant_of_gravitation
saturn_mu = saturn_mass * constant_of_gravitation
uranus_mu = uranus_mass * constant_of_gravitation
neptune_mu = neptune_mass * constant_of_gravitation

mercury_radius_polar = mercury_radius_mean = mercury_radius_equatorial
venus_radius_polar = venus_radius_mean = venus_radius_equatorial

# The following constants are not from IAU
earth_radius_mean = 6371.0 * _kilo
earth_radius_polar = 6356.8 * _kilo

mars_radius_mean = 3389.5 * _kilo
mars_radius_polar = 3376.2 * _kilo

jupiter_radius_mean = 69911 * _kilo
jupiter_radius_polar = 66854 * _kilo

saturn_radius_mean = 58232 * _kilo
saturn_radius_polar = 54364 * _kilo

uranus_radius_mean = 25362 * _kilo
uranus_radius_polar = 24973 * _kilo

neptune_radius_mean = 24622 * _kilo
neptune_radius_polar = 24341 * _kilo

# 4.1 s, 56 minutes, 23 hours
earth_sidereal_day = _timedelta(hours=23, minutes=56,
                                seconds=4.1).total_seconds()
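# (i.e. 23*3600 + 56*60 + 4.1 = 86164.1 seconds)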
Exemple #32
0
from calendar import month_abbr as _month_abbr
from calendar import month_name as _month_name
from datetime import datetime as _datetime
from datetime import timedelta as _timedelta
from datetime import timezone as _timezone

from . import constants as _constants
from . import format as _format


# ---------- Constants ----------

HISTORIC_DATA_NOTE = 'This is historic data from'

MONTH_NAME_TO_NUMBER = {v.lower(): k for k, v in enumerate(_month_name) if k > 0}
MONTH_SHORT_NAME_TO_NUMBER = {v.lower(): k for k, v in enumerate(_month_abbr) if k > 0}

FIFTEEN_HOURS: _timedelta = _timedelta(hours=15)
FIVE_MINUTES: _timedelta = _timedelta(minutes=5)
ONE_DAY: _timedelta = _timedelta(days=1)
ONE_SECOND: _timedelta = _timedelta(seconds=1)
ONE_WEEK: _timedelta = _timedelta(days=7)


# ---------- Functions ----------

def get_first_of_following_month(utc_now: _datetime) -> _datetime:
    year = utc_now.year
    month = utc_now.month + 1
    if (month == 13):
        year += 1
        month = 1
    result = _datetime(year, month, 1, 0, 0, 0, 0, _timezone.utc)
    return result
Exemple #33
0
 def now(self):
     # Shift UTC by the signed offset in hours; timedelta handles the sign.
     return _datetime.utcnow() + _timedelta(hours=self.utc)
Exemple #34
0
 def __init__(self, minutes):
     if abs(minutes) > 1439:
         raise ValueError("Time-zone offset is too large,", minutes)
     self.__minutes = minutes
     self.__offset = _timedelta(minutes=minutes)
Exemple #35
0
 def _sum_args(self):
     return {'start': _datetime(2000, 1, 1, tzinfo=_tz.FixedOffset(30)),
             'offset': _timedelta(days=399, hours=4, minutes=5, seconds=6,
                                  microseconds=7)}
Exemple #36
0
 def __init__(self, *a, **kw):
     _tzinfo.__init__(self, *a, **kw)
     self.__dstoffset = self.__stdoffset = _timedelta(seconds=-_time.timezone)
     if _time.daylight: self.__dstoffset = _timedelta(seconds=-_time.altzone)
     self.__dstdiff = self.__dstoffset - self.__stdoffset
Exemple #37
0
from pysphere.ZSI import _floattypes, _inttypes, EvaluateException
from pysphere.ZSI.TC import SimpleType
from pysphere.ZSI.wstools.Namespaces import SCHEMA
import operator, re, time as _time
from time import mktime as _mktime, localtime as _localtime, gmtime as _gmtime
from datetime import tzinfo as _tzinfo, timedelta as _timedelta,\
    datetime as _datetime, MAXYEAR
from math import modf as _modf

MINYEAR = 1970

# Year, month or day may be None
_niltime = [None] * 3 + [0] * 6

#### Code added to check current timezone offset
_zero = _timedelta(0)


class _localtimezone(_tzinfo):
    def __init__(self, *a, **kw):
        _tzinfo.__init__(self, *a, **kw)
        self.__dstoffset = self.__stdoffset = _timedelta(
            seconds=-_time.timezone)
        if _time.daylight:
            self.__dstoffset = _timedelta(seconds=-_time.altzone)
        self.__dstdiff = self.__dstoffset - self.__stdoffset

    """ """

    def dst(self, dt):
        """datetime -> DST offset in minutes east of UTC."""
Exemple #38
0
def download(start=None, end=None, form='4'):
    """Download insider trading data from SEC
    :Parameters:
        start: str
            Download start date string (YYYY-MM-DD) or datetime.
            Default is 1 day ago
        end: str
            Download end date string (YYYY-MM-DD) or datetime.
            Default is yesterday
        form: str
            Desired SEC form to return. Form types and descriptions can
            be found at https://www.sec.gov/forms
            Default is form 4 (and currently the only one supported)
    :note:
        The SEC is always a day behind so this script will never
        attempt to download today's data
    """
    # create list of urls to download
    if not start:
        start=(_datetime.today()-_timedelta(days=1)).strftime('%Y-%m-%d')
    if not end:
        end=(_datetime.today()-_timedelta(days=1)).strftime('%Y-%m-%d')
        
    urls = ['https://www.sec.gov/Archives/edgar/daily-index/'+
            date.strftime('%Y')+'/QTR'+str((int(date.month)-1)//3 + 1)+
            '/company.'+ date.strftime('%Y%m%d') + '.idx'
            for date in _date_list_generator(start, end)]
    
    # download using threadpool
    results = _download_thread(urls)

    # convert results to pandas dataframe
    dfs = [_idx_to_dataframe(page, url) for page, url in results]
    
    # combine dataframes
    df = _append_dataframes(dfs)
    
    # filter to desired forms
    if form:
        if not isinstance(form, str):
            raise TypeError("form type must be a string")
        else:
            df = df.loc[df['form_type'] == form].reset_index(drop=True)
            
    # create list of urls for each form
    urls = ['https://www.sec.gov/Archives/'+x for x in list(df.file_name)]
        
    # download forms using threadpool
    results = _download_thread(urls)
    
    # qc forms and remove forms hard for the machine to interpret
    dfs = [_form_qc(x[0], x[1]) for x in results]
    dfs = [x for x in dfs if x is not None]
    
    # combine dataframes
    df = _append_dataframes(dfs)
    
    # clean dataframe
    df = _clean_download(df)
    
    # return dataframe
    return df
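
# A hedged usage sketch of the downloader above: pull Form 4 filings for an
# explicit date range, or fall back to yesterday when no dates are given.
def _example_download_form4():
    yesterday_only = download()                                  # defaults
    march_window = download(start='2021-03-01', end='2021-03-03', form='4')
    return yesterday_only, march_window
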
Exemple #39
0
import certifi as _certifi
import tempfile as _tempfile
import os as _os
import subprocess as _sp
from datetime import timedelta as _timedelta
from os.path import getmtime as _getmtime
from warnings import warn
import logging as _logging
import sys as _sys

_logger = _logging.getLogger(__name__)
_logger.setLevel(_logging.INFO)

# hours to add to datetime objects (hack for poole testing -- should be -2 if
# running tests from Mountain Time on files in Eastern Time)
tz_offset = _timedelta(hours=0)


def setup_loggers(log_level):
    """
    Set logging level of all NexusLIMS loggers

    Parameters
    ----------
    log_level : int
        The level of logging, such as ``logging.DEBUG``
    """
    _logging.basicConfig(format='%(asctime)s %(name)s %(levelname)s: '
                         '%(message)s',
                         level=log_level)
    loggers = [
Exemple #40
0
def download_pea_prods(dest,
                       most_recent=True,
                       dates=None,
                       ac='igs',
                       out_dict=False,
                       trop_vmf3=False,
                       brd_typ='igs',
                       snx_typ='igs',
                       clk_sel='clk',
                       repro3=False):
    '''
    Download necessary pea product files for date/s provided
    '''
    if dest[-1] != '/':
        dest += '/'

    if most_recent:
        snx_vars_out = download_most_recent(dest=dest,
                                            f_type='snx',
                                            ac=snx_typ,
                                            dwn_src='cddis',
                                            f_dict_out=True,
                                            gpswkD_out=True,
                                            ftps_out=True)
        f_dict, gpswkD_out, ftps = snx_vars_out

        clk_vars_out = download_most_recent(dest=dest,
                                            f_type=clk_sel,
                                            ac=ac,
                                            dwn_src='cddis',
                                            f_dict_out=True,
                                            gpswkD_out=True,
                                            ftps_out=True)
        f_dict_update, gpswkD_out, ftps = clk_vars_out
        f_dict.update(f_dict_update)
        gpswkD = gpswkD_out['clk_gpswkD'][0]

        if most_recent == True:
            num = 1
        else:
            num = most_recent

        dt0 = gpswkD2dt(gpswkD)
        dtn = dt0 - _timedelta(days=num - 1)

        if dtn == dt0:
            dt_list = [dt0]
        else:
            dates = _pd.date_range(start=str(dtn), end=str(dt0), freq='1D')
            dates = list(dates)
            dates.reverse()
            dt_list = sorted(dates_type_convert(dates))
    else:
        dt_list = sorted(dates_type_convert(dates))

    dest_pth = _Path(dest)
    # Output dict for the files that are downloaded
    if not out_dict:
        out_dict = {
            'dates': dt_list,
            'atxfiles': ['igs14.atx'],
            'blqfiles': ['OLOAD_GO.BLQ']
        }

    # Get the ATX file if not present already:
    if not (dest_pth / 'igs14.atx').is_file():
        if not dest_pth.is_dir():
            dest_pth.mkdir(parents=True)
        url = 'https://files.igs.org/pub/station/general/igs14.atx'
        check_n_download_url(url, dwndir=dest)

    # Get the BLQ file if not present already:
    if not (dest_pth / 'OLOAD_GO.BLQ').is_file():
        url = 'https://peanpod.s3-ap-southeast-2.amazonaws.com/pea/examples/EX03/products/OLOAD_GO.BLQ'
        check_n_download_url(url, dwndir=dest)

    # For the troposphere, have two options: gpt2 or vmf3. If flag is set to True, download 6-hourly trop files:
    if trop_vmf3:
        # If directory for the Tropospheric model files doesn't exist, create it:
        if not (dest_pth / 'grid5').is_dir():
            (dest_pth / 'grid5').mkdir(parents=True)
        for dt in dt_list:
            year = dt.strftime('%Y')
            # Create urls to the four 6-hourly files associated with the tropospheric model
            begin_url = f'https://vmf.geo.tuwien.ac.at/trop_products/GRID/5x5/VMF3/VMF3_OP/{year}/'
            f_begin = 'VMF3_' + dt.strftime('%Y%m%d') + '.H'
            urls = [
                begin_url + f_begin + en for en in ['00', '06', '12', '18']
            ]
            urls.append(begin_url + 'VMF3_' +
                        (dt + _timedelta(days=1)).strftime('%Y%m%d') + '.H00')
            # Run through model files, downloading if they are not in directory
            for url in urls:
                if not (dest_pth / f'grid5/{url[-17:]}').is_file():
                    check_n_download_url(url, dwndir=str(dest_pth / 'grid5'))
    else:
        # Otherwise, check for GPT2 model file or download if necessary:
        if not (dest_pth / 'gpt_25.grd').is_file():
            url = 'https://peanpod.s3-ap-southeast-2.amazonaws.com/pea/examples/EX03/products/gpt_25.grd'
            check_n_download_url(url, dwndir=dest)

    if repro3:
        snx_typ = ac
    standards = ['sp3', 'erp', clk_sel]
    ac_typ_dict = {ac_sel: [] for ac_sel in [ac, brd_typ, snx_typ]}
    for typ in standards:
        ac_typ_dict[ac].append(typ)
    ac_typ_dict[brd_typ].append('rnx')

    if not most_recent:
        f_dict = {}
        ac_typ_dict[snx_typ].append('snx')

    # Download product files of each type from CDDIS for the given dates:
    for ac in ac_typ_dict:
        if most_recent:
            f_dict_update = download_prod(dates=dt_list,
                                          dest=dest,
                                          ac=ac,
                                          f_type=ac_typ_dict[ac],
                                          dwn_src='cddis',
                                          f_dict=True,
                                          ftps=ftps)
        elif repro3:
            f_dict_update = download_prod(dates=dt_list,
                                          dest=dest,
                                          ac=ac,
                                          f_type=ac_typ_dict[ac],
                                          dwn_src='cddis',
                                          f_dict=True,
                                          repro3=True)
        else:
            f_dict_update = download_prod(dates=dt_list,
                                          dest=dest,
                                          ac=ac,
                                          f_type=ac_typ_dict[ac],
                                          dwn_src='cddis',
                                          f_dict=True)
        f_dict.update(f_dict_update)

    f_types = []
    for el in list(ac_typ_dict.values()):
        for typ in el:
            f_types.append(typ)
    if most_recent:
        f_types.append('snx')

    # Prepare the output dictionary based on the downloaded files:
    for f_type in f_types:
        if f_type == 'rnx':
            out_dict['navfiles'] = sorted(f_dict[f_type])
        out_dict[f'{f_type}files'] = sorted(f_dict[f_type])

    return out_dict
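
# A hedged usage sketch: fetch the most recent set of IGS products needed by
# the pea into a local directory. The destination path is illustrative.
def _example_download_pea_prods():
    return download_pea_prods(dest='products/', most_recent=True,
                              ac='igs', brd_typ='igs', snx_typ='igs')
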
Exemple #41
0
from math import modf as _modf

_niltime = [
    0,
    0,
    0,  # year month day
    0,
    0,
    0,  # hour minute second
    0,
    0,
    0  # weekday, julian day, dst flag
]

#### Code added to check current timezone offset
_zero = _timedelta(0)
_dstoffset = _stdoffset = _timedelta(seconds=-_time.timezone)
if _time.daylight: _dstoffset = _timedelta(seconds=-_time.altzone)
_dstdiff = _dstoffset - _stdoffset


class _localtimezone(_tzinfo):
    """ """
    def dst(self, dt):
        """datetime -> DST offset in minutes east of UTC."""
        tt = _localtime(
            _mktime((dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second,
                     dt.weekday(), 0, -1)))
        if tt.tm_isdst > 0: return _dstdiff
        return _zero
	def __to_workingDays(self, odate):
		if not self.workindays:
			return odate
		if odate.weekday() == 5:  # Saturday -> shift to Monday
			return odate + _timedelta(days=2)
		if odate.weekday() == 6:  # Sunday -> shift to Monday
			return odate + _timedelta(days=1)
		return odate
 def insert(self, i, value, backdate_minutes=0):
     d = _datetime.now() - _timedelta(minutes=backdate_minutes)
     return self._list.insert(i, (d, value))
	def check_range(self):
		pp = self.startDate + _timedelta(days=280)
		days_left = (pp - _utc.now().date()).days
		return -4 <= days_left <= 280
	def __init__(self, startDate, Days=True):
		self.workindays = True
		self.__tmpCtrls = [self.__to_workingDays(startDate + _timedelta(days=(i if Days else i*7))) for i in self.limits]
Exemple #46
0
def spp(station, orbit, system="G", cut_off=7.0):
    start = time.time()  # Time of start
    if len(system) > 1:
        raise Warning(
            "SPP does not support multiple satellite system | This feature will be implemented in the next version"
        )
    observation_list = _observation_picker(station, system)
    gnss = gnssDataframe(station, orbit, system, cut_off)
    #-----------------------------------------------------------------------------
    if len(observation_list) >= 2:
        carrierPhase1 = getattr(gnss, observation_list[0][2])
        carrierPhase2 = getattr(gnss, observation_list[1][2])
        pseudorange1 = getattr(gnss, observation_list[0][3])
        pseudorange2 = getattr(gnss, observation_list[1][3])
        frequency1 = observation_list[0][4]
        frequency2 = observation_list[1][4]
    else:
        raise Warning("Ionosphere-free combination is not available")
    # ----------------------------------------------------------------------------
    gnss["Ionosphere_Free"] = (frequency1**2 * pseudorange1 - frequency2**2 *
                               pseudorange2) / (frequency1**2 - frequency2**2)
    gnss = gnss.dropna(subset=['Ionosphere_Free'])
    gnss["Travel_time"] = gnss["Ionosphere_Free"] / _CLIGHT
    gnss["X_Reception"], gnss["Y_Reception"], gnss[
        "Z_Reception"] = _reception_coord(gnss.X, gnss.Y, gnss.Z, gnss.Vx,
                                          gnss.Vy, gnss.Vz, gnss.Travel_time)
    epochList = gnss.index.get_level_values("Epoch").unique().sort_values()
    epoch_start = epochList[0]
    epoch_offset = _timedelta(seconds=300)
    epoch_interval = _timedelta(seconds=station.interval - 0.000001)
    epoch_stop = epochList[-1] + _timedelta(seconds=0.000001)
    approx_position = [
        station.approx_position[0], station.approx_position[1],
        station.approx_position[2]
    ]
    receiver_clock = station.receiver_clock
    position_list = []
    while True:
        epoch_step = epoch_start + epoch_interval
        gnss_temp = gnss.xs((slice(epoch_start, epoch_step))).copy()
        for iter in range(6):
            distance = _distance_euclidean(approx_position[0],
                                           approx_position[1],
                                           approx_position[2],
                                           gnss_temp.X_Reception,
                                           gnss_temp.Y_Reception,
                                           gnss_temp.Z_Reception)
            gnss_temp["Distance"] = distance + _sagnac(
                approx_position[0], approx_position[1], approx_position[2],
                gnss_temp.X_Reception, gnss_temp.Y_Reception,
                gnss_temp.Z_Reception)
            gnss_temp["Azimuth"], gnss_temp["Elevation"], gnss_temp[
                "Zenith"] = _azel(station.approx_position[0],
                                  station.approx_position[1],
                                  station.approx_position[2], gnss_temp.X,
                                  gnss_temp.Y, gnss_temp.Z, gnss_temp.Distance)
            gnss_temp["Tropo"] = tropospheric_delay(station.approx_position[0],
                                                    station.approx_position[1],
                                                    station.approx_position[2],
                                                    gnss_temp.Elevation,
                                                    station.epoch)
            coeffMatrix = _np.zeros([len(gnss_temp), 4])
            coeffMatrix[:, 0] = (approx_position[0] -
                                 gnss_temp.X_Reception) / gnss_temp.Distance
            coeffMatrix[:, 1] = (approx_position[1] -
                                 gnss_temp.Y_Reception) / gnss_temp.Distance
            coeffMatrix[:, 2] = (approx_position[2] -
                                 gnss_temp.Z_Reception) / gnss_temp.Distance
            coeffMatrix[:, 3] = 1
            lMatrix = gnss_temp.Ionosphere_Free - gnss_temp.Distance + _CLIGHT * (
                gnss_temp.DeltaTSV + gnss_temp.Relativistic_clock -
                receiver_clock) - gnss_temp.Tropo
            lMatrix = _np.array(lMatrix)
            try:
                linearEquationSolution = _np.linalg.lstsq(coeffMatrix,
                                                          lMatrix,
                                                          rcond=None)
                xMatrix = linearEquationSolution[0]
                pos = [
                    approx_position[0] + xMatrix[0],
                    approx_position[1] + xMatrix[1],
                    approx_position[2] + xMatrix[2],
                    receiver_clock + xMatrix[3] / _CLIGHT
                ]
                approx_position[0], approx_position[1], approx_position[
                    2], receiver_clock = pos[0], pos[1], pos[2], pos[3]
            except:
                print("Cannot solve normal equations for epoch", epoch_start,
                      "| Skipping...")
        position_list.append(pos)
        epoch_start += epoch_offset
        epoch_step += epoch_offset
        if (epoch_step - epoch_stop) > _timedelta(seconds=station.interval):
            break
    x_coordinate = _np.mean([pos[0] for pos in position_list])
    y_coordinate = _np.mean([pos[1] for pos in position_list])
    z_coordinate = _np.mean([pos[2] for pos in position_list])
    rec_clock = _np.mean([pos[3] for pos in position_list])
    finish = time.time()  # Time of finish
    print("Pseudorange calculation is done in",
          "{0:.2f}".format(finish - start), "seconds.")
    return (x_coordinate, y_coordinate, z_coordinate, rec_clock)
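
# A hedged usage sketch: compute a single-point position from station and
# orbit objects prepared elsewhere (their construction is not shown here).
def _example_spp(station, orbit):
    x, y, z, receiver_clock = spp(station, orbit, system="G", cut_off=7.0)
    return x, y, z, receiver_clock
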
Exemple #47
0
from ZSI.TC import TypeCode, SimpleType
from ZSI.wstools.Namespaces import SCHEMA
import operator, re, time as _time
from time import mktime as _mktime, localtime as _localtime, gmtime as _gmtime
from datetime import tzinfo as _tzinfo, timedelta as _timedelta,\
    datetime as _datetime
from math import modf as _modf

_niltime = [
    0, 0, 0,    # year month day
    0, 0, 0,    # hour minute second
    0, 0, 0     # weekday, julian day, dst flag
]

#### Code added to check current timezone offset
_zero = _timedelta(0)
_dstoffset = _stdoffset = _timedelta(seconds=-_time.timezone)
if _time.daylight: _dstoffset = _timedelta(seconds=-_time.altzone)
_dstdiff = _dstoffset - _stdoffset


class _localtimezone(_tzinfo):
    """ """
    def dst(self, dt):
        """datetime -> DST offset in minutes east of UTC."""
        tt = _localtime(_mktime((dt.year, dt.month, dt.day,
                 dt.hour, dt.minute, dt.second, dt.weekday(), 0, -1)))
        if tt.tm_isdst > 0: return _dstdiff
        return _zero
    
    #def fromutc(...)
Exemple #48
0
# coding: utf-8
from __future__ import absolute_import

import random
import re
from datetime import (
    date,
    time,
    datetime,
    timedelta as _timedelta,
)

from .. import compat

ZERO_DELTA = _timedelta()


def _to_datetime(obj):
    if isinstance(obj, datetime):
        return obj
    if isinstance(obj, date):
        return datetime.combine(obj, time(0))
    if isinstance(obj, compat.inttype):
        return datetime(obj, 1, 1)
    if isinstance(obj, (list, tuple)):
        return datetime(*obj)
    if isinstance(obj, dict):
        return datetime(**obj)
    if isinstance(obj, compat.basestring):
        obj = re.findall(r'[0-9]+', obj)
        return datetime(*map(int, obj))
Exemple #49
0
'''

from ZSI import _copyright, _floattypes, _inttypes, _get_idstr, EvaluateException
from ZSI.TC import TypeCode, SimpleType
from ZSI.wstools.Namespaces import SCHEMA
import operator, re, time as _time
from time import mktime as _mktime, localtime as _localtime, gmtime as _gmtime
from datetime import tzinfo as _tzinfo, timedelta as _timedelta,\
    datetime as _datetime, MINYEAR, MAXYEAR
from math import modf as _modf

# Year, month or day may be None
_niltime = [None] * 3 + [0] * 6

#### Code added to check current timezone offset
_zero = _timedelta(0)

class _localtimezone(_tzinfo):
    def __init__(self, *a, **kw):
        _tzinfo.__init__(self, *a, **kw)
        self.__dstoffset = self.__stdoffset = _timedelta(seconds=-_time.timezone)
        if _time.daylight: self.__dstoffset = _timedelta(seconds=-_time.altzone)
        self.__dstdiff = self.__dstoffset - self.__stdoffset
        
    """ """
    def dst(self, dt):
        """datetime -> DST offset in minutes east of UTC."""
        tt = _localtime(_mktime((dt.year, dt.month, dt.day,
                 dt.hour, dt.minute, dt.second, dt.weekday(), 0, -1)))
        if tt.tm_isdst > 0: return self.__dstdiff
        return _zero
Exemple #50
0
from datetime import datetime as _datetime
from datetime import timedelta as _timedelta
try:
    from .logger import Logger as _Logger
except:
    from logger import Logger as _Logger
from threading import currentThread as _currentThread

__author__ = "Sergi Blanch-Torné"
__copyright__ = "Copyright 2016, CELLS / ALBA Synchrotron"
__license__ = "GPLv3+"

__all__ = ["Locker"]

DEFAULT_EXPIRATION_TIME = 60  # seconds
UPPER_LIMIT_EXPIRATION_TIME = _timedelta(0, DEFAULT_EXPIRATION_TIME * 10, 0)


class Locker(_Logger):
    """
        Object to control the access to certain areas of the code, similar idea
        than a Semaphore or a Lock from the threading library, but not the
        same.

        An external process talks with a service thread, and what the thread
        does
    """
    def __init__(self, *args, **kargs):
        super(Locker, self).__init__(*args, **kargs)
        self._owner = None
        self._when = None
Exemple #51
0
 def __init__(self, offset):
     self.__offset = _timedelta(minutes=offset)
Exemple #52
0
# coding: utf-8
from __future__ import absolute_import

import random
import re
from datetime import (
    date,
    time,
    datetime,
    timedelta as _timedelta,
)

from .. import compat

ZERO_DELTA = _timedelta()


def _to_datetime(obj):
    if isinstance(obj, datetime):
        return obj
    if isinstance(obj, date):
        return datetime.combine(obj, time(0))
    if isinstance(obj, compat.inttype):
        return datetime(obj, 1, 1)
    if isinstance(obj, (list, tuple)):
        return datetime(*obj)
    if isinstance(obj, dict):
        return datetime(**obj)
    if isinstance(obj, compat.basestring):
        obj = re.findall(r"[0-9]+", obj)
        return datetime(*map(int, obj))