Example #1
    def delta_period(self, datas, code, timecode):
        """Consolidate a series of (datetime, value) readings into per-period deltas.

        datas : list of (datetime, value) pairs
        code : string, key prefix for the result
        timecode : strftime format string that defines the period,
                   e.g. '%Y-%m-%d' for daily buckets

        Return : dict mapping "<code>_<period>" to the accumulated delta
        """
        datas.sort()
        old = datas[0]
        result = {}
        delta = 0
        for data in datas:
            if date.strftime(data[0], timecode) == date.strftime(old[0], timecode):
                delta = delta + (data[1] - old[1])
                key = "{}_{}".format(code, date.strftime(data[0], timecode))
                result[key] = delta
            else:
                delta = delta + (data[1] - old[1])
                key = "{}_{}".format(code, date.strftime(old[0], timecode))
                result[key] = delta
                delta = 0
            old = data
        return result
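A hand-run illustration of the bucketing above (the consumer object and meter readings are hypothetical; `date` is assumed to be the datetime.date class, as in the other examples on this page):

from datetime import datetime

readings = [
    (datetime(2021, 5, 1, 8, 0), 100),
    (datetime(2021, 5, 1, 20, 0), 130),
    (datetime(2021, 5, 2, 8, 0), 150),
]
# with timecode='%Y-%m-%d' both May 1st readings share one key, so the
# 30-unit delta accumulates under 'meter_2021-05-01'; the step across the
# day boundary (150-130) is also credited to May 1st before delta resets,
# and the trailing day never gets a key because no later reading follows it
print(consumer.delta_period(readings, 'meter', '%Y-%m-%d'))
# {'meter_2021-05-01': 50}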
Example #2
def generate_badge():
    conn = get_conn(DB)
    c = conn.cursor()
    c.execute("SELECT * FROM samples ORDER BY DATE DESC LIMIT 1")
    row = c.fetchone()
    c.close()

    date = dateparser.parse(row[0])
    co2 = str(int(row[1]))

    timestamp = date.strftime("%I:%M%p")
    datestamp = date.strftime("%m/%d") if (datetime.now() - date).days > 0 else ""

    print "Generating badge: %s - %s %s" % (co2, timestamp, datestamp)

    image = Image.open("template.png")
    draw = ImageDraw.ImageDraw(image)
    font = ImageFont.truetype("arial.ttf", 10)
    draw.setfont(font)
    dw, dh = font.getsize(datestamp)
    draw.text((30 - dw / 2, 78 - dh), datestamp)
    tw, th = font.getsize(timestamp)
    draw.text((30 - tw / 2, 77 - th - dh), timestamp)
    font = ImageFont.truetype("arial.ttf", 26)
    draw.setfont(font)
    cw, ch = font.getsize(co2)
    draw.text((30 - cw / 2, 8), co2)
    image.save("webroot/badge.png")
Example #3
def load_project_variants_from_vcf(project_id, vcf_files):
    """
    Load any families and cohorts in this project that aren't loaded already
    """
    print("Called load_project_variants_from_vcf on " + str(vcf_files))
    print("Loading project %s" % project_id)
    print(date.strftime(datetime.now(), "%m/%d/%Y %H:%M:%S  -- loading project: " + project_id + " - db.variants cache"))
    project = Project.objects.get(project_id=project_id)

    for vcf_file in vcf_files:
        r = vcf.VCFReader(filename=vcf_file)
        if "CSQ" in r.infos:
            mall.get_annotator().add_preannotated_vcf_file(vcf_file)
        else:
            mall.get_annotator().add_vcf_file_to_annotator(vcf_file)

    # batch load families by VCF file
    print("project.families_by_vcf(): " + str(project.families_by_vcf()))
    for vcf_file, families in project.families_by_vcf().items():
        if vcf_file not in vcf_files:
            print("Skipping %(vcf_file)s since it's not in %(vcf_files)s" % locals())
            continue

        #families = [f for f in families if get_mall(project.project_id).variant_store.get_family_status(project_id, f.family_id) != 'loaded']
        print("Loading families for VCF file: " + vcf_file)
        for i in xrange(0, len(families), settings.FAMILY_LOAD_BATCH_SIZE):
            #print(date.strftime(datetime.now(), "%m/%d/%Y %H:%M:%S  -- loading project: " + project_id + " - families batch %d - %d families" % (i, len(families[i:i+settings.FAMILY_LOAD_BATCH_SIZE]))))
            load_variants_for_family_list(project, families[i:i+settings.FAMILY_LOAD_BATCH_SIZE], vcf_file, mark_as_loaded=True)
            print(date.strftime(datetime.now(), "%m/%d/%Y %H:%M:%S  -- finished loading project: " + project_id))
Example #4
def create_main_html(reqnums):
    month_data = {}
    lastmonth = 'none'
    for i,row in reqnums.iterrows():
        date = datetime.strptime(str(row[0]),'%Y-%m-%d %H:%M:%S.%f')
        reqnum = trunc(row[1])
        if date.strftime('%B_%Y') not in month_data.keys():
            month_data[date.strftime('%B_%Y')] = [(date,reqnum)]
        else:
            month_data[date.strftime('%B_%Y')].append((date,reqnum))

    month_keys = sorted(datetime.strptime(m, '%B_%Y') for m in month_data.keys())
    month_keys = [m.strftime('%B_%Y') for m in month_keys]

    path = app.config["STATIC_PATH"]
    archivefile = "report_archive_default.html"
    archivepath = os.path.join(path,archivefile)
    with open(archivepath,'w') as fh:
        firsttime = 1
        for month in reversed(month_keys):
            if firsttime == 1:
                fh.write("<div id='month'><h3>{month}</h3>\n".format(month=month_data[month][0][0].strftime('%B %Y')))
                firsttime = 0
            else:
                fh.write("</div><br>\n<div id='month'><h3>{month}</h3>\n".format(month=month_data[month][0][0].strftime('%B %Y')))
            for row in month_data[month]:
                fh.write("  <button onclick=\"requestDoc('main','/static/reports/{rq}/report_{rq}.html')\">Report {rq}</button>\n".format(rq=row[1]))
        fh.write('</div>')
Example #5
def esPruneIndexes():
    if options.output == 'syslog':
        logger.addHandler(SysLogHandler(address=(options.sysloghostname, options.syslogport)))
    else:
        sh = logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)

    logger.debug('started')
    try:
        es = pyes.ES((list('{0}'.format(s) for s in options.esservers)))
        indices = es.indices.stats()['indices'].keys()
        # do the pruning
        for (index, dobackup, rotation, pruning) in zip(options.indices,
            options.dobackup, options.rotation, options.pruning):
            try:
                if pruning != '0':
                    index_to_prune = index
                    if rotation == 'daily':
                        idate = date.strftime(datetime.utcnow()-timedelta(days=int(pruning)),'%Y%m%d')
                        index_to_prune += '-%s' % idate
                    elif rotation == 'monthly':
                        idate = date.strftime(datetime.utcnow()-timedelta(days=31*int(pruning)),'%Y%m')
                        index_to_prune += '-%s' % idate

                    if index_to_prune in indices:
                        logger.info('Deleting index: %s' % index_to_prune)
                        es.indices.delete_index(index_to_prune)
                    else:
                        logger.error('Error deleting index %s, index missing' % index_to_prune)
            except Exception as e:
                logger.error("Unhandled exception while deleting %s, terminating: %r" % (index_to_prune, e))

    except Exception as e:
        logger.error("Unhandled exception, terminating: %r"%e)
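The daily/monthly suffix arithmetic above is easy to verify in isolation; a minimal sketch using only the standard library (index names assumed to follow the events-YYYYMMDD convention used here):

from datetime import datetime, timedelta

def index_to_prune(base, rotation, pruning):
    # mirrors the logic above: N days back for daily rotation,
    # roughly N months (31*N days) back for monthly rotation
    if rotation == 'daily':
        suffix = (datetime.utcnow() - timedelta(days=int(pruning))).strftime('%Y%m%d')
    elif rotation == 'monthly':
        suffix = (datetime.utcnow() - timedelta(days=31 * int(pruning))).strftime('%Y%m')
    else:
        return base
    return '%s-%s' % (base, suffix)

# e.g. with pruning='20' on 2021-05-21: index_to_prune('events', 'daily', '20') -> 'events-20210501'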
Example #6
def writeCaption(date, headline):
    #Generate the text of the Tumblr post based on the Guardian headline
    #headline = getGuardianHeadline(date + timedelta(1))
    if len(headline) > 0:
        title = headline['fields']['headline'].encode('utf_8')
        if 'trailText' in headline['fields']:
            standfirst = headline['fields']['trailText']
        else:
            standfirst = headline['fields']['standfirst']
        standfirst = standfirst.encode('utf_8')
        webUrl = headline['webUrl']
        #[FIXME] Noticed that trailText can contain html code, it's usually <strong>, do I leave it?
    else:
        title = "Nothing of importance happened"
        standfirst = ""
        webUrl = "#"

    formattedLink = '<a href="{link}">{headline}</a><br/>{standfirst}'.format(link=webUrl, headline=title, standfirst=standfirst)
    caption = "<p>On this great day of {date} {earth}.</p> We note that:<br/>{link}"
    tweet = "On {date} {earth}:"

    earth = random.choice(config.EARTH_METAPHORS)
    # Idea if no data for that date
    #caption = "<p>The world was nowhere to be found on {date}</p>But we can note that:<br/>{link}"
    #tweet = "Yesterday the world was nowhere to be found. {link}"

    caption = caption.format(date=date.strftime(config.DATE_LONGFORM),link=formattedLink, earth=earth)
    tweet = tweet.format(date=date.strftime(config.DATE_LONGFORM), earth=earth)
    tweet = tweet + ' {link}'

    return {'caption':caption, 'tweet':tweet}
Example #7
def clearESCache():
    es=esConnect(None)
    indexes=es.indices.status()['indices']
    # assumes index names like events-YYYYMMDD etc.
    # used to avoid operating on current indexes
    dtNow = datetime.utcnow()
    indexSuffix = date.strftime(dtNow, '%Y%m%d')
    previousSuffix = date.strftime(dtNow - timedelta(days=1), '%Y%m%d')
    for targetindex in sorted(indexes.keys()):
        if indexSuffix not in targetindex and previousSuffix not in targetindex:
            url = 'http://{0}/{1}/_stats'.format(random.choice(options.esservers), targetindex)
            r = requests.get(url)
            if r.status_code == 200:
                indexstats = json.loads(r.text)
                if indexstats['_all']['total']['search']['query_current'] == 0:
                    fielddata = indexstats['_all']['total']['fielddata']['memory_size_in_bytes']
                    if fielddata > 0:
                        logger.info('target: {0}: field data {1}'.format(targetindex, indexstats['_all']['total']['fielddata']['memory_size_in_bytes']))
                        clearurl = 'http://{0}/{1}/_cache/clear'.format(random.choice(options.esservers), targetindex)
                        clearRequest = requests.post(clearurl)
                        logger.info(clearRequest.text)
                        if options.conservative:
                            return
                else:
                    logger.debug('{0}: <ignoring due to current search > field data {1}'.format(targetindex, indexstats['_all']['total']['fielddata']['memory_size_in_bytes']))
            else:
                logger.error('{0} returned {1}'.format(url, r.status_code))
Example #8
 def __source_v2_xls_to_csv_single(self, src_dir, dest_dir, date):
     src_file = self.get_filename(src_dir, date, 'xls')
     assert os.path.isfile(src_file)
 
     if date >= datetime(2007, 4, 1).date() or date < datetime(2000, 9, 1).date():
         return
            
     book = xlrd.open_workbook(src_file)
     sheet = book.sheet_by_index(0)
     assert sheet.ncols == 21
     assert sheet.cell(4, 0).value.strip() in ('Code & Name', 'CODE & NAME')
     assert sheet.cell(4, 11).value.strip() in ('Code & Name', 'CODE & NAME')
     assert sheet.cell(4, 8).value.strip() in ('PBR', 'в▐в╨вр')
     assert sheet.cell(4, 19).value.strip() in ('PBR', 'в▐в╨вр')
     
     dest_file = self.get_filename(dest_dir, date, 'csv')
     fd = open(dest_file, 'w', newline='')
     csv_writer = csv.writer(fd)
     for r in self.__build_sheet_records(sheet, 0, 5):
         r = [date.strftime('%Y-%m-%d')] + r
         assert len(r) == 6
         csv_writer.writerow(r)  
         self.LOGGER.debug('''%s => %s''' % (r, dest_file))
     for r in self.__build_sheet_records(sheet, 11, 5):
         r = [date.strftime('%Y-%m-%d')] + r
         assert len(r) == 6
         csv_writer.writerow(r)  
         self.LOGGER.debug('''%s => %s''' % (r, dest_file))
     fd.close()
Example #9
def write_org(out=ORG_FNAME):
    a, b, c = read_index(FUTURE_FNAME)

    lines = []

    def w(s):
        lines.append(s + "\n")

    w('* Schedule')

    for date, descr, time, location in parse_table(b):
        date_str = date.strftime("%Y-%m-%d %a")
        year = date.strftime("(%Y)")
        parsed_time = parse_time(time)

        if in_past(date, parsed_time):
            continue

        if parsed_time:
            (start_hr, start_min), (end_hr, end_min) = parsed_time
            if (start_hr, start_min) == (end_hr, end_min):
                timestamp = "<%s %s:%s>" % (date_str, start_hr, start_min)
            else:
                timestamp = "<%s %s:%s>--<%s %s:%s>" % (
                    date_str, start_hr, start_min, date_str, end_hr, end_min)
        else:
            timestamp = "<%s>" % date_str

        w("** %s (%s)" % (descr, location.replace(" %s" % year, "")))
        w("   %s" % timestamp)
        w("")
    open(out, "w").writelines(lines)
Example #10
def view_pregnancy(req, **flts):
    filters = {
        "period": default_period(req),
        "location": default_location(req),
        "province": default_province(req),
        "district": default_district(req),
    }

    pregs = matching_reports(req, filters).filter(type=ReportType.objects.get(name="Pregnancy"))

    lxn, crd = location_name(req), map_pointers(req, filters["location"], filters)
    ans = [
        {"label": "Total Pregnancy Reports", "id": "allpreg", "number": len(pregs)},
        {"label": "High Risk Pregnancies", "id": "hrpreg", "number": len(fetch_high_risky_preg(pregs))},
        {"label": "Pregnant Wives without Toilet", "id": "notoi", "number": len(fetch_without_toilet(pregs))},
        {"label": "Pregnant Wives without Hand Washing", "id": "nohw", "number": len(fetch_without_hw(pregs))},
    ]

    return render_to_response(
        req,
        "ubuzima/pregnancy.html",
        {
            "track": ans,
            "filters": filters,
            "start_date": date.strftime(filters["period"]["start"], "%d.%m.%Y"),
            "end_date": date.strftime(filters["period"]["end"], "%d.%m.%Y"),
            "locationname": lxn,
            "coords": crd,
            "postqn": (req.get_full_path().split("?", 2) + [""])[1],
        },
    )
Example #11
def view_death(req, **flts):
    filters = {
        "period": default_period(req),
        "location": default_location(req),
        "province": default_province(req),
        "district": default_district(req),
    }

    reps = matching_reports(req, filters)
    chi_deaths = fetch_child_death(reps)
    mo_deaths = fetch_maternal_death(reps)
    lxn, crd = location_name(req), map_pointers(req, filters["location"], filters)
    ans = [
        {"label": "Maternal Death", "id": "matde", "number": len(mo_deaths)},
        {"label": "Child Death", "id": "chide", "number": len(chi_deaths)},
    ]

    return render_to_response(
        req,
        "ubuzima/death.html",
        {
            "track": ans,
            "stat": {"matde": len(mo_deaths), "chide": len(chi_deaths)},
            "filters": filters,
            "start_date": date.strftime(filters["period"]["start"], "%d.%m.%Y"),
            "end_date": date.strftime(filters["period"]["end"], "%d.%m.%Y"),
            "locationname": lxn,
            "coords": crd,
            "postqn": (req.get_full_path().split("?", 2) + [""])[1],
        },
    )
Example #12
def view_chihe(req, **flts):
    filters = {
        "period": default_period(req),
        "location": default_location(req),
        "province": default_province(req),
        "district": default_district(req),
    }

    chihe_reps = (
        matching_reports(req, filters).filter(type=ReportType.objects.get(name="Child Health")).select_related("fields")
    )
    vac_chihe_reps = fetch_vaccinated_stats(fetch_vaccinated_info(chihe_reps))
    lxn, crd = location_name(req), map_pointers(req, filters["location"], filters)

    ans = []
    for v in vac_chihe_reps.keys():
        ans.append({"label": "Children vaccinated with %s" % v, "id": "%s" % v, "number": len(vac_chihe_reps[v])})
    ans.append({"label": "ALL Children vaccinated ", "id": "all", "number": len(fetch_vaccinated_info(chihe_reps))})
    return render_to_response(
        req,
        "ubuzima/chihe.html",
        {
            "track": ans,
            "filters": filters,
            "start_date": date.strftime(filters["period"]["start"], "%d.%m.%Y"),
            "end_date": date.strftime(filters["period"]["end"], "%d.%m.%Y"),
            "locationname": lxn,
            "coords": crd,
            "postqn": (req.get_full_path().split("?", 2) + [""])[1],
        },
    )
Example #13
def view_stats(req, **flts):
    filters = {
        "period": default_period(req),
        "location": default_location(req),
        "province": default_province(req),
        "district": default_district(req),
    }
    track = get_stats_track(req, filters)
    hindics = health_indicators(req, filters)
    stt = filters["period"]["start"]
    fin = filters["period"]["end"]
    lox, lxn, crd = 0, location_name(req), map_pointers(req, filters["location"], filters)
    if req.REQUEST.has_key("location") and req.REQUEST["location"] != "0":
        lox = int(req.REQUEST["location"])
        lxn = Location.objects.get(id=lox)
    return render_to_response(
        req,
        "ubuzima/stats.html",
        {
            "track": track,
            "filters": filters,
            "hindics": paginated(req, hindics, prefix="hind"),
            "start_date": date.strftime(filters["period"]["start"], "%d.%m.%Y"),
            "end_date": date.strftime(filters["period"]["end"], "%d.%m.%Y"),
            "coords": crd,
            "location": lox,
            "locationname": lxn,
            "chosen": the_chosen(req.REQUEST),
            "important": get_important_stats(req, filters),
            "targets": HealthTarget.objects.all(),
            "postqn": (req.get_full_path().split("?", 2) + [""])[1],
        },
    )
Example #14
def reports(request):
    clinics = Provider.objects.values('clinic__id','clinic__name').distinct()
    
    zones = Case.objects.order_by("zone").values('zone', 'zone__name').distinct()
    p = Case.objects.order_by("zone").values('provider', 'provider__user__first_name', 'provider__user__last_name', 'zone').distinct()
    providers = []
    for provider in p:
        tmp = {}
        tmp['id'] = provider['provider']
        tmp['name'] = provider['provider__user__last_name'] + " " + provider['provider__user__first_name']
        tmp['zone'] = provider['zone']
        providers.append(tmp)  
        
    now = datetime.today()
    first = Case.objects.order_by('created_at')[:1][0]
    date = first.created_at

    months = []
    while (now - date).days > 0:
        months.append({'id': date.strftime("%m%Y"), 'label': date.strftime("%B %Y"), 'date': date})
        date = next_month(date)
    
    context = {
        "app": app,
        "clinics": clinics,
        "providers": providers,
        "zones": zones,
        "months": months
    }
    return as_html(request, "reports/reports.html", context)
Example #15
def format_date(x, pos=None):
    date = dates_list[int(x)]
    if date.month % 2 == 0:
        return ''
    if date.month == 1:
        return date.strftime('%b %Y')
    else:
        return date.strftime('%b')
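A formatter like this is normally attached to an axis through matplotlib's FuncFormatter; a minimal sketch, assuming format_date above is in scope (dates_list and values here are hypothetical sample data):

import datetime
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter

# one point roughly per month over three years
dates_list = [datetime.date(2020, 1, 1) + datetime.timedelta(days=30 * i) for i in range(36)]
values = list(range(36))

fig, ax = plt.subplots()
ax.plot(range(len(dates_list)), values)
ax.set_xlim(0, len(dates_list) - 1)  # keep tick values inside dates_list's index range
# matplotlib calls the formatter as format_date(tick_value, tick_position)
ax.xaxis.set_major_formatter(FuncFormatter(format_date))
plt.show()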
Example #16
def main():
	
	parser = OptionParser()
	parser.add_option("-d", "--db", dest="db_file", help="SMS DB File [default: %default]")
	parser.add_option("-s", "--start", dest="starttime", help="DD-MM-YY date to start at [default: %default]")
	parser.add_option("-e", "--end", dest="endtime", help="DD-MM-YY date to end at [default: %default]")
	parser.add_option("-n", "--number", dest="number", help="Phone # to search with - must match AddressBook.app format exactly, in quotes - e.g.: \"(888) 555-1212\"")
	parser.add_option("-c", "--chunksize", dest="chunksize", type="int", help="What size chunk to break the period into, in seconds [default: %default (1 hour = 3600 seconds)]")
	parser.add_option("-o", "--output", dest="output", help="What kind of output format? - Console, CSV, TSV [default: %default]")
	parser.add_option("-D", "--dateformat", dest="dateformat", help="strftime style date format [default: %default] (e.g. 'Wed 11AM')")

	parser.set_defaults(db_file="sms.db")
	parser.set_defaults(starttime=date.strftime(date.today() + timedelta(days=-7), "%d-%m-%y"))
	parser.set_defaults(endtime=date.strftime(date.today(), "%d-%m-%y"))
	parser.set_defaults(chunksize=43200)
	parser.set_defaults(output="Console")
	parser.set_defaults(dateformat="%m/%d/%y - %p")

	(options, args) = parser.parse_args()
	if not options.number:
		print "Missing phone number, option REQUIRED."
		print "Please provide the '-n' or '--number' argument.  See '-h' for help."
		sys.exit(2)

	# Collect all the SMS data
	data = runsearch(options.db_file, options.starttime, options.endtime, options.number, options.chunksize)
	# Pass SMS raw data to be formatted for the user-specified output
	output(data, options.output, options.dateformat)
	# Exit normally
	sys.exit()
Example #17
def text2elasticsearch(court, case, date):
    match_filename = date.strftime('%Y-%m-%d')+'_'+court+'_'+case+'_*'
    #print match_filename

    for file_name in os.listdir(data_path):
        if fnmatch.fnmatch(file_name, match_filename):
            #utf8_text = open(data_path+file_name, 'r').read().replace('\r','').decode('utf8')
            #print file_name
            f = open(data_path+file_name, 'r')
            line = f.readline().strip(' \t\n\r').decode('utf8')
            #utf8_id = replace_chinese_numbers(line)
            first_digit_in_string = re.search('\d', line)
            if first_digit_in_string is None:
                line = f.readline().strip(' \t\n\r').decode('utf8')
                first_digit_in_string = re.search('\d', line)
            if first_digit_in_string is None:
                utf8_id = file_name+'_chineseNumberFailure'
                failed_list.write(utf8_id+'\n')
            else:
                utf8_id = line[first_digit_in_string.start():]
            
            utf8_court = courts[court].decode('utf8')
            utf8_case =  cases[case].decode('utf8')
            utf8_text = open(data_path+file_name, 'r').read().decode('utf8')
            doc = {
                'court':utf8_court,
                'case':utf8_case,
                'date':date.strftime('%Y-%m-%d'),
                'text':utf8_text
            }
            #print doc['text']
            #es.index(index = 'lawper', doc_type = 'raw_text', id = utf8_id, body = doc)
            es.index(index = 'lawper', doc_type = 'crash', id = utf8_id, body = doc)
            print file_name, utf8_id
Example #18
def generate_formats(date):
    year_full = date.strftime("%Y")
    year_short = date.strftime("%y")

    month_full = date.strftime("%m")
    month_short = month_full.lstrip("0")

    day_full = date.strftime("%d")
    day_short = day_full.lstrip("0")

    date_formats = ["Mdyy", "MMdyy", "Mddyy", "MMddyy", "Mdyyyy", "MMdyyyy", "Mddyyyy", "MMddyyyy", "dMyy", "dMMyy", "ddMyy", "ddMMyy", "dMyyyy", "dMMyyyy", "ddMyyyy", "ddMMyyyy"]

    formatted_dates = []

    for df in date_formats:
        cp = df  # str.replace returns a new string, so deepcopy is unnecessary here

        cp = cp.replace("MM", month_full)
        cp = cp.replace("M", month_short)

        cp = cp.replace("yyyy", year_full)
        cp = cp.replace("yy", year_short)

        cp = cp.replace("dd", day_full)
        cp = cp.replace("d", day_short)

        formatted_dates.append(cp)

    return formatted_dates
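Hand-expanding one date shows why the long tokens ('MM', 'yyyy', 'dd') must be replaced before their one-letter forms; a quick check:

from datetime import date

# for 2001-02-03: month '02'/'2', day '03'/'3', year '2001'/'01'
formats = generate_formats(date(2001, 2, 3))
print(formats[0])   # 'Mdyy'     -> '2301'
print(formats[3])   # 'MMddyy'   -> '020301'
print(formats[15])  # 'ddMMyyyy' -> '03022001'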
Example #19
def esPruneIndexes():
    logger.debug('started')
    try:
        es = ElasticsearchClient((list('{0}'.format(s) for s in options.esservers)))
        indices = es.get_indices()
        # do the pruning
        for (index, dobackup, rotation, pruning) in zip(options.indices, options.dobackup, options.rotation, options.pruning):
            try:
                if pruning != '0':
                    index_to_prune = index
                    if rotation == 'daily':
                        idate = date.strftime(toUTC(datetime.now()) - timedelta(days=int(pruning)), '%Y%m%d')
                        index_to_prune += '-%s' % idate
                    elif rotation == 'monthly':
                        idate = date.strftime(datetime.utcnow() - timedelta(days=31 * int(pruning)), '%Y%m')
                        index_to_prune += '-%s' % idate

                    if index_to_prune in indices:
                        logger.debug('Deleting index: %s' % index_to_prune)
                        es.delete_index(index_to_prune, True)
                    else:
                        logger.error('Error deleting index %s, index missing' % index_to_prune)
            except Exception as e:
                logger.error("Unhandled exception while deleting %s, terminating: %r" % (index_to_prune, e))

    except Exception as e:
        logger.error("Unhandled exception, terminating: %r" % e)
Example #20
def main():
    if options.output=='syslog':
        logger.addHandler(SysLogHandler(address=(options.sysloghostname,options.syslogport)))
    else:
        sh=logging.StreamHandler(sys.stderr)
        sh.setFormatter(formatter)
        logger.addHandler(sh)

    logger.debug('started')
    #logger.debug(options)
    try:
        es=pyes.ES((list('{0}'.format(s) for s in options.esservers)))
        boto.connect_cloudtrail(aws_access_key_id=options.aws_access_key_id,aws_secret_access_key=options.aws_secret_access_key)
        #capture the time we start running so next time we catch any files created while we run.
        lastrun=toUTC(datetime.now()).isoformat()
        #in case we don't archive files..only look at today and yesterday's files.
        yesterday=date.strftime(datetime.utcnow()-timedelta(days=1),'%Y/%m/%d')
        today = date.strftime(datetime.utcnow(),'%Y/%m/%d')
        for region in boto.cloudtrail.regions():
            logger.debug('connecting to AWS region {0}'.format(region.name))
            ct=boto.cloudtrail.connect_to_region(region.name,aws_access_key_id=options.aws_access_key_id,aws_secret_access_key=options.aws_secret_access_key)
            trails=ct.describe_trails()['trailList']

            for trail in trails:
                s3 = boto.connect_s3(aws_access_key_id=options.aws_access_key_id,aws_secret_access_key=options.aws_secret_access_key)
                ctbucket=s3.get_bucket(trail['S3BucketName'])
                #ctbucket.get_all_keys()
                filelist=list()
                for bfile in ctbucket.list():

                    if 'CloudTrail' in bfile.key and 'json' in bfile.key:
                        if today in bfile.key or yesterday in bfile.key:
                            filelist.append(bfile.key)
                        else:
                            if options.purge:   #delete old files so we don't try to keep reading them.
                                s3file=ctbucket.get_key(bfile.key)   # bfile.key, not the not-yet-defined afile
                                s3file.delete()
                for afile in filelist:
                    s3file=ctbucket.get_key(afile)
                    logger.debug('{0} {1}'.format(afile,s3file.last_modified))

                    if toUTC(s3file.last_modified)>options.lastrun:
                        compressedData=s3file.read()
                        databuf=StringIO(compressedData)
                        f=gzip.GzipFile(fileobj=databuf)
                        jlog=json.loads(f.read())
                        try:
                            for r in jlog['Records']:
                                r['utctimestamp']=toUTC(r['eventTime']).isoformat()
                                jbody=json.dumps(r)
                                res=es.index(index='events',doc_type='cloudtrail',doc=jbody)
                                #logger.debug(res)
                        except Exception as e:
                            logger.error('Error handling log record {0} {1}'.format(r, e))
                            continue
            setConfig('lastrun',lastrun,options.configfile)
    except boto.exception.NoAuthHandlerFound:
        logger.error("No auth handler found, check your credentials")
    except Exception as e:
        logger.error("Unhandled exception, terminating: %r"%e)
Example #21
def generate_graph_object(select_lists):
    """Generates moderation and subscription data objects
    to be sent to dashboard for graph generation.
    """

    sub_objects = {}
    mod_objects = {}
    today = datetime.today().date()
    dates = []
    for i in range(0, 31):
        dates.append(today + timedelta(days=-i))
    for date in dates:
        mod_count = TaskCalender.objects.filter(list_id__in=select_lists).filter(log_type="moderation").filter(on_date=date).count()
        sub_count = TaskCalender.objects.filter(list_id__in=select_lists).filter(log_type="subscription").filter(on_date=date).count()
        key = date.strftime("%Y-%m-%d")
        # count() is never negative, so the zero/positive branches collapse to plain assignment
        mod_objects[key] = mod_count
        sub_objects[key] = sub_count
    # Sort the data according to occurrence date
    sub_objects = collections.OrderedDict(sorted(sub_objects.items()))
    mod_objects = collections.OrderedDict(sorted(mod_objects.items()))
    return sub_objects, mod_objects
Example #22
def tree_read(tile, date = datetime.date(2001,1,1)):
    date = datetime.date(date.year,1,1)
    src = treeRaw + tile + '/'
    flnm = src + 'MOD44B.A' + date.strftime('%Y') + '065.' + tile + '.005.hdf'
    
    dst = treePath + tile + '/'
    flnm_n = dst + 'MOD44B.' +  date.strftime('%Y') + tile + '.npy' 
    if os.path.isfile(flnm_n):
        return np.load(flnm_n)
    
    if not os.path.exists(dst):
        os.makedirs(dst)
        print 'Transferring treecover data'
    if os.path.isfile(flnm):
        temp = gdal.Open('HDF4_EOS:EOS_GRID:"' + flnm + '":MOD44B_250m_GRID:Percent_Tree_Cover', gdal.GA_ReadOnly).ReadAsArray().astype(np.float)  
        temp[temp>100] = np.nan
        tree = rebin(temp, [2400,2400])
        #np.save(flnm_n, tree)
    else:
        myfile = open(tile + 'treemissing.txt', 'a')
        myfile.write(flnm + '; occurred for 0 times.\n')
        myfile.close()
        tree = np.empty([2400,2400])
        tree[:] = np.nan
    return tree 
Example #23
def load_project_datastore(project_id, vcf_files=None, start_from_chrom=None, end_with_chrom=None):
    """
    Load this project into the project datastore,
    which allows queries over all variants in a project.
    """
    print(date.strftime(datetime.now(), "%m/%d/%Y %H:%M:%S  -- starting load_project_datastore: " + project_id))
    project = Project.objects.get(project_id=project_id)
    get_project_datastore(project_id).delete_project_store(project_id)
    get_project_datastore(project_id).add_project(project_id)
    for vcf_file in sorted(project.get_all_vcf_files(), key=lambda v:v.path()):
        vcf_file_path = vcf_file.path()
        if vcf_files is not None and vcf_file_path not in vcf_files:
            print("Skipping - %(vcf_file_path)s is not in %(vcf_files)s" % locals())
            continue
        project_indiv_ids = [i.indiv_id for i in project.get_individuals()]
        vcf_ids = vcf_file.sample_id_list()
        indiv_id_list = [i for i in project_indiv_ids if i in vcf_ids]
        get_project_datastore(project_id).add_variants_to_project_from_vcf(
            vcf_file.file_handle(),
            project_id,
            indiv_id_list=indiv_id_list,
            start_from_chrom=start_from_chrom,
            end_with_chrom=end_with_chrom
        )

    get_project_datastore(project_id).set_project_collection_to_loaded(project_id)

    print(date.strftime(datetime.now(), "%m/%d/%Y %H:%M:%S  -- load_project_datastore: " + project_id + " is done!"))
Example #24
def esPruneIndexes():
    es = pyes.ES((list('{0}'.format(s) for s in options.esservers)))

    indexes = es.indices.stats()['indices'].keys()
    # print('[*]\tcurrent indexes: {0}'.format(indexes))

    # set index names events-YYYYMMDD, alerts-YYYYMM, etc.
    dtNow = datetime.utcnow()
    targetSuffix = date.strftime(dtNow - timedelta(days=options.days), '%Y%m%d')
    
    # rotate daily
    eventsIndexName = 'events-{0}'.format(targetSuffix)
    
    # rotate monthly
    targetSuffix = date.strftime(dtNow - timedelta(days=options.months * 30), '%Y%m')
    alertsIndexName = 'alerts-{0}'.format(targetSuffix)
    correlationsIndexName = 'correlations-{0}'.format(targetSuffix)
      
    print('[*]\tlooking for old indexes: {0},{1},{2}'.format(
        eventsIndexName,
        alertsIndexName,
        correlationsIndexName))

    if eventsIndexName in indexes:
        print('[*]\tdeleting: {0}'.format(eventsIndexName))
        es.indices.delete_index(eventsIndexName)
    if alertsIndexName in indexes:
        print('[*]\tdeleting: {0}'.format(alertsIndexName))
        es.indices.delete_index(alertsIndexName)
    if correlationsIndexName in indexes:
        print('[*]\tdeleting: {0}'.format(correlationsIndexName))
        es.indices.delete_index(correlationsIndexName)
Example #25
def load_project_variants_from_vcf(project_id, vcf_files, mark_as_loaded=True, start_from_chrom=None, end_with_chrom=None):
    """
    Load any families and cohorts in this project that aren't loaded already
    
    Args:
       project_id: the project id as a string
       vcf_files: a list of one or more vcf file paths
    """
    print("Called load_project_variants_from_vcf on " + str(vcf_files))
    print(date.strftime(datetime.now(), "%m/%d/%Y %H:%M:%S  -- loading project: " + project_id + " - db.variants cache"))
    project = Project.objects.get(project_id=project_id)

    for vcf_file in vcf_files:
        r = vcf.VCFReader(filename=vcf_file)
        if "CSQ" not in r.infos:
            raise ValueError("VEP annotations not found in VCF: " + vcf_file)
        # every vcf_file here comes from vcf_files, so no membership check is needed
        mall.get_annotator().add_preannotated_vcf_file(vcf_file)

    # batch load families by VCF file
    print("project.families_by_vcf(): " + str(project.families_by_vcf()))
    for vcf_file, families in project.families_by_vcf().items():
        if vcf_file not in vcf_files:
            print("Skipping %(vcf_file)s since it's not in %(vcf_files)s" % locals())
            continue

        #families = [f for f in families if get_mall(project.project_id).variant_store.get_family_status(project_id, f.family_id) != 'loaded']
        print("Loading families for VCF file: " + vcf_file)
        for i in xrange(0, len(families), settings.FAMILY_LOAD_BATCH_SIZE):
            #print(date.strftime(datetime.now(), "%m/%d/%Y %H:%M:%S  -- loading project: " + project_id + " - families batch %d - %d families" % (i, len(families[i:i+settings.FAMILY_LOAD_BATCH_SIZE]))))
            load_variants_for_family_list(project, families[i:i+settings.FAMILY_LOAD_BATCH_SIZE], vcf_file, mark_as_loaded=mark_as_loaded, start_from_chrom=start_from_chrom, end_with_chrom=end_with_chrom)
            print(date.strftime(datetime.now(), "%m/%d/%Y %H:%M:%S  -- finished loading project: " + project_id))
Example #26
    def get_id_period (self, date):

        from_iso_dt, to_iso_dt = util.inc_dt(date.strftime(util.ISO8601_DATE), util.ISO8601_DATE, self.PERIOD_TYPE)
        from_dt = util.get_dt(from_iso_dt, util.ISO8601_DATE)
        to_dt = util.get_dt(to_iso_dt, util.ISO8601_DATE)

        response = self.br.open(self.search_url)
        
        fields = {}
        date = date.strftime(self.request_date_format)
        date_parts = date.split('/')
     #fields[self.date_field_from['day']] = date_parts[0]
     fields[self.date_field_from['month']] = date_parts[1]
     fields[self.date_field_from['year']] = date_parts[2]

     util.setup_form(self.br, self.search_form, fields)
        if self.DEBUG: print self.br.form
        response = util.submit_form(self.br)

        final_result = []
        if response:
            html = response.read()
            url = response.geturl()
            result = scrapemark.scrape(self.scrape_ids, html, url)
            if result and result.get('records'):
                self.clean_ids(result['records'])
                final_result.extend(result['records'])
        
        if final_result:
            return final_result, from_dt, to_dt
        else:
            return [], None, None # monthly scraper - so empty result is always invalid
Example #27
def MonthDelta(date, months):
    month = int(date.strftime("%m")) - months
    while month <= 0:
        month += 12
        year = int(date.strftime("%Y")) - 1
        date = date.replace(year=year)
    return date.replace(month=month)
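A quick check of the wrap-around behaviour (note the helper can raise ValueError when the target month is shorter than the source day, e.g. stepping back one month from March 31; the original does not guard against that):

from datetime import date

print(MonthDelta(date(2023, 3, 15), 2))   # 2023-01-15: same year
print(MonthDelta(date(2023, 1, 15), 2))   # 2022-11-15: wraps into the previous year
print(MonthDelta(date(2023, 1, 15), 14))  # 2021-11-15: every extra 12 months costs one more year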
Example #28
def load_project_variants(project_id, force_annotations=False, ignore_csq_in_vcf=False):
    """
    Load any families and cohorts in this project that aren't loaded already 
    """
    print("Loading project %s" % project_id)
    print(date.strftime(datetime.now(), "%m/%d/%Y %H:%M:%S  -- loading project: " + project_id + " - db.variants cache"))
    project = Project.objects.get(project_id=project_id)

    for vcf_obj in project.get_all_vcf_files():
        r = vcf.VCFReader(filename=vcf_obj.path())
        if not ignore_csq_in_vcf and "CSQ" not in r.infos:
            raise ValueError("VEP annotations not found in VCF: " + vcf_obj.path())

        mall.get_annotator().add_preannotated_vcf_file(vcf_obj.path(), force=force_annotations)
        

    # batch load families by VCF file
    for vcf_file, families in project.families_by_vcf().items():
        families = [f for f in families if get_mall(project.project_id).variant_store.get_family_status(project_id, f.family_id) != 'loaded']
        for i in xrange(0, len(families), settings.FAMILY_LOAD_BATCH_SIZE):
            print(date.strftime(datetime.now(), "%m/%d/%Y %H:%M:%S  -- loading project: " + project_id + " - families batch %d - %d families" % (i, len(families[i:i+settings.FAMILY_LOAD_BATCH_SIZE])) ))
            load_variants_for_family_list(project, families[i:i+settings.FAMILY_LOAD_BATCH_SIZE], vcf_file)

    # now load cohorts
    load_cohorts(project_id)
Example #29
def send_email(score):
    msg = Message(
        'New score submitted by {name} at {datetime} for a {round} on {date}: {score}'.format(
            name=score.archer.get_name(), date=date.strftime(score.date, '%d/%m/%Y'), score=score.score,
            round=score.round.name, datetime=datetime.strftime(datetime.now(), '%d/%m/%Y %H:%M:%S')),
        recipients=app.config['MAIL_RECORDS']
    )
    msg.html = '{name} has submitted a score:<br/><br/>' \
               '{round} on {date} as {category}<br/>' \
               'Score: {score}, hits: {hits}, golds: {golds}, Xs: {xs}<br/><br/>' \
               'CSV format:<br/>' \
               '<pre>{name_csv},{cat_csv},{bow_csv},{round_csv},{date_csv},{event_csv},{score_csv},{hits_csv},' \
               '{golds_csv},{xs_csv},</pre><br/>' \
               'To approve the score click <a href="{approve}">here</a> or to reject click ' \
               '<a href="{reject}">this link</a><br/><br/>' \
               'This email was automatically generated, please don\'t reply.' \
        .format(name=score.archer.get_name(), round=score.round.name, date=date.strftime(score.date, '%d/%m/%Y'),
                category=score.category, score=score.score, hits=score.num_hits, golds=score.num_golds,
                xs=score.num_xs or 'N/A', name_csv=score.archer.get_name(),
                cat_csv=condense_category(score.archer.gender, score.category), bow_csv=score.bow.name,
                round_csv=score.round.name, date_csv=date.strftime(score.date, '%d/%m/%Y'), event_csv=score.event.name,
                score_csv=score.score, hits_csv=score.num_hits, golds_csv=score.num_golds, xs_csv=score.num_xs or '',
                approve=app.config['SITE_URL'] + url_for('admin.approve_score', score_id=score.id),
                reject=app.config['SITE_URL'] + url_for('admin.reject_score', score_id=score.id))

    thread = Thread(target=send_async_email, args=[msg])
    thread.start()
Example #30
def cover_read(tile, date = datetime.date(2001,1,1), forced = False, detect = False):
    date = datetime.date(date.year,1,1)
    src = coverRaw + date.strftime('%Y') + '.01.01/'
    if not os.path.exists(src): #date out of bound
        return False
    
    dst = coverPath + tile + '/'
    flnm_n = dst + 'MCD12Q1.' + date.strftime('%Y') + tile + '.npy' #file name at destination
    
    if not forced and not detect: #if not forced, return previously stored data
        if os.path.isfile(flnm_n):
            return np.load(flnm_n)    
    
    flnm = src + 'MCD12Q1.A' + date.strftime('%Y') + '001.' + tile + '.051.hdf'
    if os.path.isfile(flnm):
        if detect:
            return True
        cover = gdal.Open('HDF4_EOS:EOS_GRID:"' + flnm + '":MOD12Q1:Land_Cover_Type_1', gdal.GA_ReadOnly).ReadAsArray().astype(np.int)
        cover[cover>16] = len(veg_dict) #unclassified as 17        
        if not os.path.exists(dst): #new directory
            os.makedirs(dst)
        np.save(flnm_n, cover)
        return cover

    if cover_read(tile, date = date + relativedelta(years=1), detect = detect) is not False: #tile exists in next year
        myfile = open(tile + 'covermissing.txt', 'a')
        myfile.write(flnm + '\n')
        myfile.close()
    return cover_read(tile, date = date + relativedelta(years=1), detect = detect)
Example #31
def format_date(date):  # date = datetime object.
    return date.strftime('%Y-%m-%d %H:%M:%S')
Example #32
 def generate_preventive_check(self, contracts):
     issue_obj = self.env['project.issue']
     invoice_obj = self.env['account.invoice']
     account_payment_term_obj = self.env['account.payment.term']
     currency_obj = self.env['res.currency']
     ctx = dict(self._context)
     invoices_list = []
     for contract in contracts:
         account_id = contract.product_id.property_account_income.id or contract.product_id.categ_id.property_account_income_categ.id
         if contract.invoice_partner_type == 'branch':
             for branch in contract.branch_ids:
                 if branch.property_product_pricelist:
                     if contract.pricelist_id.currency_id.id != branch.property_product_pricelist.currency_id.id:
                         import_currency_rate = contract.pricelist_id.currency_id.get_exchange_rate(
                             branch.property_product_pricelist.currency_id,
                             date.strftime(date.today(), "%Y-%m-%d"))[0]
                     else:
                         import_currency_rate = 1
                 else:
                     if contract.pricelist_id.currency_id.id != contract.company_id.currency_id.id:
                         import_currency_rate = contract.pricelist_id.currency_id.get_exchange_rate(
                             contract.company_id.currency_id,
                             date.strftime(date.today(), "%Y-%m-%d"))[0]
                     else:
                         import_currency_rate = 1
                 date_due = False
                 if branch.property_payment_term:
                     pterm_list = account_payment_term_obj.compute(
                         branch.property_payment_term.id,
                         value=1,
                         date_ref=time.strftime('%Y-%m-%d'))
                     if pterm_list:
                         pterm_list = [line[0] for line in pterm_list]
                         pterm_list.sort()
                         date_due = pterm_list[-1]
                 invoice = {
                     'partner_id': branch.id,
                     'company_id': contract.company_id.id,
                     'payment_term': branch.property_payment_term.id,
                     'account_id': branch.property_account_receivable.id,
                     'name': contract.name,
                     'currency_id': (branch.property_product_pricelist.currency_id.id
                                     or contract.company_id.currency_id.id),
                     'fiscal_position': branch.property_account_position.id,
                     'date_due': date_due
                 }
                 ctx = dict(self._context)
                 ctx['lang'] = branch.lang
                 ctx['force_company'] = contract.company_id.id
                 ctx['company_id'] = contract.company_id.id
                 self = self.with_context(ctx)
                 inv = invoice_obj.create(invoice)
                 invoices_list.append(inv.id)
                 invoice_line = {
                     'product_id': contract.product_id.id,
                     'name': contract.name,
                     'quantity': 1,
                     'price_unit': contract.amount_preventive_check * import_currency_rate,
                     'discount': contract.to_invoice.factor,
                     'account_analytic_id': contract.id,
                     'invoice_line_tax_id': [(6, 0, [tax.id for tax in contract.product_id.taxes_id])],
                     'account_id': account_id
                 }
                 inv.write({'invoice_line': [(0, 0, invoice_line)]})
         elif contract.invoice_partner_type == 'customer':
             date_due = False
             if contract.partner_id.property_product_pricelist:
                 if contract.pricelist_id.currency_id.id != contract.partner_id.property_product_pricelist.currency_id.id:
                     import_currency_rate = contract.pricelist_id.currency_id.get_exchange_rate(
                         contract.partner_id.
                         property_product_pricelist.currency_id,
                         date.strftime(date.today(), "%Y-%m-%d"))[0]
                 else:
                     import_currency_rate = 1
             else:
                 if contract.pricelist_id.currency_id.id != contract.company_id.currency_id.id:
                     import_currency_rate = contract.pricelist_id.currency_id.get_exchange_rate(
                         contract.company_id.currency_id,
                         date.strftime(date.today(), "%Y-%m-%d"))[0]
                 else:
                     import_currency_rate = 1
             if contract.partner_id.property_payment_term:
                 pterm_list = account_payment_term_obj.compute(
                     contract.partner_id.property_payment_term.id,
                     value=1,
                     date_ref=time.strftime('%Y-%m-%d'))
                 if pterm_list:
                     pterm_list = [line[0] for line in pterm_list]
                     pterm_list.sort()
                     date_due = pterm_list[-1]
             invoice = {
                 'partner_id': contract.partner_id.id,
                 'company_id': contract.company_id.id,
                 'payment_term': contract.partner_id.property_payment_term.id,
                 'account_id': contract.partner_id.property_account_receivable.id,
                 'name': contract.name,
                 'currency_id': contract.company_id.currency_id.id,
                 'fiscal_position': contract.partner_id.property_account_position.id,
                 'date_due': date_due
             }
             ctx = dict(self._context)
             ctx['lang'] = contract.partner_id.lang
             ctx['force_company'] = contract.company_id.id
             ctx['company_id'] = contract.company_id.id
             self = self.with_context(ctx)
             inv = invoice_obj.create(invoice)
             invoices_list.append(inv.id)
             invoice_line = {
                 'product_id': contract.product_id.id,
                 'name': contract.name,
                 'quantity': 1,
                 'price_unit': contract.amount_preventive_check * import_currency_rate,
                 'discount': contract.to_invoice.factor,
                 'account_analytic_id': contract.id,
                 'invoice_line_tax_id': [(6, 0, [tax.id for tax in contract.product_id.taxes_id])],
                 'account_id': account_id
             }
             inv.write({'invoice_line': [(0, 0, invoice_line)]})
         for issue in issue_obj.search([('stage_id.closed', '=', True),
                                        ('sale_order_id', '=', False),
                                        ('invoice_ids', '=', False),
                                        ('analytic_account_id', '=',
                                         contract.id)]):
             inv.write({'issue_ids': [(4, issue.id)]})
     return invoices_list
Example #33
 def get_weeknr_from_date(self, date):
     return date.strftime("%V")
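%V is the ISO 8601 week number. It is only documented as portable from Python 3.6 onward (earlier versions hand the directive straight to the platform strftime, so it may fail on Windows); date.isocalendar() is the version-independent cross-check:

from datetime import date

d = date(2021, 1, 1)         # a Friday belonging to ISO week 53 of 2020
print(d.strftime("%V"))      # '53'
print(d.isocalendar()[1])    # 53 -- same week number without strftime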
Example #34
 def _event_name(self):
     if self.otio_timeline.name:
         return self.otio_timeline.name
     return date.strftime(date.today(), "%m-%e-%y")
Example #35
def _date_to_api_string(date: date) -> str:  # pylint: disable=W0621
    """Convert a date object to a YYYYMMDD string expected by the API."""
    return date.strftime("%Y%m%d")
Example #36
 def fmt(self, date):
     return date.strftime("%Y%m%d"+self.time)
Example #37
def format_only_date(date):  # date = datetime object.
    return date.strftime('%Y-%m-%d')
Example #38
def main():
    expiry = date.strftime(date.today() + relativedelta(years=1), "%Y-%m-%d")
    module = AnsibleModule(argument_spec=dict(
        birthday=dict(default=None, type='str'),
        city=dict(default=None, type='str'),
        country=dict(default=None, type='str'),
        department_number=dict(default=None,
                               type='str',
                               aliases=['departmentNumber']),
        description=dict(default=None, type='str'),
        display_name=dict(default=None, type='str', aliases=['displayName']),
        email=dict(default=[''], type='list'),
        employee_number=dict(default=None,
                             type='str',
                             aliases=['employeeNumber']),
        employee_type=dict(default=None, type='str', aliases=['employeeType']),
        firstname=dict(default=None, type='str'),
        gecos=dict(default=None, type='str'),
        groups=dict(default=[], type='list'),
        home_share=dict(default=None, type='str', aliases=['homeShare']),
        home_share_path=dict(default=None,
                             type='str',
                             aliases=['homeSharePath']),
        home_telephone_number=dict(default=[],
                                   type='list',
                                   aliases=['homeTelephoneNumber']),
        homedrive=dict(default=None, type='str'),
        lastname=dict(default=None, type='str'),
        mail_alternative_address=dict(default=[],
                                      type='list',
                                      aliases=['mailAlternativeAddress']),
        mail_home_server=dict(default=None,
                              type='str',
                              aliases=['mailHomeServer']),
        mail_primary_address=dict(default=None,
                                  type='str',
                                  aliases=['mailPrimaryAddress']),
        mobile_telephone_number=dict(default=[],
                                     type='list',
                                     aliases=['mobileTelephoneNumber']),
        organisation=dict(default=None, type='str'),
        overridePWHistory=dict(default=False,
                               type='bool',
                               aliases=['override_pw_history']),
        overridePWLength=dict(default=False,
                              type='bool',
                              aliases=['override_pw_length']),
        pager_telephonenumber=dict(default=[],
                                   type='list',
                                   aliases=['pagerTelephonenumber']),
        password=dict(default=None, type='str', no_log=True),
        phone=dict(default=[], type='list'),
        postcode=dict(default=None, type='str'),
        primary_group=dict(default=None, type='str', aliases=['primaryGroup']),
        profilepath=dict(default=None, type='str'),
        pwd_change_next_login=dict(default=None,
                                   type='str',
                                   choices=['0', '1'],
                                   aliases=['pwdChangeNextLogin']),
        room_number=dict(default=None, type='str', aliases=['roomNumber']),
        samba_privileges=dict(default=[],
                              type='list',
                              aliases=['sambaPrivileges']),
        samba_user_workstations=dict(default=[],
                                     type='list',
                                     aliases=['sambaUserWorkstations']),
        sambahome=dict(default=None, type='str'),
        scriptpath=dict(default=None, type='str'),
        secretary=dict(default=[], type='list'),
        serviceprovider=dict(default=[''], type='list'),
        shell=dict(default='/bin/bash', type='str'),
        street=dict(default=None, type='str'),
        title=dict(default=None, type='str'),
        unixhome=dict(default=None, type='str'),
        userexpiry=dict(default=expiry, type='str'),
        username=dict(required=True, aliases=['name'], type='str'),
        position=dict(default='', type='str'),
        ou=dict(default='', type='str'),
        subpath=dict(default='cn=users', type='str'),
        state=dict(default='present',
                   choices=['present', 'absent'],
                   type='str')),
                           supports_check_mode=True,
                           required_if=([
                               ('state', 'present',
                                ['firstname', 'lastname', 'password'])
                           ]))
    username = module.params['username']
    position = module.params['position']
    ou = module.params['ou']
    subpath = module.params['subpath']
    state = module.params['state']
    changed = False
    diff = None  # initialised here so exit_json below works even when state == 'absent'

    users = list(
        ldap_search('(&(objectClass=posixAccount)(uid={}))'.format(username),
                    attr=['uid']))
    if position != '':
        container = position
    else:
        if ou != '':
            ou = 'ou={},'.format(ou)
        if subpath != '':
            subpath = '{},'.format(subpath)
        container = '{}{}{}'.format(subpath, ou, base_dn())
    user_dn = 'uid={},{}'.format(username, container)

    exists = bool(len(users))

    if state == 'present':
        try:
            if not exists:
                obj = umc_module_for_add('users/user', container)
            else:
                obj = umc_module_for_edit('users/user', user_dn)

            if module.params['displayName'] is None:
                module.params['displayName'] = '{} {}'.format(
                    module.params['firstname'], module.params['lastname'])
            if module.params['unixhome'] is None:
                module.params['unixhome'] = '/home/{}'.format(
                    module.params['username'])
            for k in obj.keys():
                if (k != 'password' and k != 'groups'
                        and k != 'overridePWHistory' and k in module.params
                        and module.params[k] is not None):
                    obj[k] = module.params[k]
            # handle some special values
            obj['e-mail'] = module.params['email']
            password = module.params['password']
            if obj['password'] is None:
                obj['password'] = password
            else:
                old_password = obj['password'].split('}', 2)[1]
                if crypt.crypt(password, old_password) != old_password:
                    obj['overridePWHistory'] = module.params[
                        'overridePWHistory']
                    obj['overridePWLength'] = module.params['overridePWLength']
                    obj['password'] = password

            diff = obj.diff()
            if exists:
                for k in obj.keys():
                    if obj.hasChanged(k):
                        changed = True
            else:
                changed = True
            if not module.check_mode:
                if not exists:
                    obj.create()
                elif changed:
                    obj.modify()
        except:
            module.fail_json(
                msg="Creating/editing user {} in {} failed".format(
                    username, container))
        try:
            groups = module.params['groups']
            if groups:
                filter = '(&(objectClass=posixGroup)(|(cn={})))'.format(
                    ')(cn='.join(groups))
                group_dns = list(ldap_search(filter, attr=['dn']))
                for dn in group_dns:
                    grp = umc_module_for_edit('groups/group', dn[0])
                    if user_dn not in grp['users']:
                        grp['users'].append(user_dn)
                        if not module.check_mode:
                            grp.modify()
                        changed = True
        except Exception:
            module.fail_json(
                msg="Adding groups to user {} failed".format(username))

    if state == 'absent' and exists:
        try:
            obj = umc_module_for_edit('users/user', user_dn)
            if not module.check_mode:
                obj.remove()
            changed = True
        except Exception:
            module.fail_json(msg="Removing user {} failed".format(username))

    module.exit_json(changed=changed,
                     username=username,
                     diff=diff,
                     container=container)
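The password check above leans on crypt()'s salt-reuse behaviour: hashing the candidate password with the stored hash as the salt reproduces the stored hash exactly when the passwords match. A minimal standalone sketch of the same test (assuming UDM stores hashes with a "{crypt}"-style scheme prefix, as the split('}') implies):

import crypt

def password_changed(new_password, stored):
    # Strip the "{crypt}" scheme prefix to get the raw hash.
    old_hash = stored.split('}', 2)[1]
    # crypt() reuses the salt embedded in old_hash, so equal output
    # means new_password matches the stored password.
    return crypt.crypt(new_password, old_hash) != old_hash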
Example #39
0
def get_harvest_day(date, day_name):
    if day_name == "monday":
        return []
    # print date

    try:
        response = urllib.request.urlopen(URL)
        # + BY_DAYS[date.weekday()] + ".html")
        output = response.read()
    except urllib.error.URLError:
        return []

    soup = BeautifulSoup(output, "html.parser")
    # print soup
    # print day_name
    div = soup.findAll("div", attrs={"id": day_name})[0]
    # print div
    table = div.find('table')  # .findAll("tbody")[0]
    # print table

    rows = table.findAll('tr')[1:]
    days_programs = []

    for row in rows:
        cols = row.findAll('td')
        ptime = datetime.datetime.combine(
            date,
            datetime.datetime.strptime(
                cols[0].text.replace(" AM", "AM").replace(" PM", "PM"),
                "%I:%M%p").time())
        if ptime.date() > date:
            continue
        time_str = ptime.strftime("%H%M")

        link = cols[2].text

        days_programs.append({
            "date": date.strftime("%Y-%m-%d"),
            "starts": time_str,
            "starts_datetime": ptime,
            "program_name": link
        })

    length = len(days_programs)
    for index, prog in enumerate(days_programs):
        if index == length - 1:
            next_prog = {
                "starts_datetime":
                datetime.datetime.combine(date + datetime.timedelta(days=1),
                                          datetime.datetime.min.time())
            }
        else:
            next_prog = days_programs[index + 1]

        if prog["program_name"] == "prev":
            prog["program_name"] = days_programs[index - 1]["program_name"]

        duration = next_prog["starts_datetime"] - prog["starts_datetime"]
        durmins = int(total_secs(duration) / 60)
        # print "this: " + str(prog["starts_datetime"]) + ", next: " + str(next_prog["starts_datetime"])
        # print durmins
        days_programs[index]["duration"] = durmins

    for prog in days_programs:
        del prog["starts_datetime"]

    return days_programs
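The duration math above relies on a total_secs() helper that the snippet doesn't show; presumably it mirrors timedelta.total_seconds(). A back-compat sketch (the name is taken from the call above, the body is an assumption):

def total_secs(td):
    # Same arithmetic as timedelta.total_seconds(), which only became
    # available in Python 2.7/3.2.
    return td.days * 86400 + td.seconds + td.microseconds / 1e6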
Example #40
0
    def get_time(self):
        date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        date = datetime.strptime(date, "%Y-%m-%d %H:%M:%S")
        date = date + timedelta(hours=4)
        date = date.strftime("%d-%m-%Y %H:%M:%S")
        return date
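The string round-trip above (format "now", reparse it, shift, reformat) can be collapsed into one expression; a minimal equivalent sketch, assuming DEFAULT_SERVER_DATETIME_FORMAT is the '%Y-%m-%d %H:%M:%S' that the strptime call expects:

    def get_time(self):
        # Shift local "now" by four hours and format it directly.
        return (datetime.now() + timedelta(hours=4)).strftime("%d-%m-%Y %H:%M:%S")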
Example #41
0
if not path.exists('data/' + datet + '.csv'):
    wget.download(url, out='data/')
    os.rename('data/csv', 'data/' + datet + '.csv')
    print('')

data = {}
for country in countries:
    dates = []
    cases = []
    deaths = []
    data[country] = {}
    with open('data/' + datet + '.csv', "rt",
              encoding="iso-8859-2") as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        for row in csv_reader:
            if country == row[6]:
                date = datetime.strptime(row[0], '%d/%m/%Y')
                dates.append(date.strftime('%m/%d/%Y'))
                cases.append(row[4])
                deaths.append(row[5])
        data[country]['DATE'] = dates[::-1]
        data[country]['CUM_CASES'] = np.cumsum([int(ns) for ns in cases[::-1]])
        data[country]['CUM_DEATHS'] = np.cumsum(
            [int(ns) for ns in deaths[::-1]])

build_plot(data, countries, 'Cases', vis, dpi)
build_plot(data, countries, 'Deaths', vis, dpi)
build_plot(data, countries, 'Mortality', vis, dpi)
print('Done!')
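The [::-1] slices above reverse the collected rows, evidently because the source CSV lists the newest entries first, so the cumulative sums run in chronological order. A tiny illustration:

import numpy as np

daily = ['3', '1', '2']                          # newest first, as read
print(np.cumsum([int(n) for n in daily[::-1]]))  # -> [2 3 6], oldest first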
Example #42
0
# and then creates a drawing canvas, draw, to which we can draw text and graphics
img = Image.new('P', (inky_display.WIDTH, inky_display.HEIGHT))
draw = ImageDraw.Draw(img)

# import our fonts
tempFont = ImageFont.truetype('/home/pi/weatherDisplay/fonts/Aller_Bd.ttf', 22)
dayFont = ImageFont.truetype('/home/pi/weatherDisplay/fonts/Roboto-Black.ttf', 18)
iconFont = ImageFont.truetype('/home/pi/weatherDisplay/fonts/Roboto-Medium.ttf', 16)
dateFont = ImageFont.truetype('/home/pi/weatherDisplay/fonts/Roboto-Bold.ttf', 14)
font = ImageFont.truetype('/home/pi/weatherDisplay/fonts/Roboto-Regular.ttf', 12)
smallFont = ImageFont.truetype('/home/pi/weatherDisplay/fonts/ElecSign.ttf', 8)
smallestFont = ImageFont.truetype('/home/pi/weatherDisplay/fonts/ElecSign.ttf', 7)

# define weekday text
weekday = date.today()
day_Name = date.strftime(weekday, '%A')
day_month = date.strftime(weekday, '%-d %B')

# format the summary texts for today 
currentCondFormatted = textwrap.fill(upcoming_conditions, 19, max_lines=4)

# draw today's name on left side
draw.text((3, 3), day_Name, inky_display.BLACK, dayFont)

# draw today's date on left side below today's name
dayDate = day_month
draw.text((3, 25), dayDate, inky_display.BLACK, dateFont)

#draw current temperature to right of day name and date. C and F
draw.text((105, 8), temp, inky_display.BLACK, tempFont)
draw.text((105, 34), tempF, inky_display.BLACK, font)
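One portability note on the formats above: '%-d' (day of month without a leading zero) is a glibc extension, so it works on the Raspberry Pi this targets but not everywhere; Windows strftime spells it '%#d'.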
Example #43
0
def to_isoz_date_string(value: date) -> str:
    return date.strftime(value, "%Y-%m-%d")
Example #44
0
def date_str(date):
    return date.strftime("%Y-%m-%d")
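Both helpers above format the same way; date.strftime(value, fmt) simply calls strftime through the class instead of the instance, so the two spellings are interchangeable:

from datetime import date

d = date(2021, 1, 31)
assert date.strftime(d, "%Y-%m-%d") == d.strftime("%Y-%m-%d") == "2021-01-31"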
Example #45
0
hsVerificationSocket.bind("tcp://192.168.100.43:%s" % verificationPort)

while True:
    if interfaceSocket.poll(100) != 0:
        dbCommand = interfaceSocket.recv()
        print dbCommand

        if dbCommand == 'startHyperSpec':
            acquisitionStopped = False
            stopped = False
            print "dbCommand = startHyperSpec"
            interfaceSocket.send("startHyperSpec Received - Wait 9 Seconds")
            if HyperSpecAcqStarted == False:  #run HyperSpecAcq and verify
                print "Acquisition Script Called"
                date = datetime.now()
                currTime = date.strftime("%Y%m%d%H%M%S")
                home = "E:\\HS_stdout\\"
                iiFolderName = "stdOut_2i_%s" % currTime
                iiFileName = "%s.txt" % iiFolderName
                iiPath = home + iiFolderName
                nirFolderName = "stdOut_NIR_%s" % currTime
                nirFileName = "%s.txt" % nirFolderName
                nirPath = home + nirFolderName
                if not os.path.exists(iiPath):
                    os.makedirs(iiPath)
                if not os.path.exists(nirPath):
                    os.makedirs(nirPath)
                iiFile = open("%s\\%s" % (iiPath, iiFileName), 'a+')
                nirFile = open("%s\\%s" % (nirPath, nirFileName), 'a+')
                iiAcquisition = subprocess.Popen(
                    "E:\\ResononAPI_2.2_Beta\\bin\\2iHyperSpecAcquisitionV2.6.exe",
Example #46
0
def parsedate(date):
    return date.strftime('%A')
Example #47
0
for i in invertors:
    invertorID = i
    os.chdir("/home/vishnu/Documents/icer/" + invertorID)
    print(invertorID)

    #( date,month,year); GENERATE DATES .CSV FORMAT
    start = datetime.datetime.strptime("01-01-2018", "%d-%m-%Y")
    end = datetime.datetime.strptime("2-1-2018",
                                     "%d-%m-%Y")  # One day after the end date
    date_generated = [
        start + datetime.timedelta(days=x)
        for x in range(0, (end - start).days)
    ]
    g = []
    for date in date_generated:
        start_date = (date.strftime("%d"))
        end_date = (date.strftime("%d"))
        month = (date.strftime("%m"))
        year = (date.strftime("%Y"))
        filename = (str(year) + str(month) + str(start_date)) + ".csv"
        df = pd.DataFrame()
        dg = pd.read_csv(filename)
        df = df.append(dg, ignore_index=True)
        df.insert(0, 'Timeadjusted', 0)  # df.insert(idx,col_name,value)
        df['Timeadjusted'] = pd.to_datetime(df.Time) - pd.Timedelta(hours=8,
                                                                    minutes=30)
        #Identify and drop NAN Values
        tt = df[df['Timeadjusted'].isnull()]
        print(tt)
        df = df.dropna()
        # prints dropped df
Example #48
0
                        stock_dict[item["ticker"]][1] + pos_neg[1] * score,
                        stock_dict[item["ticker"]][2] + score
                    ]

print("finalizing data...")
output = []
date = date.today() - timedelta(days=NUM_DAYS_AGO)
for key, val in stock_dict.items():
    #limit min number of ppl per stock
    if (val[2] > 3):
        #if symbol isn't registered in yahoo finance, skip
        try:
            data = round(DataReader(key, 'yahoo', date)['Adj Close'][0], 2)
            output.append([
                key,
                int((val[0] / val[2]) * 100) / 100,
                int((val[1] / val[2]) * 100) / 100, data, val[2]
            ])
            if (len(output) % 25 == 0):
                print(f"Created {len(output)} entries...")
        except:
            continue
print(f"Created {len(output)} entries")
output = sorted(output, key=lambda x: x[4], reverse=True)

output_file = open('reddit_data.txt', 'a')
formatted_date = date.strftime("%m/%d/%y")
print(formatted_date, sep="\n", file=output_file)
print(*output, sep="\n", file=output_file)
print(f"Total time elapsed: {round((time.time() - start_time)/60, 2)} minutes")
Example #49
0
    def add_variants_to_project_from_vcf(self,
                                         vcf_file,
                                         project_id,
                                         indiv_id_list=None,
                                         start_from_chrom=None,
                                         end_with_chrom=None):
        """
        This is how variants are loaded
        """

        chrom_list = list(map(str, range(1, 23))) + ['X', 'Y']
        chrom_list_start_index = 0
        if start_from_chrom:
            chrom_list_start_index = chrom_list.index(
                start_from_chrom.replace("chr", "").upper())

        chrom_list_end_index = len(chrom_list)
        if end_with_chrom:
            chrom_list_end_index = chrom_list.index(
                end_with_chrom.replace("chr", "").upper())
        chromosomes_to_include = set(
            chrom_list[chrom_list_start_index:chrom_list_end_index + 1])
        #tabix_file = pysam.TabixFile(vcf_file)
        #vcf_iter = tabix_file.header
        #for chrom in chrom_list[chrom_list_start_index:chrom_list_end_index]:
        #    print("Will load chrom: " + chrom)
        #    vcf_iter = itertools.chain(vcf_iter, tabix_file.fetch(chrom))

        project_collection = self._get_project_collection(project_id)
        reference_populations = self._annotator.reference_population_slugs + self._custom_populations_map.get(
            project_id)
        for counter, variant in enumerate(
                vcf_stuff.iterate_vcf(vcf_file,
                                      genotypes=True,
                                      indiv_id_list=indiv_id_list)):
            if (start_from_chrom or end_with_chrom) and variant.chr.replace(
                    "chr", "") not in chromosomes_to_include:
                continue

            if variant.alt == "*":
                #print("Skipping GATK 3.4 * alt allele: " + str(variant.unique_tuple()))
                continue

            if counter % 2000 == 0:
                print(
                    date.strftime(datetime.now(), "%m/%d/%Y %H:%M:%S") +
                    "-- inserting variant %d  %s:%s-%s-%s (%0.1f%% done with %s) "
                    % (counter, variant.chr, variant.pos, variant.ref,
                       variant.alt, 100 * variant.pos /
                       CHROMOSOME_SIZES[variant.chr.replace("chr", "")],
                       variant.chr))

            variant_dict = project_collection.find_one({
                'xpos': variant.xpos,
                'ref': variant.ref,
                'alt': variant.alt
            })
            if not variant_dict:
                variant_dict = variant.toJSON()
                try:
                    annotation = self._annotator.get_annotation(
                        variant.xpos,
                        variant.ref,
                        variant.alt,
                        populations=reference_populations)
                except ValueError, e:
                    sys.stderr.write("WARNING: " + str(e) + "\n")
                    continue
                _add_index_fields_to_variant(variant_dict, annotation)
            else:
                for indiv_id, genotype in variant.get_genotypes():
                    if genotype.num_alt != 0:
                        variant_dict['genotypes'][indiv_id] = genotype._asdict(
                        )
            project_collection.save(variant_dict)
Example #50
0
                                           2], true_dates[last_date_idx, 1],
                                true_dates[last_date_idx,
                                           0])  # get the end date
all_ticks = numpy.linspace(
    date2num(start_date), date2num(end_date),
    test_size)  # get an array with all the numeric date values

tick_spacing = test_size if test_size <= 60 else 30
date_format = "%m/%d" if test_size <= 60 else "%y/%m"

# major_ticks = numpy.arange(date2num(start_date), date2num(end_date), tick_spacing)  # get an array with the date values I want to display
major_ticks = numpy.linspace(
    date2num(start_date), date2num(end_date), tick_spacing
)  # get an array with the date values I want to display
major_tick_labels = [
    date.strftime(date_format) for date in num2date(major_ticks)
]

# PLOT THE NORMALIZED OUTPUT DEMAND TOGETHER WITH THE RELEVANT PORTION OF THE ORIGINAL DATA -----------------------------------------
true_net_demand = test_demand
predicted_net_demand = predicted
plot_w_xticks(all_ticks, major_ticks, major_tick_labels,
              [(true_net_demand, 'b-o'), (predicted_net_demand, 'r-o')])
axes = plt.gca()
#axes.set_ylim([0, 1])  # set the y-axis limits between 0 and 1
plt.grid()
plt.show()

# PLOT THE ERROR MADE ----------------------------------------------------------------------------------------------------------
denormalized_true_net_demand = de_normalize_dataset(test_demand.copy(),
                                                    DIFF_DEMAND)
Example #51
0
def fmt_date(date):
    return "%s, week %d, %s" % (date.isoformat(), date.isocalendar()[1],
                                date.strftime("%A, %B"))
Example #52
0
class MongoDatastore(datastore.Datastore):
    def __init__(self,
                 db,
                 annotator,
                 custom_population_store=None,
                 custom_populations_map=None):
        self._db = db
        self._annotator = annotator
        self._custom_population_store = custom_population_store
        self._custom_populations_map = custom_populations_map
        if self._custom_populations_map is None:
            self._custom_populations_map = {}

    def _make_db_query(self, genotype_filter=None, variant_filter=None):
        """
        Caller specifies filters to get_variants, but they are evaluated later.
        Here, we just inspect those filters and see what heuristics we can apply to avoid a full table scan.
        The query here must return a superset of the true get_variants results.
        Note that the full annotation isn't stored, so use the fields added by _add_index_fields_to_variant.
        """
        db_query = {}

        # genotype filter
        if genotype_filter is not None:
            _add_genotype_filter_to_variant_query(db_query, genotype_filter)

        if variant_filter:
            if variant_filter.locations:
                location_ranges = []
                for xstart, xend in variant_filter.locations:
                    location_ranges.append({
                        '$and': [{
                            'xpos': {
                                '$gte': xstart
                            }
                        }, {
                            'xpos': {
                                '$lte': xend
                            }
                        }]
                    })
                db_query['$or'] = location_ranges

            if variant_filter.so_annotations:
                db_query['db_tags'] = {'$in': variant_filter.so_annotations}
            if variant_filter.genes:
                db_query['db_gene_ids'] = {'$in': variant_filter.genes}
            if variant_filter.ref_freqs:
                for population, freq in variant_filter.ref_freqs:
                    if population in self._annotator.reference_population_slugs:
                        db_query['db_freqs.' + population] = {'$lte': freq}

        return db_query

    #
    # Variant search
    #

    def get_variants(self,
                     project_id,
                     family_id,
                     genotype_filter=None,
                     variant_filter=None):

        db_query = self._make_db_query(genotype_filter, variant_filter)
        sys.stderr.write("%s\n" % str(db_query))
        collection = self._get_family_collection(project_id, family_id)
        if not collection:
            print(
                "Error: mongodb collection not found for project %s family %s "
                % (project_id, family_id))
            return

        counters = OrderedDict([('returned_by_query', 0),
                                ('passes_variant_filter', 0)])
        for i, variant_dict in enumerate(
                collection.find({
                    '$and': [{
                        k: v
                    } for k, v in db_query.items()]
                }).sort('xpos').limit(MONGO_QUERY_RESULTS_LIMIT + 5)):
            if i >= MONGO_QUERY_RESULTS_LIMIT:
                raise Exception(
                    "ERROR: this search exceeded the %s variant result size limit. Please set additional filters and try again."
                    % MONGO_QUERY_RESULTS_LIMIT)

            variant = Variant.fromJSON(variant_dict)
            self.add_annotations_to_variant(variant, project_id)
            counters["returned_by_query"] += 1
            if passes_variant_filter(variant, variant_filter)[0]:
                counters["passes_variant_filter"] += 1
                yield variant

        for k, v in counters.items():
            sys.stderr.write("    %s: %s\n" % (k, v))

    def get_variants_in_gene(self,
                             project_id,
                             family_id,
                             gene_id,
                             genotype_filter=None,
                             variant_filter=None):

        if variant_filter is None:
            modified_variant_filter = VariantFilter()
        else:
            modified_variant_filter = copy.deepcopy(variant_filter)
        modified_variant_filter.add_gene(gene_id)

        db_query = self._make_db_query(genotype_filter,
                                       modified_variant_filter)
        collection = self._get_family_collection(project_id, family_id)
        if not collection:
            return

        # We have to collect the list in memory here because mongo can't
        # sort on xpos when the result set gets too big.
        # Need to find a better way to do this.
        variants = []
        for variant_dict in collection.find(db_query).hint([
            ('db_gene_ids', pymongo.ASCENDING), ('xpos', pymongo.ASCENDING)
        ]):
            variant = Variant.fromJSON(variant_dict)
            self.add_annotations_to_variant(variant, project_id)
            if passes_variant_filter(variant, modified_variant_filter):
                variants.append(variant)
        variants = sorted(variants, key=lambda v: v.unique_tuple())
        for v in variants:
            yield v

    def get_variants_in_range(self, project_id, family_id, xpos_start,
                              xpos_end):
        collection = self._get_family_collection(project_id, family_id)
        if not collection:
            raise ValueError("Family not found: " + str(family_id))
        for i, variant_dict in enumerate(
                collection.find({
                    '$and': [{
                        'xpos': {
                            '$gte': xpos_start
                        }
                    }, {
                        'xpos': {
                            '$lte': xpos_end
                        }
                    }]
                }).limit(MONGO_QUERY_RESULTS_LIMIT + 5)):
            if i > MONGO_QUERY_RESULTS_LIMIT:
                raise Exception(
                    "ERROR: this search exceeded the %s variant result size limit. Please set additional filters and try again."
                    % MONGO_QUERY_RESULTS_LIMIT)

            variant = Variant.fromJSON(variant_dict)
            self.add_annotations_to_variant(variant, project_id)
            yield variant

    def get_single_variant(self, project_id, family_id, xpos, ref, alt):

        collection = self._get_family_collection(project_id, family_id)
        if not collection:
            return None
        variant_dict = collection.find_one({
            'xpos': xpos,
            'ref': ref,
            'alt': alt
        })
        if variant_dict:
            variant = Variant.fromJSON(variant_dict)
            self.add_annotations_to_variant(variant, project_id)
            return variant
        else:
            return None

    def get_variants_cohort(self, project_id, cohort_id, variant_filter=None):

        db_query = self._make_db_query(None, variant_filter)
        collection = self._get_family_collection(project_id, cohort_id)
        for i, variant in enumerate(
                collection.find(db_query).sort('xpos').limit(
                    MONGO_QUERY_RESULTS_LIMIT + 5)):
            if i > MONGO_QUERY_RESULTS_LIMIT:
                raise Exception(
                    "ERROR: this search exceeded the %s variant result size limit. Please set additional filters and try again."
                    % MONGO_QUERY_RESULTS_LIMIT)

            yield Variant.fromJSON(variant)

    def get_single_variant_cohort(self, project_id, cohort_id, xpos, ref, alt):

        collection = self._get_family_collection(project_id, cohort_id)
        variant = collection.find_one({'xpos': xpos, 'ref': ref, 'alt': alt})
        return Variant.fromJSON(variant)

    #
    # New sample stuff
    #
    def get_all_individuals(self):
        """
        List of all individuals in the datastore
        Items are (project_id, indiv_id) tuples
        """
        return [(i['project_id'], i['indiv_id'])
                for i in self._db.individuals.find()]

    def get_all_families(self):
        """
        List of all families in the datastore
        Items are (project_id, family_id) tuples
        """
        return [(i['project_id'], i['family_id'])
                for i in self._db.families.find()]

    def individual_exists(self, project_id, indiv_id):
        return self._db.individuals.find_one({
            'project_id': project_id,
            'indiv_id': indiv_id
        }) is not None

    def add_individual(self, project_id, indiv_id):
        if self.individual_exists(project_id, indiv_id):
            raise Exception("Indiv (%s, %s) already exists" %
                            (project_id, indiv_id))
        indiv = {
            'project_id': project_id,
            'indiv_id': indiv_id,
        }
        self._db.individuals.save(indiv)

    def get_individuals(self, project_id):
        return [
            i['indiv_id']
            for i in self._db.individuals.find({'project_id': project_id})
        ]

    def family_exists(self, project_id, family_id):
        return self._db.families.find_one({
            'project_id': project_id,
            'family_id': family_id
        }) is not None

    def get_individuals_for_family(self, project_id, family_id):
        return self._db.families.find_one({
            'project_id': project_id,
            'family_id': family_id
        })['individuals']

    def get_family_status(self, project_id, family_id):
        family_doc = self._db.families.find_one({
            'project_id': project_id,
            'family_id': family_id
        })
        if not family_doc:
            return None
        return family_doc['status']

    def get_family_statuses(self, family_list):
        ret = {f: None for f in family_list}
        by_project = defaultdict(list)
        for project_id, family_id in family_list:
            by_project[project_id].append(family_id)
        for project_id, family_id_list in by_project.items():
            for family_doc in self._db.families.find({
                    'project_id': project_id,
                    'family_id': {
                        '$in': family_id_list
                    }
            }):
                ret[(project_id,
                     family_doc['family_id'])] = family_doc['status']
        return ret

    def _get_family_info(self, project_id, family_id=None):
        if family_id is None:
            return [
                family_info for family_info in self._db.families.find(
                    {'project_id': project_id})
            ]
        else:
            return self._db.families.find_one({
                'project_id': project_id,
                'family_id': family_id
            })

    def _get_family_collection(self, project_id, family_id):
        family_info = self._get_family_info(project_id, family_id)
        if not family_info:
            return None
        return self._db[family_info['coll_name']]

    #
    # Variant loading
    # Unique to mongo datastore, not part of protocol
    #

    def _add_family_info(self, project_id, family_id, individuals):
        """
        Add all the background info about this family
        We try to keep this as simple as possible - just IDs
        After this is run, variants are ready to be loaded
        """

        if self.family_exists(project_id, family_id):
            #raise Exception("Family (%s, %s) already exists" % (project_id, family_id))
            return

        for indiv_id in individuals:
            if not self.individual_exists(project_id, indiv_id):
                self.add_individual(project_id, indiv_id)

        family_coll_name = "family_%s_%s" % (slugify(
            project_id, separator='_'), slugify(family_id, separator='_'))
        family = {
            'project_id': project_id,
            'family_id': family_id,
            'individuals': individuals,
            'coll_name': family_coll_name,
            'status': 'loading'
        }

        family_collection = self._db[family_coll_name]
        self._index_family_collection(family_collection)

        self._db.families.save(family)

    def add_family(self, project_id, family_id, individuals):
        """
        Add new family
        Adds individuals if they don't exist yet
        Phenotypes and pedigrees aren't stored, just which individuals
        """
        self._add_family_info(project_id, family_id, individuals)

    def add_family_set(self, family_list):
        """
        Add a set of families from the same VCF file
        family_list is just a list of dicts with keys of project_id, family_id, individuals
        """
        for fam_info in family_list:
            self._add_family_info(fam_info['project_id'],
                                  fam_info['family_id'],
                                  fam_info['individuals'])

    def load_family_set(self,
                        vcf_file_path,
                        family_list,
                        reference_populations=None,
                        vcf_id_map=None,
                        mark_as_loaded=True,
                        start_from_chrom=None,
                        end_with_chrom=None):
        """
        Load a set of families from the same VCF file
        family_list is a list of (project_id, family_id) tuples
        """
        family_info_list = [
            self._get_family_info(f[0], f[1]) for f in family_list
        ]
        self._load_variants_for_family_set(
            family_info_list,
            vcf_file_path,
            reference_populations=reference_populations,
            vcf_id_map=vcf_id_map,
            start_from_chrom=start_from_chrom,
            end_with_chrom=end_with_chrom,
        )

        if mark_as_loaded:
            for family in family_info_list:
                self._finalize_family_load(family['project_id'],
                                           family['family_id'])

    def _load_variants_for_family_set(self,
                                      family_info_list,
                                      vcf_file_path,
                                      reference_populations=None,
                                      vcf_id_map=None,
                                      start_from_chrom=None,
                                      end_with_chrom=None):
        """
        Load variants for a set of families, assuming all come from the same VCF file

        Added after load_variants_for_family to speed up loading - goal is to
        only iterate the VCF once. Here's how it works:

        for each raw variant:
            annotate
            for each family:
                extract family variant from full variant
                update variant inheritance
                if family variant is relevant for family:
                    add to collection

        """
        self._add_vcf_file_for_family_set(
            family_info_list,
            vcf_file_path,
            reference_populations=reference_populations,
            vcf_id_map=vcf_id_map,
            start_from_chrom=start_from_chrom,
            end_with_chrom=end_with_chrom,
        )

    def _add_vcf_file_for_family_set(self,
                                     family_info_list,
                                     vcf_file_path,
                                     reference_populations=None,
                                     vcf_id_map=None,
                                     start_from_chrom=None,
                                     end_with_chrom=None):
        collections = {
            f['family_id']: self._db[f['coll_name']]
            for f in family_info_list
        }
        #for collection in collections.values():
        #    collection.drop_indexes()
        indiv_id_list = [i for f in family_info_list for i in f['individuals']]
        number_of_families = len(family_info_list)
        sys.stderr.write(
            "Loading variants for %(number_of_families)d families %(family_info_list)s from %(vcf_file_path)s\n"
            % locals())

        for family in family_info_list:
            print("Indexing family: " + str(family))
            collection = collections[family['family_id']]
            collection.ensure_index([('xpos', 1), ('ref', 1), ('alt', 1)])

        # check whether some of the variants for this chromosome have been loaded already;
        # if yes, start from the last loaded variant rather than from the beginning
        if "_chr" in vcf_file_path or ".chr" in vcf_file_path:
            # if the VCF files are split by chromosome (e.g. for WGS projects), check within the chromosome
            vcf_file = compressed_file(vcf_file_path)
            variant = next(
                vcf_stuff.iterate_vcf(vcf_file,
                                      genotypes=False,
                                      indiv_id_list=indiv_id_list,
                                      vcf_id_map=vcf_id_map))
            print(vcf_file_path + "  - chromosome: " + str(variant.chr))
            vcf_file.close()

            position_per_chrom = {}
            for chrom in range(1, 24):
                position_per_chrom[chrom] = defaultdict(int)
                for family in family_info_list:  #variants = collections[family['family_id']].find().sort([('xpos',-1)]).limit(1)
                    variants = list(collections[family['family_id']].find({
                        '$and': [{
                            'xpos': {
                                '$gte': chrom * 1e9
                            }
                        }, {
                            'xpos': {
                                '$lt': (chrom + 1) * 1e9
                            }
                        }]
                    }).sort([('xpos', -1)]).limit(1))
                    if len(variants) > 0:
                        position_per_chrom[chrom][family[
                            'family_id']] = variants[0]['xpos'] - chrom * 1e9
                    else:
                        position_per_chrom[chrom][family['family_id']] = 0

            for chrom in range(1, 24):
                position_per_chrom[chrom] = min(
                    position_per_chrom[chrom].values()
                )  # get the smallest last-loaded variant position for this chromosome across all families

            chr_idx = int(variant.xpos / 1e9)
            start_from_pos = int(position_per_chrom[chr_idx])

            print("Start from: %s - %s (%0.1f%% done)" %
                  (chr_idx, start_from_pos, 100. * start_from_pos /
                   CHROMOSOME_SIZES[variant.chr.replace("chr", "")]))
            tabix_file = pysam.TabixFile(vcf_file_path)
            vcf_iter = itertools.chain(
                tabix_file.header,
                tabix_file.fetch(variant.chr.replace("chr", ""),
                                 start_from_pos, int(2.5e8)))
        elif start_from_chrom or end_with_chrom:
            if start_from_chrom:
                print("Start chrom: chr%s" % start_from_chrom)
            if end_with_chrom:
                print("End chrom: chr%s" % end_with_chrom)

            chrom_list = list(map(str, range(1, 23))) + ['X', 'Y']
            chrom_list_start_index = 0
            if start_from_chrom:
                chrom_list_start_index = chrom_list.index(
                    start_from_chrom.replace("chr", "").upper())

            chrom_list_end_index = len(chrom_list)
            if end_with_chrom:
                chrom_list_end_index = chrom_list.index(
                    end_with_chrom.replace("chr", "").upper())

            tabix_file = pysam.TabixFile(vcf_file_path)
            vcf_iter = tabix_file.header
            for chrom in chrom_list[
                    chrom_list_start_index:chrom_list_end_index + 1]:
                print("Will load chrom: " + chrom)
                try:
                    vcf_iter = itertools.chain(vcf_iter,
                                               tabix_file.fetch(chrom))
                except ValueError as e:
                    print("WARNING: " + str(e))

        else:
            vcf_iter = vcf_file = compressed_file(vcf_file_path)
            # TODO handle case where it's one vcf file, not split by chromosome

        size = os.path.getsize(vcf_file_path)

        #progress = get_progressbar(size, 'Loading VCF: {}'.format(vcf_file_path))

        def insert_all_variants_in_buffer(buff, collections_dict):
            for family_id in buff:
                if len(buff[family_id]) == 0:  # defensive programming
                    raise ValueError(
                        "%s has zero variants to insert. Should not be in buff."
                        % family_id)

            while len(buff) > 0:
                # choose a random family for which to insert a variant from among families that still have variants to insert
                family_id = random.choice(buff.keys())

                # pop a variant off the list for this family, and insert it
                family_variant_dict_to_insert = buff[family_id].pop()
                c = collections_dict[family_id]
                c.insert(family_variant_dict_to_insert)

                if len(buff[family_id]) == 0:
                    del buff[
                        family_id]  # if no more variants for this family, delete it

        vcf_rows_counter = 0
        variants_buffered_counter = 0
        family_id_to_variant_list = defaultdict(
            list)  # will accumulate variants to be inserted all at once
        for variant in vcf_stuff.iterate_vcf(vcf_iter,
                                             genotypes=True,
                                             indiv_id_list=indiv_id_list,
                                             vcf_id_map=vcf_id_map):
            if variant.alt == "*":
                #print("Skipping GATK 3.4 * alt allele: " + str(variant.unique_tuple()))
                continue

            try:
                annotation = self._annotator.get_annotation(
                    variant.xpos,
                    variant.ref,
                    variant.alt,
                    populations=reference_populations)
            except ValueError, e:
                sys.stderr.write("WARNING: " + str(e) + "\n")
                continue

            vcf_rows_counter += 1
            for family in family_info_list:
                # TODO: can we move this inside the if relevant clause below?
                try:
                    family_variant = variant.make_copy(
                        restrict_to_genotypes=family['individuals'])
                    family_variant_dict = family_variant.toJSON()
                    _add_index_fields_to_variant(family_variant_dict,
                                                 annotation)
                    if xbrowse_utils.is_variant_relevant_for_individuals(
                            family_variant, family['individuals']):
                        collection = collections[family['family_id']]
                        if not collection.find_one({
                                'xpos': family_variant.xpos,
                                'ref': family_variant.ref,
                                'alt': family_variant.alt
                        }):
                            family_id_to_variant_list[family[
                                'family_id']].append(family_variant_dict)
                            variants_buffered_counter += 1
                except Exception, e:
                    sys.stderr.write(
                        "ERROR: on variant %s, family: %s - %s\n" %
                        (variant.toJSON(), family, e))

            if variants_buffered_counter > 2000:
                print(
                    date.strftime(datetime.now(), "%m/%d/%Y %H:%M:%S") +
                    "-- %s:%s-%s-%s (%0.1f%% done) - inserting %d family-variants from %d vcf rows into %s families"
                    % (variant.chr, variant.pos, variant.ref, variant.alt,
                       100 * variant.pos /
                       CHROMOSOME_SIZES[variant.chr.replace("chr", "")],
                       variants_buffered_counter, vcf_rows_counter,
                       len(family_id_to_variant_list)))

                insert_all_variants_in_buffer(family_id_to_variant_list,
                                              collections)

                assert len(family_id_to_variant_list) == 0
                vcf_rows_counter = 0
                variants_buffered_counter = 0
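The xpos arithmetic used throughout this class packs (chromosome, position) into a single sortable integer, which is why range queries like {'$gte': chrom * 1e9, '$lt': (chrom + 1) * 1e9} select exactly one chromosome. A minimal sketch of that encoding, assuming positions stay below 1e9 as those queries imply:

def make_xpos(chrom_index, pos):
    # One integer per (chromosome, position): the chromosome occupies
    # the billions digits, the position the rest.
    return int(chrom_index * 1e9) + pos

def split_xpos(xpos):
    # Inverse, mirroring `chr_idx = int(variant.xpos / 1e9)` above.
    return xpos // int(1e9), xpos % int(1e9)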
Example #53
0
#TIMEZONE = 1

sun = Sun()


def daterange(start_date, end_date):
  for n in range(int ((end_date - start_date).days)+1):
    yield start_date + timedelta(n)

#start_date = date(2019, 1, 1)
#end_date = date(2019, 12, 31)
start_date = date(2020, 11, 1)
end_date = date(2020, 12, 31)

date = datetime.now()
gy = int(date.strftime("%Y"))
gm = int(date.strftime("%m"))
gd = int(date.strftime("%d"))

print "BEGIN:VCALENDAR"
print "PRODID:calendar-201901"
print "VERSION:2.0"

for single_date in daterange(start_date, end_date):
  #print single_date.strftime("%Y-%m-%d")

  dy = int(single_date.strftime("%Y"))
  dm = int(single_date.strftime("%m"))
  dd = int(single_date.strftime("%d"))

  sunrise = sun.getSunriseTime( coords, single_date )['decimal']# + TIMEZONE
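For reference, the daterange() generator above is inclusive of the end date (note the +1 in its range); iterating the two months configured above looks like the commented-out line:

for single_date in daterange(date(2020, 11, 1), date(2020, 12, 31)):
  print single_date.strftime("%Y-%m-%d")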
Example #54
0
#!/usr/bin/python3

import sys, getopt
import os
from datetime import date, datetime
from src.rss_wordcloud_generator import RssWordcloudGenerator

# Generating word clouds

day = date.today()

try:
    opts, args = getopt.getopt(sys.argv[1:], "hd:", ["day="])
except getopt.GetoptError:
    print('usage: -d <day>  (YYYY-MM-DD)')
    sys.exit(1)

for opt, arg in opts:
    if opt in ("-d", "--day"):
        day = arg

date = datetime.strptime(day, '%Y-%m-%d')
gen = RssWordcloudGenerator(today=date.strftime("%Y-%m-%d"))
gen.run()
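Equivalently, the generator can be driven directly for a chosen day (class name and keyword taken from the snippet above):

gen = RssWordcloudGenerator(today="2021-03-15")
gen.run()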
Example #55
0
 def create_invoice_lines_issues(self,issues,invoice_dict,line_detailed,is_warranty):
     account_obj=self.env['account.analytic.account']
     invoice_obj=self.env['account.invoice']
     user = self.env['res.users'].browse(self._uid)
     total_expenses=0.0
     count_lines=1
     count_lines_products=1
     first_line_product=0
     invoices_list=[]
     cont_invoice=0
     limit_lines=user.company_id.maximum_invoice_lines
     if line_detailed==True:
         for issue in issues:
             if issue.timesheet_ids or issue.expense_line_ids and cont_invoice==0:
                 cont_invoice=1
                 break
     if cont_invoice==1 or line_detailed==False:
         inv=invoice_obj.create(invoice_dict)
         invoices_list.append(inv.id)
     for issue in issues:
         for timesheet in issue.timesheet_ids:
             for account_line in timesheet.line_id:
                 if not account_line.invoice_id:
                     total_timesheet=0.0
                     quantity,total_timesheet=account_obj._get_invoice_price(account_line.account_id,account_line.date,timesheet.start_time,timesheet.end_time,issue.product_id.id,issue.categ_id.id,account_line.unit_amount,timesheet.service_type,timesheet.employee_id.id,account_line.to_invoice.id)
                     if is_warranty==False:
                         if issue.partner_id and issue.branch_id:
                             if issue.branch_id.property_product_pricelist:
                                 if account_line.account_id.pricelist_id.currency_id.id != issue.branch_id.property_product_pricelist.currency_id.id:
                                     import_currency_rate=account_line.account_id.pricelist_id.currency_id.get_exchange_rate(issue.branch_id.property_product_pricelist.currency_id,date.strftime(date.today(), "%Y-%m-%d"))[0]
                                 else:
                                     import_currency_rate = 1
                             else:
                                 if account_line.account_id.pricelist_id.currency_id.id != account_line.account_id.company_id.currency_id.id:
                                     import_currency_rate=account_line.account_id.pricelist_id.currency_id.get_exchange_rate(account_line.account_id.company_id.currency_id,date.strftime(date.today(), "%Y-%m-%d"))[0]
                                 else:
                                     import_currency_rate = 1
                         elif issue.partner_id and not issue.branch_id:
                             if issue.partner_id.property_product_pricelist:
                                 if account_line.account_id.pricelist_id.currency_id.id != issue.partner_id.property_product_pricelist.currency_id.id:
                                     import_currency_rate=account_line.account_id.pricelist_id.currency_id.get_exchange_rate(issue.partner_id.property_product_pricelist.currency_id,date.strftime(date.today(), "%Y-%m-%d"))[0]
                                 else:
                                     import_currency_rate = 1
                             else:
                                 if account_line.account_id.pricelist_id.currency_id.id != account_line.account_id.company_id.currency_id.id:
                                     import_currency_rate=account_line.account_id.pricelist_id.currency_id.get_exchange_rate(account_line.account_id.company_id.currency_id,date.strftime(date.today(), "%Y-%m-%d"))[0]
                                 else:
                                     import_currency_rate = 1
                     elif is_warranty==True:
                         if  issue.product_id.manufacturer.property_product_pricelist_purchase:
                             if account_line.account_id.pricelist_id.currency_id.id != issue.product_id.manufacturer.property_product_pricelist_purchase.currency_id.id:
                                 import_currency_rate=account_line.account_id.pricelist_id.currency_id.get_exchange_rate(issue.product_id.manufacturer.property_product_pricelist_purchase.currency_id,date.strftime(date.today(), "%Y-%m-%d"))[0]
                             else:
                                 import_currency_rate = 1
                         else:
                             if account_line.account_id.pricelist_id.currency_id.id != account_line.account_id.company_id.currency_id.id:
                                 import_currency_rate=account_line.account_id.pricelist_id.currency_id.get_exchange_rate(account_line.account_id.company_id.currency_id,date.strftime(date.today(), "%Y-%m-%d"))[0]
                             else:
                                 import_currency_rate = 1
                     if (timesheet.end_time-timesheet.start_time!=0 or total_timesheet!=0):
                         invoice_line={
                                     'product_id':account_line.product_id.id ,
                                     'name': account_line.product_id.description+'-'+issue.product_id.description,
                                     'real_quantity':timesheet.end_time-timesheet.start_time,
                                     'quantity':quantity,
                                     'price_unit':total_timesheet*import_currency_rate,
                                     'uos_id':account_line.product_id.uom_id.id,
                                     'discount':account_line.account_id.to_invoice.factor,
                                     'account_analytic_id':account_line.account_id.id,
                                     'price_subtotal':total_timesheet*quantity,
                                     'invoice_line_tax_id':[(6, 0, [tax.id for tax in account_line.product_id.taxes_id])],
                                     'account_id':account_line.product_id.property_account_income.id or account_line.product_id.categ_id.property_account_income_categ.id
                                     }
                         if timesheet.ticket_number:
                             invoice_line['reference']='R#'+timesheet.ticket_number
                         if issue.warranty=='seller':
                             invoice_line['price_unit']=0
                         if count_lines<=limit_lines or limit_lines==0 or limit_lines==-1:
                             inv.write({'invoice_line':[(0,0,invoice_line)]})
                             if issue.issue_number not in inv.origin:
                                 inv.write({'origin':inv.origin + issue.issue_number +'-'})
                             inv.write({'issue_ids':[(4,issue.id)]})
                             count_lines+=1
                         else:
                             if issue.issue_number not in inv.origin:
                                 inv.write({'origin':inv.origin + issue.issue_number})
                             inv=invoice_obj.create(invoice_dict)
                             invoices_list.append(inv.id)
                             count_lines=1
                             inv.write({'invoice_line':[(0,0,invoice_line)]})
                             if issue.issue_number not in inv.origin:
                                 inv.write({'origin':inv.origin + issue.issue_number +'-'})
                             inv.write({'issue_ids':[(4,issue.id)]})
                             count_lines+=1
                     account_line.write({'invoice_id':inv.id})
         for backorder in issue.backorder_ids:
             if backorder.delivery_note_id and backorder.delivery_note_id.state=='done' and backorder.picking_type_id.code=='outgoing' and backorder.invoice_state!='invoiced' and backorder.state=='done':
                 for delivery_note_lines in backorder.delivery_note_id.note_lines:
                     if is_warranty==False:
                         if issue.partner_id and issue.branch_id:
                             if issue.branch_id.property_product_pricelist:
                                 if delivery_note_lines.note_id.currency_id.id != issue.branch_id.property_product_pricelist.currency_id.id:
                                     import_currency_rate=delivery_note_lines.note_id.currency_id.get_exchange_rate(issue.branch_id.property_product_pricelist.currency_id,date.strftime(date.today(), "%Y-%m-%d"))[0]
                                 else:
                                     import_currency_rate = 1
                             else:
                                 if delivery_note_lines.note_id.currency_id.id !=  issue.analytic_account_id.company_id.currency_id.id:
                                     import_currency_rate=delivery_note_lines.note_id.currency_id.get_exchange_rate(issue.analytic_account_id.company_id.currency_id,date.strftime(date.today(), "%Y-%m-%d"))[0]
                                 else:
                                     import_currency_rate = 1
                         elif issue.partner_id and not issue.branch_id:
                             if issue.partner_id.property_product_pricelist:
                                 if delivery_note_lines.note_id.currency_id.id != issue.partner_id.property_product_pricelist.currency_id.id:
                                     import_currency_rate=delivery_note_lines.note_id.currency_id.get_exchange_rate(issue.partner_id.property_product_pricelist.currency_id,date.strftime(date.today(), "%Y-%m-%d"))[0]
                                 else:
                                     import_currency_rate = 1
                             else:
                                 if delivery_note_lines.note_id.currency_id.id !=  issue.analytic_account_id.company_id.currency_id.id:
                                     import_currency_rate=delivery_note_lines.note_id.currency_id.get_exchange_rate(issue.analytic_account_id.company_id.currency_id,date.strftime(date.today(), "%Y-%m-%d"))[0]
                                 else:
                                     import_currency_rate = 1
                     elif is_warranty==True:
                         if  issue.product_id.manufacturer.property_product_pricelist_purchase:
                             if delivery_note_lines.note_id.currency_id.id != issue.product_id.manufacturer.property_product_pricelist_purchase.currency_id.id:
                                 import_currency_rate=delivery_note_lines.note_id.currency_id.get_exchange_rate(issue.product_id.manufacturer.property_product_pricelist_purchase.currency_id,date.strftime(date.today(), "%Y-%m-%d"))[0]
                             else:
                                 import_currency_rate = 1
                         else:
                             if delivery_note_lines.note_id.currency_id.id !=issue.analytic_account_id.company_id.currency_id.id:
                                 import_currency_rate=delivery_note_lines.note_id.currency_id.get_exchange_rate(issue.analytic_account_id.company_id.currency_id,date.strftime(date.today(), "%Y-%m-%d"))[0]
                             else:
                                 import_currency_rate = 1
                     if delivery_note_lines.note_id.delivery_note_origin.name:
                         origin=delivery_note_lines.note_id.delivery_note_origin.name
                     else:
                         origin=delivery_note_lines.note_id.name
                     invoice_line={
                                 'product_id':delivery_note_lines.product_id.id,
                                 'name': issue.product_id.description +'-'+delivery_note_lines.product_id.description,
                                 'quantity':delivery_note_lines.quantity,
                                 'real_quantity':delivery_note_lines.quantity,
                                 'uos_id':delivery_note_lines.product_uos.id,
                                 'price_unit':delivery_note_lines.price_unit*import_currency_rate,
                                 'discount':delivery_note_lines.discount,
                                 'invoice_line_tax_id':[(6, 0, [tax.id for tax in delivery_note_lines.taxes_id])],
                                 'account_analytic_id':issue.analytic_account_id.id,
                                 'reference':'NE#'+ origin,
                                 'account_id':delivery_note_lines.product_id.property_account_income.id or delivery_note_lines.product_id.categ_id.property_account_income_categ.id
                                 }
                     if issue.warranty=='seller':
                         invoice_line['price_unit']=0
                     if line_detailed==True:
                         if first_line_product==0:
                             inv_prod=invoice_obj.create(invoice_dict)
                             if issue.issue_number not in inv_prod.origin:
                                 inv_prod.write({'origin':inv_prod.origin + issue.issue_number +'-'})
                             invoices_list.append(inv_prod.id)
                             first_line_product+=1
                         if count_lines_products<=limit_lines or limit_lines==0 or limit_lines==-1:
                             if issue.issue_number not in inv_prod.origin:
                                 inv_prod.write({'origin':inv_prod.origin + issue.issue_number +'-'})
                             inv_prod.write({'invoice_line':[(0,0,invoice_line)]})
                             inv_prod.write({'issue_ids':[(4,issue.id)]})
                             backorder.delivery_note_id.write({'invoice_ids':[(4,inv_prod.id)]})
                             count_lines_products+=1
                         else:
                             if issue.issue_number not in inv_prod.origin:
                                 inv_prod.write({'origin':inv_prod.origin + issue.issue_number})
                             inv_prod=invoice_obj.create(invoice_dict)
                             invoices_list.append(inv_prod.id)
                             count_lines_products=0
                             if issue.issue_number not in inv_prod.origin:
                                 inv_prod.write({'origin':inv_prod.origin + issue.issue_number +'-'})
                             inv_prod.write({'invoice_line':[(0,0,invoice_line)]})
                             inv_prod.write({'issue_ids':[(4,issue.id)]})
                             backorder.delivery_note_id.write({'invoice_ids':[(4,inv_prod.id)]})
                             count_lines_products+=1
                     else:
                         if count_lines<=limit_lines or limit_lines==0 or limit_lines==-1:
                             if issue.issue_number not in inv.origin:
                                 inv.write({'origin':inv.origin + issue.issue_number +'-'})
                             inv.write({'invoice_line':[(0,0,invoice_line)]})
                             inv.write({'issue_ids':[(4,issue.id)]})
                             backorder.delivery_note_id.write({'invoice_ids':[(4,inv.id)]})
                             count_lines+=1
                         else:
                             if issue.issue_number not in inv.origin:
                                 inv.write({'origin':inv.origin + issue.issue_number})
                             inv=invoice_obj.create(invoice_dict)
                             invoices_list.append(inv.id)
                             count_lines=1
                             if issue.issue_number not in inv.origin:
                                 inv.write({'origin':inv.origin + issue.issue_number +'-'})
                             inv.write({'invoice_line':[(0,0,invoice_line)]})
                             inv.write({'issue_ids':[(4,issue.id)]})
                             backorder.delivery_note_id.write({'invoice_ids':[(4,inv.id)]})
                             count_lines+=1
                     backorder.write({'invoice_state':'invoiced'})
                     backorder.move_lines.write({'invoice_state':'invoiced'})
                     backorder.delivery_note_id.write({'state':'invoiced'})
          for expense_line in issue.expense_line_ids:
              if expense_line.expense_id.state in ('done', 'paid'):
                  for move_line in expense_line.expense_id.account_move_id.line_id:
                      for line in move_line.analytic_lines:
                          # tag the matching, not-yet-invoiced analytic line with the invoice
                          if (line.account_id == expense_line.analytic_account
                                  and line.name == expense_line.name
                                  and line.unit_amount == expense_line.unit_quantity
                                  and (line.amount * -1 / line.unit_amount) == expense_line.unit_amount
                                  and not line.invoice_id):
                              line.write({'invoice_id': inv.id})
      return invoices_list
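The nested tuples written into invoice_line and issue_ids above are OpenERP/Odoo ORM relational commands: (0, 0, vals) creates a new related record from vals, while (4, id) links an already-existing record by id. A minimal sketch of the pattern (the invoice record and the field values here are hypothetical, not taken from the module above):

    # (0, 0, vals): create a new line and attach it to the invoice in one write
    invoice.write({'invoice_line': [(0, 0, {'name': 'Service', 'price_unit': 100.0})]})
    # (4, id): link an existing record without creating or copying it
    invoice.write({'issue_ids': [(4, issue.id)]})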
Example #56
0
import json
import re
import time
from datetime import date

import pandas as pd
import requests
import matplotlib.pyplot as plt


def retrieve_quotes_historical(stock_code):
    # scrape the JSON price history embedded in the Yahoo Finance page
    url = 'https://finance.yahoo.com/quote/%s/history?p=%s' % (stock_code,
                                                               stock_code)
    r = requests.get(url)
    m = re.findall('"HistoricalPriceStore":{"prices":(.*?),"isPending"',
                   r.text)
    quotes = []
    if m:
        quotes = json.loads(m[0])
        quotes = quotes[::-1]  # oldest first
    # dividend/split rows carry a 'type' field; keep only price rows
    return [item for item in quotes if 'type' not in item]


quotes = retrieve_quotes_historical('KO')
list1 = []
for quote in quotes:
    x = date.fromtimestamp(quote['date'])
    y = date.strftime(x, '%Y-%m-%d')
    list1.append(y)
quotesdf_ori = pd.DataFrame(quotes, index=list1)
# quotesdf_m = quotesdf_ori.drop(['unadjclose'], axis = 1)
quotesdf = quotesdf_ori.drop(['date'], axis=1)
listtemp = []
for idx in quotesdf.index:
    temp = time.strptime(idx, "%Y-%m-%d")
    listtemp.append(temp.tm_mon)
tempdf = quotesdf.copy()
tempdf['month'] = listtemp
plt.figure(figsize=(8, 6), dpi=50)
closeMeansKO = tempdf.groupby('month').close.mean()
x = closeMeansKO.index
y = closeMeansKO.values
plt.plot(x, y, 'o')
plt.show()
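For reference, each element of quotes is a dict decoded from Yahoo's embedded HistoricalPriceStore JSON; only the date field (a Unix timestamp) and close are used above. A representative record, with invented values:

    # shape of one entry in quotes (field values are illustrative only)
    {'date': 1546214400, 'open': 47.2, 'high': 47.6, 'low': 46.9,
     'close': 47.3, 'volume': 10012300, 'adjclose': 45.1}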
Example #57
0
def convert_date(date):
    return date.strftime("%Y%m%d")
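A quick usage check (assuming from datetime import date is in scope; the sample value is invented):

    from datetime import date

    print(convert_date(date(2020, 1, 31)))  # -> '20200131'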
Example #58
0
def init(conffile):
    # savedays, today, hastoday and snapshots are module-level globals
    global BACKUP_VOLS
    with open(conffile, "r") as fp:
        BACKUP_VOLS = json.load(fp, encoding="utf-8")

    for (volume, conf) in BACKUP_VOLS.items():
        daylist = savedays[volume] = []
        # last n days
        for c in range(conf['days'] - 1, -1, -1):
            daylist.append(today - timedelta(days=c))
        # last n weeks (get mondays)
        monday = today - timedelta(days=today.isoweekday() - 1)
        daylist.append(monday)
        for c in range(conf['weeks'] - 1, 0, -1):
            daylist.append(monday - timedelta(days=c * 7))
        # last n months (first monday of month)
        for c in range(conf['months'] - 1, -1, -1):
            year = today.year
            month = today.month - c
            if month <= 0:
                year = year - 1
                month = 12 + month
            firstmonday = datetime(year, month, 1).date()
            if firstmonday.isoweekday() != 1:
                firstmonday = firstmonday + timedelta(
                    days=7 - firstmonday.isoweekday() + 1)
            daylist.append(firstmonday)
            #daylist.append(datetime(today.year, today.month - c, 1).date())

    # local() runs the EC2 CLI command and captures its tab-separated output
    snapshots_data = local('ec2-describe-snapshots', capture=True).split('\n')

    snapshots_data = [
        l.split('\t')[:9] for l in snapshots_data
        if l.startswith('SNAPSHOT')
    ]
    for (_, snapshot, volume, status, datestr, progress, _, _,
         _) in snapshots_data:
        snapshotdate = dateutil.parser.parse(datestr).date()
        if volume in BACKUP_VOLS:
            if snapshotdate == today:
                hastoday[volume] = {
                    'status': status,
                    'snapshot': snapshot,
                    'progress': progress.replace('%', '')
                }
            if volume not in snapshots:
                snapshots[volume] = []
            snapshots[volume].append((snapshot, status, snapshotdate))

    # sort by date
    for snapshotlist in snapshots.values():
        snapshotlist.sort(key=lambda x: x[2], reverse=True)

    for volume in BACKUP_VOLS.keys():
        if volume not in snapshots:
            snapshots[volume] = []

    print "VOLUME\tSNAPSHOT\tSTATUS\tDATE\tDESC"
    for (volume, snapshotlist) in snapshots.items():
        for (snapshot, status, date) in snapshotlist:
            datestr = date.strftime('%Y-%m-%d')
            print "%s\t%s\t%s\t%s\t%s" % (volume, snapshot, status, datestr,
                                          BACKUP_VOLS[volume]['comment'])
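init expects conffile to hold a JSON object mapping EBS volume ids to retention settings; days, weeks, months and comment are the keys the function actually reads. A sketch of the decoded structure, with an invented volume id and values:

    # what BACKUP_VOLS looks like after json.load (values are illustrative)
    BACKUP_VOLS = {
        "vol-0123456789abcdef0": {
            "days": 7,      # keep the last 7 daily snapshots
            "weeks": 4,     # plus the last 4 Mondays
            "months": 3,    # plus the first Monday of each of the last 3 months
            "comment": "nightly data volume",
        },
    }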
Example #59
0
def setLastLoggedIn(date):
    date_str = date.strftime("%Y-%m-%d %H:%M:%S")
    config.plugins.iptvplayer.shortcut_last_loggin.value = date_str
    config.plugins.iptvplayer.shortcut_last_loggin.save()
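A usage sketch (assuming from datetime import datetime is in scope; the config object belongs to the IPTVPlayer Enigma2 plugin):

    from datetime import datetime

    setLastLoggedIn(datetime.now())  # stores e.g. '2020-01-31 14:05:09'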
Example #60
0
def get_range_dates(startdate, date_range):
    date_ranges = []
    for i in range(date_range):
        startdate += datetime.timedelta(days=1)
        date_ranges.append(date.strftime(startdate, '%m/%d/%y'))
    return date_ranges
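Note that startdate is advanced before each append, so the returned list begins the day after startdate. A quick check (assuming import datetime and from datetime import date are both in scope, since the body mixes the two styles):

    import datetime
    from datetime import date

    print(get_range_dates(datetime.date(2020, 1, 1), 3))
    # -> ['01/02/20', '01/03/20', '01/04/20']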