def generate_parameters(self, cr, uid, ids, data, context):
	val = {}
	as_on_date = data['form']['as_on_date']
	if data['form']['division_id']:
		division_id = data['form']['division_id'][0]
		division_name = data['form']['division_id'][1]
	printed = data['form']['printed_by'][1]
	printed_date = data['form']['print_date']

	# Re-format the print timestamp from '%Y-%m-%d %H:%M:%S' to '%d-%m-%Y %H:%M:%S'
	d1 = datetime.strptime(printed_date.encode('utf-8'), '%Y-%m-%d %H:%M:%S')
	p_date = d1.strftime('%d-%m-%Y %H:%M:%S')

	# Re-format the as-on date from '%Y-%m-%d' to '%d-%m-%Y'
	as_d1 = datetime.strptime(as_on_date.encode('utf-8'), '%Y-%m-%d')
	as_d2 = as_d1.strftime('%d-%m-%Y')

	val['as_date_range'] = as_d2
	val['as_on_date'] = as_on_date
	val['user_id'] = uid
	val['printed_by'] = str(printed)
	val['print_date'] = p_date
	if data['form']['division_id']:
		val['division_id'] = division_id
		val['division_name'] = division_name
	else:
		val['division_id'] = 0
		val['division_name'] = 'ALL'

	return val
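The parameter block above mostly just round-trips date strings through strptime/strftime to switch their display format. A minimal standalone sketch of that conversion (stdlib only; the input value is illustrative):

from datetime import datetime

as_on_date = '2015-11-17'  # illustrative value in the report's '%Y-%m-%d' format
print(datetime.strptime(as_on_date, '%Y-%m-%d').strftime('%d-%m-%Y'))  # -> 17-11-2015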
Example #2

def datetime_from_rietveld(date_string):
  try:
    return datetime.strptime(date_string, '%Y-%m-%d %H:%M:%S.%f')
  except ValueError:
    # Sometimes rietveld returns a value without the fractional-seconds part,
    # so we attempt to parse those cases as well.
    return datetime.strptime(date_string, '%Y-%m-%d %H:%M:%S')
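With the function above in scope, both timestamp shapes parse (the values are illustrative, not real Rietveld output):

print(datetime_from_rietveld('2015-11-17 08:32:00.123456'))  # with fractional seconds
print(datetime_from_rietveld('2015-11-17 08:32:00'))         # without fractional seconds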
Example #3

def get_data(self, form):
	product = []
	if form['product_id']:
		# Coerce each id to int so the interpolated SQL fragment stays safe
		for product_id in form['product_id']:
			product.append("sm.product_id = %d" % int(product_id))

	if product:
		product = 'and (' + ' or '.join(product) + ')'
	else:
		product = ''

	self.cr.execute('''
			SELECT
			sp.id AS sp_id,
			sp.name AS grn_no,
			to_char(sp.date, 'dd/mm/yyyy') AS grn_date,
			sp.date AS sp_date,
			sm.po_to_stock_qty AS grn_qty,
			pt.name AS pro_name,
			uom.name AS uom,
			po.name AS po_no,
			to_char(po.date_order, 'dd/mm/yyyy') AS po_date,
			po.date_order AS tapo_date,
			pol.product_qty AS po_qty
			FROM stock_picking sp
			JOIN stock_move sm ON (sm.picking_id = sp.id)
			JOIN purchase_order_line pol ON (pol.id = sm.purchase_line_id)
			JOIN purchase_order po ON (po.id = pol.order_id)
			JOIN product_uom uom ON (uom.id = pol.product_uom)
			JOIN product_product prd ON (prd.id = pol.product_id)
			JOIN product_template pt ON (pt.id = prd.product_tmpl_id)
			WHERE sp.state = %s AND sp.type = %s AND sp.date >= %s AND sp.date <= %s ''' + product + '''
			ORDER BY po.name''', ('done', 'in', form['date_from'], form['date_to']))

	data = self.cr.dictfetchall()
	for sp in data:
		# Turn-around time in days between the PO date and the GRN date
		fmt = '%Y-%m-%d'
		d1 = datetime.strptime(sp['tapo_date'], fmt)
		d2 = datetime.strptime(sp['sp_date'], fmt)
		sp['tat_days'] = str((d2 - d1).days)

	return data
Example #4

def _get_time_remain_funtion(self, cr, uid, ids, field_name, arg, context):
    res = {}
    if ids:
        # Offset between UTC and local time, added back onto the remaining time
        difft_time = datetime.utcnow() - datetime.now()
        for record in self.browse(cr, uid, ids, context):
            gmt_now = datetime.strptime(time.strftime("%Y-%m-%d %H:%M:%S", gmtime()),
                                        "%Y-%m-%d %H:%M:%S")
            end_time = datetime.strptime(record.ebay_end_time, "%Y-%m-%d %H:%M:%S")
            time_remain = (end_time - gmt_now) + difft_time
            # Drop the microseconds, e.g. '1 day, 2:03:04.567890' -> '1 day, 2:03:04'
            timeremain = str(time_remain).split('.')[0]
            locate_val = 'Inactive' if timeremain.startswith('-') else 'Active'
            # Parameterised query instead of string interpolation
            cr.execute("UPDATE product_listing SET state=%s WHERE id=%s",
                       (locate_val, record.id))
            cr.commit()
            res[record.id] = {
                'time_remain_function': timeremain,
                'state': locate_val,
            }
    return res
Example #5

def create_book(post):
    try:
        if post['in_serie']:
            try:
                serie = get_serie(post['serie'])
                editeur = get_editeur(post['editeur'])
                categorie = models.Categorie.objects.get(pk=post['cat_prefix'])
                cote = generate_cote(categorie, post['numero'], serie.prefix)
                date = datetime.strptime(post['date_entree'], "%Y-%m-%d").date()
                auteurs = get_auteur_from_db(post['auteurs'])
                volume = models.Volume(cote=cote, titre=post['title'], isbn=post['isbn'],
                    description=post['description'], date_entree=date, editeur=editeur,
                    serie=serie, numero=post['numero'], is_manga=post['is_manga'], auteurs=auteurs,
                    nouveaute=post['nouveaute'])
                volume.save()
                return volume
            except ObjectDoesNotExist:
                return HttpResponse("KO Wrong Cat_prefix?", content_type="application/json")
        else:
            try:
                editeur = get_editeur(post['editeur'])
                categorie = models.Categorie.objects.get(pk=post['cat_prefix'])
                cote = generate_cote(categorie, 1, post['prefix'])
                date = datetime.strptime(post['date_entree'], "%Y-%m-%d").date()
                auteurs = get_auteur_from_db(post['auteurs'])
                oneshot = models.OneShot(cote=cote, titre=post['title'], isbn=post['isbn'],
                    description=post['description'], date_entree=date, editeur=editeur,
                    prefix=post['prefix'], is_manga=post['is_manga'], auteurs=auteurs,
                    nouveaute=post['nouveaute'])
                oneshot.save()
                return oneshot
            except ObjectDoesNotExist:
                return HttpResponse("KO Wrong Cat_prefix?", content_type="application/json")
    except KeyError as e:
        return HttpResponse("KO " + str(e) + " empty", content_type="application/json")
Example #6

def _hitung_reimburse_obat(self, cr, uid, ids, wage, jatah_reimburse_pengobatan, arg, context=None):
    rec = self.browse(cr, uid, ids, context=context)[0]
    typ = rec.type_id.reimburse_pengobatan
    wag = rec.wage
    date_start = rec.date_start
    date_end = rec.date_end
    dt_yr = datetime.strptime(date_start, "%Y-%m-%d").year
    dt_bln = datetime.strptime(date_start, "%Y-%m-%d").month
    dten_yr = False
    dten_bln = False
    if date_end:
        dten_yr = datetime.strptime(date_end, "%Y-%m-%d").year
        dten_bln = datetime.strptime(date_end, "%Y-%m-%d").month
    year = datetime.now().year
    result = {}
    for record in self.browse(cr, uid, ids, context=context):
        # Months counted towards this year's allowance; the combined start/end
        # check comes first so it is not shadowed by the start-only check
        if dt_yr == year and dten_yr == year:
            bulan = float(dten_bln)
        elif dt_yr == year:
            bulan = float(13 - dt_bln)
        elif date_end and dten_yr == year:
            bulan = float(dten_bln)
        else:
            bulan = float(12)
        total = wag * typ
        jatah = float((wag * typ) * (bulan / 12))
        if total >= rec.type_id.range_pengobatan:
            result[record.id] = float(rec.type_id.range_pengobatan * (bulan / 12))
        else:
            result[record.id] = jatah
    return result
Example #7

def time_since_last_timestamp(alert_str, attempt_alert_str):
    from datetime import datetime

    try:
        timer = int(config.get("General_settings", "Email Timer"))
    except Exception:
        timer = 60

    with open("/var/www/alert_log.txt") as log:
        lines = log.readlines()

    # Walk the log backwards so only the most recent matching entry is used
    for line in reversed(lines):
        if alert_str in line:
            print("Checking alert log for last time an email was sent...")
        elif attempt_alert_str in line:
            print("Checking alert log for last attempted email sent...")
        else:
            continue
        timestamp = line.split("|")[0]  # timestamp is always at the start of the log line
        d1 = datetime.strptime(timestamp, "%Y-%m-%d %H:%M:%S ")
        return (datetime.now() - d1).total_seconds() / 60
    # Neither the alert string nor the attempted-alert string was found in the log
    return timer + 100
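The minutes-since computation above is plain timedelta arithmetic; a self-contained sketch (stdlib only; the timestamp is illustrative and keeps the log's trailing-space format):

from datetime import datetime

d1 = datetime.strptime("2015-11-17 08:32:00 ", "%Y-%m-%d %H:%M:%S ")
minutes_since = (datetime.now() - d1).total_seconds() / 60
print(minutes_since)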
Example #8

def build_query():
    qb = QueryBuilder()
    start = datetime.strptime("2015-11-17 00:00:00", "%Y-%m-%d %H:%M:%S")
    end = datetime.strptime("2015-11-20 23:59:59", "%Y-%m-%d %H:%M:%S")
    qb.between(start, end)
    qb.orderbydesc("sys_created_on")
    return qb.return_query
Example #9

def table1(request):
	date1 = request.GET['terid']
	date2 = request.GET['date2']
	staffId = request.GET['staffId']

	print staffId
	if date2 == "":
		print "date2 is blank"
	else:
		print "date2 is not blank"
	date1 = date1.encode("utf-8")
	date2 = date2.encode("utf-8")
	date1 = datetime.strptime(date1, '%m/%d/%Y')
	date2 = datetime.strptime(date2, '%m/%d/%Y')
	if staffId == "null":
		print "staff id is blank"
		ids = Visitor.objects.filter(date__range = [date1,date2])
	else:
		print "staff visitor"
		ids = Visitor.objects.filter(date__range = [date1,date2], to_whom = staffId)
		print ids
	persons = Person.objects.filter(person_id__in = ids.values('person_id'))
	
	# visitor1 = Visitor.objects.filter(person = persons).select_related()
	print persons
	# return render_to_response('actual_table.html',
	# 			  {'tables' : Visitor.objects.filter(date__range = [date1,date2])}
	# )
	return render_to_response('actual_table.html',
				  {'tables' : persons })
Example #10

def importar_asignaturas(archivo, docente=False):
    f = codecs.open(archivo, mode='rb', encoding='utf-8')
    lineas = f.readlines()
    asignaturas = []
    for linea in lineas:
        lista = linea.split(';')[:14]
        try:
            try:
                periodoAcademicoId = int(lista[13])
                periodoAcademico = PeriodoAcademico.objects.get(id=periodoAcademicoId)
            except PeriodoAcademico.DoesNotExist:
                periodoAcademico = None
            asignatura = Asignatura(
                area=lista[0], carrera=lista[1], semestre=lista[2], 
                paralelo=lista[3], seccion=lista[4], modalidad=lista[5],
                nombre=lista[6], tipo=lista[7], creditos=int(lista[8]), 
                duracion=float(lista[9]), inicio=datetime.strptime(lista[10],'%d/%m/%Y').date(), 
                fin=datetime.strptime(lista[11],'%d/%m/%Y').date(), 
                idSGA=lista[12].replace(' ','').upper(), periodoAcademico=periodoAcademico
                )
            if docente:
                cedula = lista[13]
                docente = DocentePeriodoAcademico.objects.get(
                    usuario__cedula=cedula, periodoAcademico=periodoAcademico
                    )
                asignaturas.append(dict(asignatura=asignatura, docente=docente))
            else:
                asignaturas.append(asignatura)
        except ValueError, err:
            print "Error converting data for Asignaturas: ", err
    return asignaturas
Example #11

 def GET(**vars):
     patient = db.auth_user.with_alias('patient')
     creator = db.auth_user.with_alias('creator')
     editor = db.auth_user.with_alias('editor')
     dateFrom = datetime.strptime(request.vars.dateFrom,'%Y-%m-%d %H:%M:%S')
     dateTo = datetime.strptime(request.vars.dateTo,'%Y-%m-%d %H:%M:%S')
     if dateFrom > dateTo:
         raise HTTP(400)
     else:
         dateFrom = dateFrom.strftime('%Y-%m-%d %H:%M:%S')
         dateTo = dateTo.strftime('%Y-%m-%d %H:%M:%S')
     db.ant_biom.created_by.readable = db.ant_biom.modified_by.readable = db.ant_biom.created_on.readable = db.ant_biom.modified_on.readable = db.ant_biom.id_auth_user.readable = True
     db.post_biom.created_by.readable = db.post_biom.modified_by.readable = db.post_biom.created_on.readable = db.post_biom.modified_on.readable = db.post_biom.id_auth_user.readable = True
     rows_ant_biom = db((db.ant_biom.id_auth_user == request.vars.id_auth_user)&(db.ant_biom.created_on > dateFrom)&(db.ant_biom.created_on <= dateTo) ).select(db.ant_biom.id,
         db.ant_biom.cornea, db.ant_biom.ant_chamb,
         db.ant_biom.iris, db.ant_biom.lens, db.ant_biom.other, db.ant_biom.laterality,
         creator.first_name, creator.last_name, db.ant_biom.created_on,
         patient.first_name, patient.last_name,
         left=[patient.on(patient.id==db.ant_biom.id_auth_user),
         creator.on(creator.id==db.ant_biom.created_by)
         ])
     rows_post_biom = db((db.post_biom.id_auth_user == request.vars.id_auth_user)&(db.post_biom.created_on > dateFrom)&(db.post_biom.created_on <= dateTo)).select(db.post_biom.id,
         db.post_biom.vitreous, db.post_biom.retina, db.post_biom.macula, db.post_biom.papil, db.post_biom.other, db.post_biom.laterality,
         creator.first_name, creator.last_name, db.post_biom.created_on,
         patient.first_name, patient.last_name,
         left=[patient.on(patient.id==db.post_biom.id_auth_user),
         creator.on(creator.id==db.post_biom.created_by)
         ])
     data = '['+rows2json('ant_biom',rows_ant_biom)+','+rows2json('post_biom',rows_post_biom)+']'
     return data
Example #12

def get_time():
	conn = lite.connect("/home/jf/Documents/hate/Hate_story/4_teachers_mexico/4teacher.db")
	with open("./all_4_infection_rest", "r") as men, open("./all_4_time_diff_rest", "w") as f:
		for line in men:
			tmp = line.strip().split("|")
			# Skip rows where either timestamp is missing
			if not (tmp[3] and tmp[4]):
				continue
			created_at = datetime.strptime(tmp[3], "%Y-%m-%d %H:%M:%S")
			target_time = datetime.strptime(tmp[4], "%Y-%m-%d %H:%M:%S")
			delta = created_at - target_time

			with conn:
				cur = conn.cursor()
				sql = "select weight from pure_mention where source=? and target=?"
				cur.execute(sql, (tmp[1], tmp[0]))
				result = cur.fetchall()
				weight2 = 0
				for row in result:
					weight2 += int(row[0])

			# total_seconds() also covers deltas longer than one day
			record = tmp[2] + "," + str(weight2) + "," + str(int(delta.total_seconds() / 60))
			f.write("%s\n" % record)
Example #13

 def test_sqlalchemy_function_find_datasets(self):
     from datetime import datetime
     from madmex.persistence.driver import find_datasets
     start_date = datetime.strptime('2015-01-01', "%Y-%m-%d")
     end_date = datetime.strptime( '2015-12-31', "%Y-%m-%d")
     image_paths = find_datasets(start_date, end_date, 12, 1, 10, '21048')
     print image_paths
Example #14

def exportFeedback_wfactors():
	csvfileout = open('all_userfeeback.csv', 'w')
	csvheader=("idx","datetime","child_id","child_name","session","fformation","alone","groupid","gender","chouchou","feedback")
	writer = csv.DictWriter(csvfileout, csvheader)
	writer.writeheader()
	info=readInfoFile()
	all_data=[]
	files_in_dir = os.listdir('./visionLog_activity/')
	for file_in_dir in files_in_dir:
		print(file_in_dir)
		data_w = userfeedbacktoCSV('./visionLog_activity/'+file_in_dir)
		all_data.append(data_w)
	
	for data in all_data:
		for row in data:
			current_time = row[1]
			for i in info:
				start_time = datetime.strptime(i[2]+' '+i[3], '%d.%m.%y %H:%M:%S.%f')
				end_time = datetime.strptime(i[2]+' '+i[6], '%d.%m.%y %H:%M:%S.%f')
				
				if(current_time>= start_time and  current_time<= end_time):
					feedback = -1 if  row[-1]=='-' else 1
					new_row = {"idx":row[0],"datetime":datetime.strftime(current_time,'%Y-%m-%d %H:%M:%S:%f'),"child_id":i[0],"child_name":i[1],"session":i[-5],"fformation":i[5],"alone":i[-4],"groupid":i[-3],"gender":i[-2],"chouchou":i[-1],"feedback":feedback}
					writer.writerow(new_row)
	csvfileout.close()
Example #15

    def MyFinalTask(self, cr, uid, ids, context=None):
        res = datetime.now()
        finalresult = self.write(cr, uid, ids, {'EDate': res}, context=context)
        result = False
        # browse() with a list of ids returns a list of records; take the first
        brw_obj = self.pool.get('project.task.materials').browse(cr, uid, ids, context=context)[0]
        if brw_obj.SDate and brw_obj.EDate:
            dt_s_obj = datetime.strptime(brw_obj.SDate, "%Y-%m-%d %H:%M:%S")
            dt_e_obj = datetime.strptime(brw_obj.EDate, "%Y-%m-%d %H:%M:%S")
            diff = dt_e_obj - dt_s_obj
            # .seconds only covers the sub-day remainder; tasks are assumed to
            # start and end on the same day
            float_hours = diff.seconds / 3600.0
            result = self.write(cr, uid, ids, {'Diff_date': diff, 'hours': float_hours}, context=context)
        return finalresult, result
Example #16

    def parse(self, response):
        inmate_idx = 1
        base = "/html/body/div[@id=\"data\"]/table/tbody["
        sel = Selector(response)

        # Open database connection
        mongo_cli = pymongo.MongoClient('localhost', 27017)
        db = mongo_cli.hcf

        # Get number of inmates; count() comes back as a float string, hence int(float(...))
        inmates_length_raw = sel.xpath("count(/html/body/div[@id=\"data\"]/table/tbody)").extract()
        inmates_length = int(float(inmates_length_raw[0]))

        # Iterate through inmates
        while inmate_idx <= inmates_length:
            # Get fields
            name = sel.xpath(base + str(inmate_idx) + ']/tr/td[2]/text()').extract()
            sid = sel.xpath(base + str(inmate_idx) + ']/tr/td[3]/text()').extract()
            lodged_raw = sel.xpath(base + str(inmate_idx) + ']/tr/td[4]/text()').extract()
            lodged = lodged_raw[0].split(" ")[0]  # Get only date, ignore time
            released_raw = sel.xpath(base + str(inmate_idx) + ']/tr/td[5]/text()').extract()
            released = released_raw[0].split(" ")[0]  # Get only date, ignore time
            booking = sel.xpath(base + str(inmate_idx) + ']/tr/td[1]/text()').extract()
            status = sel.xpath(base + str(inmate_idx) + ']/tr/td[6]/text()').extract()

            # Create "lite" inmate
            inmate = {
                'sid': sid[0].strip(),
                'booking': booking[0],
                'fbi': '?',
                'name': name[0].strip(),
                'lodged': datetime.strptime(lodged, "%m/%d/%y"),
                'unit': '?',
                'release': datetime.strptime(released, "%m/%d/%y"),
                'det_id': '?',
                'img_name': '?',
                'race': '?',
                'sex': '?',
                'height': 0,
                'weight': 0,
                'hair_color': '?',
                'eye_color': '?',
                'bail_amount': 0,
                'release_status': status
            }

            # Create unknown charge
            charge_set = [{
                'charge_name': 'UNKNOWN',
                'statute_id': 'UNKNOWN-MC',
                'statute_desc': 'Inmate missed by roster bot, charges unknown.'
            }]

            # Update or add inmate, handle charge, include release status
            process_inmate(self, inmate, charge_set, db, status)

            inmate_idx += 1
Example #17

 def getObsFiles(self, variable, year, maxLeadtime=10, minLeadtime=1):
     """
     Get the observation files from a specified folder.

     :param variable:
     :param year: start year of decadal
     :return: tmp file with maxLeadtime years of observation
     """
     if not os.path.isfile(self.observation):
         raise NoFilesFoundError, '%s does not exist.' % self.observation
     
     variable_file = cdo.showname(input=self.observation)[0]
     if variable != variable_file:
         print 'WARNING: Variable in observation file is not %s. \n Variable will be renamed.' % variable
         self.observation = cdo.chvar(variable_file+','+variable, input=self.observation,
                                      output=self.tmpDir+self.getFilename(self.observation))
     
     if len(str(year)) == 4:
         year = int(str(year)+'12')    
     start_month = datetime.strftime(datetime.strptime(str(year), "%Y%m") + relativedelta(months=1), '%Y-%m-01')
     end_month = datetime.strftime(datetime.strptime(str(year), "%Y%m") + relativedelta(months=maxLeadtime+1) - relativedelta(days=1),'%Y-%m-31')
     
     tmp = cdo.seldate(','.join([start_month, end_month]), input=self.observation,
                       output=self.tmpDir+'reanalysis_'+experiment+str(year+1)+'-'+str(year+maxLeadtime)+'.nc', options='-f nc')
     return tmp
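The start/end window above leans on dateutil's relativedelta for month arithmetic. A standalone sketch of just that computation (assumes python-dateutil is installed; the year value is illustrative):

from datetime import datetime
from dateutil.relativedelta import relativedelta

year = 201512  # start year + month, as built by the method above
maxLeadtime = 10
start_month = datetime.strftime(datetime.strptime(str(year), "%Y%m") + relativedelta(months=1), '%Y-%m-01')
end_month = datetime.strftime(datetime.strptime(str(year), "%Y%m") + relativedelta(months=maxLeadtime + 1) - relativedelta(days=1), '%Y-%m-31')
print(start_month)  # -> 2016-01-01
print(end_month)    # -> 2016-10-31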
Example #18

def extract_agg_vals(vals):
    fmts = [
      '%Y-%m-%dT%H:%M:%S.%fZ',
      '%Y-%m-%dT%H:%M:%S.%f',
      '%Y-%m-%dT%H:%M:%S',
      '%Y-%m-%dT%H:%M',
      '%Y-%m-%dT%H'
    ]
    for fmt in fmts:
      try:
        ret = [datetime.strptime(val, fmt) for val in vals]
        if len(set([(d.hour, d.minute, d.second) for d in ret])) == 1:
          ret = [d.date() for d in ret]
        else:
          ret = [d - timedelta(hours=5) for d in ret]  # compensate for the timezone offset in 'Z' timestamps

        return ret
      except Exception as e:
        pass

    try:
      ret = [datetime.strptime(val, '%Y-%m-%d').date() for val in vals]
      return ret
    except Exception as ee:
      print ee
      return vals
Example #19

def GetCustomer():
	from datetime import datetime
	frappe.errprint("in get customer")
	content=GetCount()
	frappe.errprint(content)
	oauth = GetOauthDetails()
	h = {'Content-Type': 'application/json', 'Accept': 'application/json'}
	for i in range(1,content['customer_pages_per_100_count']+1):
		frappe.errprint(i)
		r = requests.get(url='http://staging.digitales.com.au.tmp.anchor.net.au/api/rest/customers?page='+cstr(i)+'&limit=100', headers=h, auth=oauth)
		content=json.loads(r.content)

		try:
			# Use a distinct loop variable so the page counter `i` above is not shadowed
			for cust in content:
				customer = frappe.db.sql("""select name,modified_date from `tabCustomer` where name='%s'""" % (cstr(cust.get('organisation')).replace("'", "")), as_list=1)
				if customer:
					modified_date = datetime.strptime(customer[0][1], '%Y-%m-%d %H:%M:%S')
					updated_date = datetime.strptime(cust.get('updated_at'), '%Y-%m-%d %H:%M:%S')
					frappe.errprint(modified_date)
					frappe.errprint(updated_date)
					if modified_date == updated_date:
						frappe.errprint("two customer dates are equal")
					else:
						frappe.errprint("in update customer")
						update_customer(customer[0][0], cust, content)
				else:
					frappe.errprint("in else part")
					create_customer(cust, content)
					
		except Exception,e:
			print e,'Error'
Example #20

def test():
    eventA_start = datetime.strptime('Nov 7 2015  8:32AM', '%b %d %Y %I:%M%p')
    eventA_end = datetime.strptime('Nov 7 2015  8:38AM', '%b %d %Y %I:%M%p')
    eventA = [eventA_start, eventA_end]

    eventB_start = datetime.strptime('Nov 7 2015  8:35AM', '%b %d %Y %I:%M%p')
    eventB_end = datetime.strptime('Nov 7 2015  8:43AM', '%b %d %Y %I:%M%p')
    eventB = [eventB_start, eventB_end]

    eventC_start = datetime.strptime('Nov 7 2015  9:35AM', '%b %d %Y %I:%M%p')
    eventC_end = datetime.strptime('Nov 7 2015  9:43AM', '%b %d %Y %I:%M%p')
    eventC = [eventC_start, eventC_end]

    eventD_start = datetime.strptime('Nov 7 2015  6:35AM', '%b %d %Y %I:%M%p')
    eventD_end = datetime.strptime('Nov 7 2015  6:43AM', '%b %d %Y %I:%M%p')
    eventD = [eventD_start, eventD_end]

    eventE_start = datetime.strptime('Nov 7 2015  9:38AM', '%b %d %Y %I:%M%p')
    eventE_end = datetime.strptime('Nov 7 2015  9:40AM', '%b %d %Y %I:%M%p')
    eventE = [eventE_start, eventE_end]


    events = [eventA, eventB, eventC, eventD, eventE]

    print(removeOverlapping(events))
Example #21

def _histo_data_non_cumulative(domain_list, histogram_type, start_date,
        end_date, interval, filters):
    def _get_active_length():
        # TODO - add to configs
        return 90 if histogram_type == 'active_cases' else 30

    timestamps = daterange(
        interval,
        datetime.strptime(start_date, "%Y-%m-%d").date(),
        datetime.strptime(end_date, "%Y-%m-%d").date(),
    )
    histo_data = {}
    for domain_name_data in domain_list:
        display_name = domain_name_data['display_name']
        domain_data = []
        for timestamp in timestamps:
            past_30_days = _histo_data(
                [domain_name_data],
                histogram_type,
                (timestamp - relativedelta(
                        days=_get_active_length())).isoformat(),
                timestamp.isoformat(),
                filters
            )
            domain_data.append(
                get_data_point(
                    sum(point['count']
                        for point in past_30_days[display_name]),
                    timestamp
                )
            )
        histo_data.update({
            display_name: domain_data
        })
    return histo_data
Example #22

def calendar_dev(request):
    if 'uid' in request.session:
        uid = request.session['uid']
        logged_in = 'none'
        user = get_object_or_404(User, id=uid)
    
    from datetime import datetime
    from datetime import timedelta
    if 'c_created' in request.session:
        get_c_data = (datetime.strptime(request.session['c_created'], '%Y-%m-%dT%H') + timedelta(hours=1)) < datetime.now() 
    else:
        get_c_data = True 
    if get_c_data:
        raw_data = {}
        from .models import Calendar
        date_list = []
        for entry in Calendar.objects.all():
            cur_date = datetime.strptime(entry.date,'%Y-%m-%d')
            print cur_date.strftime('%a, %b %d %Y')
            if cur_date >= datetime.today() - timedelta(2) and cur_date not in date_list:
                date_list.append(cur_date)
            # if len(entry.show_desc) > 500:
            #     entry.show_desc = entry.show_desc[:470]+'...'
            
            if entry.date not in raw_data:
                raw_data[entry.date] = []

            raw_data[entry.date].append({'date':entry.date,
                             'band':entry.show,
                             'support':'support',
                             'venue':entry.venue,
                             'tickets':entry.tickets,
                             'show_link':entry.show_link,
                             'db_id':str(entry.id),
                             'pic_url':entry.pic_url,
                             'show_desc':entry.show_desc})

        cal_data = []
        date_list.sort()
        for date in date_list:
            cal_data.append({'date':date.strftime('%a, %b %d %Y'),'shows':raw_data[date.strftime('%Y-%m-%d')]})
            
  
        request.session['data'] = cal_data
        request.session['c_created'] = datetime.now().strftime('%Y-%m-%dT%H')

    if request.method == "POST":
        if request.POST['action'] == 'plus':
            db_id = request.POST['db_id']
            
            profile = Profile.objects.get(user_auth_id=uid)
            cal_saves = ast.literal_eval(profile.cal_saves)
            if db_id not in cal_saves:
                cal_saves.append(db_id)
            profile.cal_saves = str(cal_saves)
            print profile.user_auth_id
            print profile.cal_saves
            profile.save()
    
    return render(request, 'calendar_dev.html', 
                 {'json_data':json.dumps(request.session['data']),})
Example #23

def from_json(d):
    if type(d) is dict:
        if '__cls__' in d:
            classname = d['__cls__']
            if (classname.endswith('Exception')
                    or classname.endswith('Error')
                    or classname.endswith('Exit')):
                return BusinessException(d['message'])
            if classname == 'sqlalchemy.util.KeyedTuple':
                from sqlalchemy.util import KeyedTuple
                return KeyedTuple(
                    map(lambda k: from_json(d[k]), d['_labels']),
                    labels=d['_labels'])
            if classname == 'datetime.datetime':
                from datetime import datetime
                try:
                    return datetime.strptime(d['value'], "%Y-%m-%dT%H:%M:%S")
                except ValueError:
                    return datetime.strptime(
                        d['value'], "%Y-%m-%dT%H:%M:%S.%f")
            if classname == 'datetime.date':
                from datetime import datetime
                return datetime.strptime(d['value'], "%Y-%m-%d").date()
            cls = import_class(classname)
            if hasattr(cls, 'from_json'):
                return cls.from_json(d)
        return dict(map(lambda (k, v): (k, from_json(v)), d.iteritems()))
    if type(d) is list or type(d) is tuple:
        return map(from_json, d)
    if type(d) is Decimal:
        if d % 1 == 0:
            return int(d)
        else:
            return float(d)
    return d
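For context, a minimal round trip through the datetime branch above, with from_json in scope (the '__cls__'/'value' payload shape is the one the function itself expects):

payload = {'__cls__': 'datetime.datetime', 'value': '2015-11-17T08:32:00'}
print(from_json(payload))  # -> 2015-11-17 08:32:00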
Example #24

def userfeedbacktoCSV(fname):
	'''
	Return the data_w array from an attention csv file.
	'''
	with open(fname) as myfile:
		logf = myfile.readlines()
		data_w = []
		count = 0
		for row in logf:
			row = row.split(' ')
			if len(row) == 3:
				stamp = row[0].replace('"', '') + ' ' + row[1].replace('"', '')
				if row[2].startswith("user_feedback"):
					feedback = row[2].split(':')
					feedback[-1] = feedback[-1].replace('\r\n', '')
					data_w.append([count, datetime.strptime(stamp, '%Y-%m-%d %H:%M:%S:%f'), feedback[-1].replace('"', '')])
				elif row[2].startswith("repetition"):
					data_w.append([count, datetime.strptime(stamp, '%Y-%m-%d %H:%M:%S:%f'), 'send'])
				elif row[2].startswith("word"):
					data_w.append([count, datetime.strptime(stamp, '%Y-%m-%d %H:%M:%S:%f'), 'new_word'])
			# Increment once per log line, not once per pattern checked
			count += 1
	return data_w
Example #25

def get_day_span(my):
    from datetime import datetime
    full_day_secs = 24 * 60 * 60
    date_format = "%Y-%m-%d %H:%M:%S"
    a = datetime.strptime(my.begin_date, date_format)
    b = datetime.strptime(my.end_date, date_format)
    delta = b - a
    day_secs = full_day_secs * delta.days
    # Default to zero so the final sum is defined even if a time part is missing
    begin_seconds = end_seconds = 0
    begin_time = my.begin_date.split(' ')
    end_time = my.end_date.split(' ')
    if len(begin_time) > 1 and len(end_time) > 1:
        begin_sections = begin_time[1].split(':')
        end_sections = end_time[1].split(':')
        begin_seconds = int(begin_sections[0]) * 3600 + int(begin_sections[1]) * 60 + int(begin_sections[2])
        end_seconds = int(end_sections[0]) * 3600 + int(end_sections[1]) * 60 + int(end_sections[2])
    time_diff = int(day_secs + (end_seconds - begin_seconds))
    diff_days = float(time_diff) / float(full_day_secs)
    return diff_days
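A timedelta's total_seconds() gets the same fractional-day figure more directly, and also handles spans that cross midnight; a minimal sketch with illustrative dates:

from datetime import datetime

a = datetime.strptime('2015-11-17 06:00:00', '%Y-%m-%d %H:%M:%S')
b = datetime.strptime('2015-11-18 18:00:00', '%Y-%m-%d %H:%M:%S')
print((b - a).total_seconds() / 86400.0)  # -> 1.5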
Example #26

def conference_date(record):
    if 'date' in record and record['date']:
        return record['date']
    from datetime import datetime
    out = ''
    opening_date = record.get('opening_date', '')
    closing_date = record.get('closing_date', '')
    if opening_date and closing_date:
        converted_opening_date = datetime.strptime(
            opening_date, "%Y-%m-%d")
        converted_closing_date = datetime.strptime(
            closing_date, "%Y-%m-%d")
        if opening_date.split('-')[0] == closing_date.split('-')[0]:
            if opening_date.split('-')[1] == closing_date.split('-')[1]:
                out += opening_date.split('-')[2] + '-' + \
                    closing_date.split('-')[2] + \
                    ' ' + converted_opening_date.strftime('%b') + \
                    ' ' + opening_date.split('-')[0]
            else:
                out += opening_date.split('-')[2] + ' ' \
                    + converted_opening_date.strftime('%b') + ' - ' + \
                    closing_date.split('-')[2] + ' ' + \
                    converted_closing_date.strftime('%b') + ' ' + \
                    opening_date.split('-')[0]
        else:
            out += opening_date.split('-')[2] + ' ' + \
                converted_opening_date.strftime('%b') + ' ' \
                + opening_date.split('-')[0] + ' - ' + \
                closing_date.split('-')[2] + ' ' + \
                converted_closing_date.strftime('%b') + ' ' + \
                closing_date.split('-')[0]
    return out
Example #27

def pubdate_to_datetime(pubdate):
    """ Wrapper for an annoying conversion between RSS pubDate strings and datetimes. """
    # dateutil's parser would also work here:
    #d = parser.parse(pubdate)
    #d = d.replace(tzinfo=timezone("GMT"))
    #return d
    tok = pubdate.replace(",", "").replace(":", " ").split(" ")
    try:
        # Four-digit vs two-digit year variants of the RFC 822 date format
        d = datetime.strptime(pubdate, "%a, %d %b %Y %H:%M:%S %Z") if \
            len(tok[3]) > 2 else datetime.strptime(pubdate, "%a, %d %b %y %H:%M:%S %Z")
        return d.replace(tzinfo=timezone("GMT"))  # make timezone aware for robustness
    except Exception:
        # Python 2.7's strptime does not support the %z directive, so parse the
        # numeric offset by hand
        try:
            offset = int(pubdate[-5:])
            delta = timedelta(hours=offset / 100)
            d = datetime.strptime(pubdate[:-6], "%a, %d %b %Y %H:%M:%S") if \
                len(tok[3]) > 2 else datetime.strptime(pubdate[:-6], "%a, %d %b %y %H:%M:%S")
            d -= delta
            return d.replace(tzinfo=timezone("GMT"))  # make timezone aware for robustness
        except Exception, e:
            print e
            print pubdate
            raise Exception("Wrong date format")
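A quick illustration of both branches, with the function above in scope (timestamps are illustrative; the second call exercises the manual-offset fallback):

print pubdate_to_datetime("Sat, 07 Nov 2015 08:32:00 GMT")    # parsed directly, %Z accepts GMT
print pubdate_to_datetime("Sat, 07 Nov 2015 08:32:00 +0200")  # %Z fails, numeric offset handled by hand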
Example #28

def result():
    try:
        stock_tick = request.form['stock_tkr']
        web_adr = 'https://www.quandl.com/api/v3/datasets/WIKI/' + stock_tick + '.json'
        r = requests.get(web_adr)
        parsed_data = json.loads(r.text)
        new_data = pd.DataFrame(parsed_data['dataset']['data'])

        # Column 0 holds the date strings; map them to datetime.date objects
        f = lambda x: datetime.strptime(x, '%Y-%m-%d').date()
        new_data['Date'] = new_data[0].map(f)

        p = figure(title='Closing prices for the last month (20 working days)', x_axis_type="datetime")
        p.line(new_data['Date'][0:20].values, new_data[4][0:20].values, alpha=0.5, line_width=3, legend=stock_tick)

        p.xaxis.axis_label = 'Date'
        p.yaxis.axis_label = 'Price (US dollars)'
        script, div = components(p)
        return render_template('graph.html', script=script, div=div)
    except Exception:
        return render_template('index.html')
Example #29

def get_log_entries_by_incident(incid):
    '''Get log entries by incident id'''

    from datetime import datetime

    r1 = requests.get('{0}incidents/{1}/log_entries?include[]=channel'.format(api_url,incid), headers=headers, stream=True)
    od = list(reversed(r1.json()['log_entries']))
    res_method = first_notification_time = first_ack_time = 'None'
    od_number_of_notifications = od_number_of_acks = 0
    was_there_an_initial_alert = was_there_an_ack = False
    for ea_logentry in od:
        if ea_logentry['type'] == 'notify':
            # Keep only the first time seen; od is already in chronological order
            if not was_there_an_initial_alert:
                first_notification_time = ea_logentry['created_at']
            was_there_an_initial_alert = True
            od_number_of_notifications += 1

        if ea_logentry['type'] == 'acknowledge':
            if not was_there_an_ack:
                first_ack_time = ea_logentry['created_at']
            was_there_an_ack = True
            od_number_of_acks += 1

        if ea_logentry['type'] == 'resolve':
            res_method = ea_logentry['channel']['type']

    tf = "%Y-%m-%dT%H:%M:%SZ"
    time_bet_1stalert_and_1stack = 0
    if not was_there_an_initial_alert or not was_there_an_ack:
        time_bet_1stalert_and_1stack = 'None'
    else:
        time_bet_1stalert_and_1stack = datetime.strptime(first_ack_time, tf) - datetime.strptime(first_notification_time, tf)
        time_bet_1stalert_and_1stack = "{0}m{1}s".format(str(time_bet_1stalert_and_1stack).split(":")[1],str(time_bet_1stalert_and_1stack).split(":")[2])
    
    return {'time_bet': time_bet_1stalert_and_1stack, 'num_noti': str(od_number_of_notifications), 'num_acks': str(od_number_of_acks), 'res_method': res_method}
Example #30

def getExpenditure(category = "all", duration=9999):
	print category
	f = open("data.json","r")
	data = json.load(f)
	f.close()
	if duration == 9999:
		return data[0]["budgeting"]["expenditure"][category]
	trans_by_category = []
	if category == "all":
		trans_by_category = data[0]["budgeting"]["transactions"]
	else:
		for x in data[0]["budgeting"]["transactions"]:
			if x["category"] == category:
				trans_by_category.append(x)
	trans_by_category_exp = []
	for x in trans_by_category:
		if str(x["addToBudget"]) == "false":
			trans_by_category_exp.append(x)
	print (trans_by_category_exp)
	date_format = "%m/%d/%Y"
	expenditure_display = 0
	today = time.strftime("%m/%d/%Y")
	today = datetime.strptime(today,date_format)
	for x in trans_by_category_exp:
		a = datetime.strptime(str(x["insertedAt"]), date_format)
		if (int((today-a).days)) <= duration:
			expenditure_display += int(x["amount"])
	return expenditure_display	
Example #31

def attendance(password, user, period, start=None, end=None):
    """
    Retrieves timesheet and totals it for the current month.
    """
    from datetime import datetime, timedelta
    import pytz
    import holidays

    def colored_diff(title, diff, notes=None, invert=False):
        positive_color = 'green'
        negative_color = 'magenta'
        if invert:
            positive_color = 'magenta'
            negative_color = 'green'

        if not notes:
            notes = ''
        else:
            notes = f' ! {notes}'

        color = negative_color if diff[0] == '-' else positive_color
        click.echo(
            click.style(f'{title}\t', fg='blue') +
            click.style(diff, fg=color) + click.style(notes, fg='magenta'))

    if password is None:
        password = get_pass()
    check_config()
    with Settings() as config:
        client = Client(username=config['username'],
                        password=password,
                        database=config['database'],
                        host=config['host'])
    client.connect()
    if not user:
        user_id = client.user.id
    else:
        # Assume the caller passed a user id directly
        user_id = user

    filters = [('employee_id.user_id.id', '=', user_id)]
    filters_leave = [('employee_id.user_id.id', '=', user_id),
                     ('holiday_type', '=', 'employee')]

    # Add end cutoff for checkout if there is one
    if end:
        filters.append(('check_out', '<', end.strftime('%Y-%m-%d 00:00:00')))

    # Get start and end times
    if start:
        # No need to calculate start or end
        pass
    elif period == 'month':
        # Calculate month
        start = datetime.now().replace(day=1, hour=0, minute=0, second=0)
        if start.month < 12:
            end = start.replace(month=start.month + 1, day=1) - \
                timedelta(days=1)
        else:
            end = start.replace(day=31)
    elif period == 'year':
        # Calculate year
        start = datetime.now().replace(month=1,
                                       day=1,
                                       hour=0,
                                       minute=0,
                                       second=0)
        end = start.replace(month=12, day=31)

    # Add start filters
    filters.append(('check_in', '>=', start.strftime('%Y-%m-%d 00:00:00')))
    filters_leave.append(
        ('date_from', '>=', start.strftime('%Y-%m-%d 00:00:00')))

    # Always set end to end of today if not set
    if not end:
        end = datetime.now().replace(hour=23, minute=59, second=59)

    # Add end cutoff for leaves
    filters_leave.append(('date_to', '<', end.strftime('%Y-%m-%d 00:00:00')))

    attendance_ids = client.search('hr.attendance', filters)
    attendances = client.read('hr.attendance', attendance_ids)

    leave_ids = client.search('hr.holidays', filters_leave)
    leaves = client.read('hr.holidays', leave_ids)

    def daterange(start_date, end_date):
        for n in range(int((end_date - start_date).days)):
            yield start_date + timedelta(n)

    # Pre-process the weeks and days
    weeks = {}
    # @TODO Assumes user is in Finland
    local_holidays = holidays.FI()

    # Faux data to test holidays
    # attendances.append({
    #     'check_in': '2018-01-01 00:00:00',
    #     'check_out': '2018-01-01 02:00:00',
    #     'worked_hours': 2
    # })

    # Process attendances
    for attendance in attendances:
        # Get a localized datetime object
        # @TODO This assumes the server returns times as EU/Helsinki
        date = pytz.timezone('Europe/Helsinki').localize(
            datetime.strptime(attendance['check_in'], '%Y-%m-%d %H:%M:%S'))

        # If there is no checkout time, sum to now
        if not attendance['check_out']:
            # @TODO Same as above
            now = pytz.timezone('Europe/Helsinki').localize(datetime.utcnow())
            attendance['worked_hours'] = (now - date).seconds / 3600

        # Get the day and week index keys (Key = %Y-%m-%d)
        day_key = date.strftime('%Y-%m-%d')
        # Counts weeks from first Monday of the year
        week_key = date.strftime('%W')

        if week_key not in weeks:
            weeks[week_key] = {}

        if day_key not in weeks[week_key]:
            # @TODO Assumes 7.5 hours per day
            weeks[week_key][day_key] = {
                'allocated_hours': 7.5,
                'worked_hours': 0,
                'overtime': False,
                'overtime_reason': None
            }

        # Sum the attendance
        weeks[week_key][day_key]['worked_hours'] += attendance['worked_hours']

    for date in daterange(start, end):
        # Get the day and week index keys (Key = %Y-%m-%d)
        day_key = date.strftime('%Y-%m-%d')
        # Counts weeks from first Monday of the year
        week_key = date.strftime('%W')
        if day_key not in weeks.get(week_key, {}):
            # We don't care, no attendances for this day
            continue

        if day_key in local_holidays:
            # This day is a holiday, no allocated hours
            weeks[week_key][day_key]['overtime'] = True
            weeks[week_key][day_key]['overtime_reason'] = local_holidays.get(
                day_key)
            weeks[week_key][day_key]['allocated_hours'] = 0

        if date.isoweekday() >= 6:
            # Weekend, assume everything is overtime
            weeks[week_key][day_key]['overtime'] = True
            weeks[week_key][day_key]['overtime_reason'] = 'Weekend'
            weeks[week_key][day_key]['allocated_hours'] = 0

    # Process any leaves
    for leave in leaves:
        leave_start = pytz.timezone('Europe/Helsinki').localize(
            datetime.strptime(leave['date_from'], '%Y-%m-%d %H:%M:%S'))
        leave_end = pytz.timezone('Europe/Helsinki').localize(
            datetime.strptime(leave['date_to'], '%Y-%m-%d %H:%M:%S'))
        for date in daterange(leave_start, leave_end):
            # Get the day and week index keys (Key = %Y-%m-%d)
            day_key = date.strftime('%Y-%m-%d')
            # Counts weeks from first Monday of the year
            week_key = date.strftime('%W')
            if day_key not in weeks.get(week_key, {}):
                # We don't care, no attendances for this day
                continue
            weeks[week_key][day_key]['overtime'] = True
            weeks[week_key][day_key][
                'overtime_reason'] = f'Leave: {leave["name"]}'
            weeks[week_key][day_key]['allocated_hours'] = 0

    total_diff = 0
    total_hours = 0
    day_diff = 0
    click.echo(
        click.style(
            f'Balance as of {(datetime.today().isoformat(timespec="seconds"))} (system time)',
            fg='blue'))
    click.echo(click.style('Day\t\tWorked\tDifference', fg='blue'))
    for week_number, week in sorted(weeks.items()):
        for key, day in sorted(week.items()):
            if day['worked_hours'] == 0.0:
                continue
            diff = day['worked_hours'] - day['allocated_hours']
            colored_diff(f'{key}\t{(day["worked_hours"]):.2f}', f'{diff:+.2f}',
                         day.get('overtime_reason', None))

            if key == datetime.today().strftime('%Y-%m-%d'):
                day_diff += day['worked_hours'] - day['allocated_hours']
            else:
                total_diff += day['worked_hours'] - day['allocated_hours']
            total_hours += day['worked_hours']

    today = datetime.now().strftime('%Y-%m-%d')
    this_week = datetime.now().strftime('%W')
    hours_today = 0
    allocated_today = 0
    if today in weeks.get(this_week, {}):
        hours_today = weeks[this_week][today]['worked_hours']
        allocated_today = weeks[this_week][today]['allocated_hours']

    click.echo(click.style('---\t\t------\t-----', fg='blue'))
    colored_diff(f'Totals:\t\t{total_hours:.2f}',
                 f'{(total_diff + day_diff):+.2f}')
    print()
    colored_diff('Balance yesterday:', f'{total_diff:+.2f}')
    colored_diff('Balance now:\t', f'{(total_diff + day_diff):+.2f}')
    colored_diff('Allocated hours today:',
                 f'{(allocated_today - hours_today):+.2f}',
                 invert=True)
Example #32

def main():
    global return_code, msck_count, msck_command
    return_code = 0
    msck_count = 0
    home = '/data/'
    path = os.path.dirname(os.path.realpath(__file__))
    root = path.split('src/scripts')[0]
    
    #env = path.split('/')[2].split('bdp')[1]
    #env_ver = path.split('/')[4]
    env = 'p'
    env_ver = '01'
    usage = "usage: run_job.py grp_name app sub_app jobnames.list"
    parser = OptionParser(usage)
    (options, args) = parser.parse_args()
    if len(args) < 3:
        parser.error("Arguments - group_job_name and app name are required.")
    global app, sub_app
    grp_name = args[0]
    app = args[1]
    sub_app = args[2]
    jobnames = "jobnames.list"
    
    common_date =""
    if len(args) == 4:
       common_date = args[3].strip()
    else:
       common_date = str(datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H:%M:%S.%f'))
    common_date_tfmt = datetime.strptime(common_date,'%Y-%m-%d_%H:%M:%S.%f')
    asofdate = common_date_tfmt.strftime('%Y-%m-%d')
    log_time = common_date_tfmt.strftime('%Y-%m-%d_%H-%M-%S') 
    rerunjobnames = "jobnames_"+asofdate+".list"
    rerun = "N"

#       rerunjobnames = jobnames
#       rerun = "Y"

    envvars.populate(env,env_ver,app,sub_app)
        

    log_date = common_date_tfmt.strftime('%Y-%m-%d')
    log_folder = envvars.list['lfs_app_logs'] + "/"+log_date
    if not os.path.exists(log_folder):
        os.makedirs(log_folder)
        chmod_log = "chmod 777 "+log_folder
        rc, status = commands.getstatusoutput(chmod_log)
    log_file = log_folder +"/run_job-" + grp_name + '_' + log_time  + '.log'
    global abc_log_file, stdout_file
    abc_log_file = envvars.list['lfs_app_logs'] + "/"+grp_name+".tmp"
    failed_group_name = "@@"+ grp_name + '_' + log_time 
    
    print("LogFile: " + log_file)
    print("To Kill: kill " + str(os.getpid())) 
    f = open(log_file, "a",0)
    f.close()
    stdout_file = open(log_file, "r+",0)
    sys.stdout = stdout_file
    
    global kerb, user_name 
    rc, user_name = commands.getstatusoutput("echo $USER") 
    
    service_user_name = envvars.list['srvc_acct_login_'+app+'_'+sub_app]
    if service_user_name is not None and service_user_name != "":
       user_name = service_user_name
    if os.path.isfile(envvars.list['lfs_keystore']+"/"+user_name.lower()+".keytab"):
       kerb = "kinit -k -t "+envvars.list['lfs_keystore']+"/"+user_name.lower()+".keytab "+user_name.lower()+envvars.list['domainName']
       rc, out = commands.getstatusoutput(kerb)
       print("run_job.py              -> Authenticated    : "+kerb+" RC:"+str(rc))
    else: 
       print("run_job.py              -> Keytab file missing, not able to authenticate. Using user default authentication")
    
    
    
    
    start_line = "".join('*' for i in range(100))
    print(start_line)
    print("run_job.py              -> Started    : " + datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
    
    global abc_hdfs_put
    #hdfs_abc_log_file = envvars.list['hdfs_meta_raw']+"/"+envvars.list['hv_db_meta_stage']+"/abc_hadoop/load_date="+str(asofdate)+"/00000.log";
    #abc_hdfs_put = " ".join(["hdfs","dfs","-appendToFile",abc_log_file,
    #     hdfs_abc_log_file]) 
    #hdfs_chmod = "hdfs dfs -chmod -R 777 " + hdfs_abc_log_file 
    #rc, out = commands.getstatusoutput(hdfs_chmod)
    #print("---Output of chmod command of abc_log_file-->"+hdfs_chmod)

    #print("run_job.py              -> Invoked    : " +hdfs_chmod)
    #print out 
    #msck_command =  "beeline -u '" + envvars.list['hive2JDBC'] + ";principal=" + envvars.list['hive2Principal']+"' -e "
    #msck_command = "hive -e "
    #msck_command = msck_command + "'use "+ envvars.list['hv_db_meta_stage']+"; msck repair table abc_hadoop;'"
    
    comments = ""
    # determine joblist file path 
    job_list_file = envvars.list['lfs_app_config'] + '/' + jobnames
    rerun_job_list_file = envvars.list['lfs_app_config'] + '/' + grp_name + "_rerun.list"
    print("run_job.py              -> JobList    : " + job_list_file) 
    
    if os.path.isfile(rerun_job_list_file):
       job_list_file = rerun_job_list_file
       print("run_job.py              -> JobList    : Rerun file found, updating joblist lookup file. Please re-run if original entries has to run.")
       print("run_job.py              -> JobList    : " + job_list_file)
       comments = comments + "Rerun file found "+job_list_file
    else:
       comments = comments + "joblist file " + job_list_file
     
    abc_line = "|".join([grp_name,"run_job.py","python","CA-7 Job","",str(args),"STARTED",
                         user_name,comments.replace(os.linesep,"---"),str(datetime.today())+"\n"]) 
    writeabc(abc_line)
    input_scripts_count = 0
    failed_scripts_count = 0
    failed_scripts = ""
    try:
        with open(job_list_file) as fin:
            for line in fin:
                args = line.split('|')
                if  args[0].strip().lower() == grp_name.lower() or grp_name.lower() == '*all':
                    options = ' --env ' + env + ' --app ' + app + ' --env_ver ' + env_ver + ' --group ' + grp_name
                    options = options + ' --subapp ' + sub_app + ' --cmmn_dt ' + common_date
                    if  len(args) < 3:
                        print("Error: Table name and script name not defined in config file")
                        return None, None, None, None, None, None, None
                    
                    if  len(args) >= 4:
                        job = args[2].strip()
                        if args[1].strip().lower() == 'g':
                            path = envvars.list['lfs_global_scripts']
                        else:
                            path = envvars.list['lfs_app_scripts'] 
                        options = options + ' --op0 ' + args[3].strip()
                    if  len(args) >= 5 and args[4].strip != "":
                        options = options + ' --op1 ' + args[4].strip() 
                    if  len(args) >= 6 and args[5].strip != "":
                        options = options + ' --op2 ' + args[5].strip()
                    if  len(args) >= 7 and args[6].strip != "":
                        options = options + ' --op3 ' + args[6].strip()
                    if  len(args) >= 8 and args[7].strip != "":
                        options = options + ' --op4 ' + args[7].strip() 
                    if  len(args) >= 9 and args[8].strip != "":
                        options = options + ' --op5 ' + args[8].strip()
                    if  len(args) >= 10 and args[9].strip != "":
                        options = options + ' --op6 ' + args[9].strip()
                    if  len(args) >= 11 and args[10].strip != "":
                        options = options + ' --op7 ' + args[10].strip() 
                    if  len(args) >= 12 and args[11].strip != "":
                        options = options + ' --op8 ' + args[11].strip()
                    if  len(args) >= 13 and args[12].strip != "":
                        options = options + ' --op9 ' + args[12].strip()
                    input_scripts_count = input_scripts_count + 1
                    rc = call_script(path, job, options)
                    if rc != 0:
                       failed_scripts_count = failed_scripts_count + 1
                       fs = line.split('|')
                       fs[0] = failed_group_name                       
                       failed_scripts = failed_scripts + line
                    if rc > return_code:
                       return_code = rc 


        
    except IOError as e:
        if  e.errno != errno.ENOENT:
            raise IOError("exception file reading error")
        else:
            print("No joblist file found")
    
    if return_code > 0:
       #if input_scripts_count != failed_scripts_count:
          with open(rerun_job_list_file, 'w') as myfile:
             myfile.write(failed_scripts)
          chmod_log = "chmod 777 "+rerun_job_list_file
          rc, status = commands.getstatusoutput(chmod_log)
          print("run_job.py              -> Failed Script: Some scripts failed.. Please use below command to rerun..")
          print("run_job.py              -> Re-run Cmd   : "+ " ".join(["python",path+"/run_job.py",grp_name,app,sub_app]))
          abc_line = "|".join([grp_name,"run_job.py","python","CA-7 Job","",str(args),"FAILED",
                         user_name,"run_job failed, Some scripts failed.." + str(return_code),str(datetime.today())+"\n"]) 
          writeabc(abc_line)
       #else:
       #   print "run_job.py              -> Failed Script: All scripts failed.. Please use below command to rerun.."
       #   print "run_job.py              -> Re-run Cmd   : "+ " ".join(["python",path+"/run_job.py",grp_name,app,sub_app,jobnames])
       #   abc_line = "|".join([grp_name,"run_job.py","python","CA-7 Job","",str(args),"FAILED",
       #                  user_name,"run_job failed, all scripts failed.." + str(return_code),str(datetime.today())+"\n"]) 
       #   writeabc(abc_line)
    elif os.path.isfile(rerun_job_list_file):
       print("run_job.py              -> Deleting..." + str(rerun_job_list_file))
       os.remove(rerun_job_list_file)
       
    abc_line = "|".join([grp_name,"run_job.py","python","CA-7 Job","",str(args),"ENDED",
                         user_name,"run_job ended,Return-Code:" + str(return_code),str(datetime.today())+"\n"]) 
    writeabc(abc_line)
    print("run_job.py              -> Ended      : Return-Code:" + str(return_code)+" " + datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
    print start_line
    silentremove(abc_log_file)
    sys.exit(return_code)
Exemple #33
0
def twitterdate(value):
    from datetime import datetime, timedelta
    time = value.replace(" +0000", "")
    dt = datetime.strptime(time, "%a, %d %b %Y %H:%M:%S")
    return dt + timedelta(hours=-6)
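
# Usage sketch (hypothetical input, in Twitter's legacy REST timestamp format):
# twitterdate("Mon, 06 Sep 2010 14:00:00 +0000") -> datetime(2010, 9, 6, 8, 0),
# i.e. the UTC time shifted to UTC-6 by the hardcoded timedelta.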
Exemple #34
0
wangDiff = []
wangTime = []
# wangNum and wangScore were never initialized in the original, which would
# raise a NameError below; getNum/getScore are assumed to fill them in place.
wangNum = []
wangScore = []
wangURL = 'https://www.ratemyprofessors.com/ShowRatings.jsp?tid=2425127'
wangTime = getTime(wangURL, wangTime)[::-1]
wangScore = calculateAve(getScore(getNum(wangURL, wangNum), wangScore)[::-1])
wangDiff = calculateAve(getDiff(getNum(wangURL, wangNum), wangDiff)[::-1])

totalScore = xiaoScore + wangScore + connScore + koyuScore + louiScore
totalTime = xiaoTime + wangTime + connTime + koyuTime + louiTime
totalDiff = xiaoDiff + wangDiff + connDiff + koyuDiff + louiDiff

total = {totalTime[i]: totalScore[i] for i in range(len(totalTime))}
total2 = {totalTime[i]: totalDiff[i] for i in range(len(totalTime))}

ordered_data = sorted(total.items(),
                      key=lambda x: datetime.strptime(x[0], '%d/%m/%Y'))
ordered_data2 = sorted(total2.items(),
                       key=lambda x: datetime.strptime(x[0], '%d/%m/%Y'))

totalTime = map(lambda x: x[0], ordered_data)
totalTime2 = map(lambda x: x[0], ordered_data2)

totalScore = map(lambda x: x[1], ordered_data)
totalDiff = map(lambda x: x[1], ordered_data2)

totalTime = list(totalTime)
totalScore = list(totalScore)
totalScore = calculateAve(totalScore)

totalTime2 = list(totalTime2)
totalDiff = list(totalDiff)
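
# calculateAve is not shown in this snippet. Judging by how it is applied to the
# reversed score/difficulty lists (and that its result is concatenated like a
# list), a cumulative running average along these lines would be consistent --
# an assumption, not the author's code:
# def calculateAve(values):
#     averages, total = [], 0.0
#     for i, v in enumerate(values, start=1):
#         total += v
#         averages.append(total / i)
#     return averages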
Exemple #35
0
def main(args):
    httpd = ThreadedHTTPServer((BindIP, BindPort), MyHandler)

    try:
        if os.name == 'nt':
            os.system('cls')
        else:
            os.system('clear')
    except Exception:
        print("cls")
    print(chr(27) + "[2J")
    print(Colours.GREEN + logopic)
    print(Colours.END + "")

    if os.path.isfile(Database):
        print(("Using existing project: %s" % PoshProjectDirectory) + Colours.GREEN)
        database_connect()
        C2 = get_c2server_all()
        if ((C2[1] == PayloadCommsHost) and (C2[3] == DomainFrontHeader)):
            qstart = "%squickstart.txt" % (PoshProjectDirectory)
            if os.path.exists(qstart):
                with open(qstart, 'r') as f:
                    print(f.read())
        else:
            print("Error: different IP so regenerating payloads")
            if os.path.exists("%spayloads_old" % PoshProjectDirectory):
                import shutil
                shutil.rmtree("%spayloads_old" % PoshProjectDirectory)
            os.rename(PayloadsDirectory, "%spayloads_old" % PoshProjectDirectory)
            os.makedirs(PayloadsDirectory)
            C2 = get_c2server_all()
            newPayload = Payloads(C2[5], C2[2], PayloadCommsHost, DomainFrontHeader, C2[8], C2[12],
                                  C2[13], C2[11], "", "", C2[19], C2[20], C2[21], get_newimplanturl(), PayloadsDirectory)
            new_urldetails("updated_host", PayloadCommsHost, C2[3], "", "", "", "")
            update_item("PayloadCommsHost", "C2Server", PayloadCommsHost)
            update_item("QuickCommand", "C2Server", QuickCommand)
            update_item("DomainFrontHeader", "C2Server", DomainFrontHeader)
            newPayload.CreateRaw()
            newPayload.CreateDlls()
            newPayload.CreateShellcode()
            newPayload.CreateSCT()
            newPayload.CreateHTA()
            newPayload.CreateCS()
            newPayload.CreateMacro()
            newPayload.CreateEXE()
            newPayload.CreateMsbuild()
            newPayload.CreatePython()
            newPayload.WriteQuickstart(PoshProjectDirectory + 'quickstart.txt')

    else:
        print("Initializing new project folder and database" + Colours.GREEN)
        print("")
        directory = os.path.dirname(PoshProjectDirectory)
        if not os.path.exists(PoshProjectDirectory): os.makedirs(PoshProjectDirectory)
        if not os.path.exists(DownloadsDirectory): os.makedirs(DownloadsDirectory)
        if not os.path.exists(ReportsDirectory): os.makedirs(ReportsDirectory)
        if not os.path.exists(PayloadsDirectory): os.makedirs(PayloadsDirectory)
        initializedb()
        if not validate_sleep_time(DefaultSleep):
            print(Colours.RED)
            print("Invalid DefaultSleep in config, please specify a time such as 50s, 10m or 1h")
            print(Colours.GREEN)
            sys.exit(1)
        setupserver(PayloadCommsHost, gen_key().decode("utf-8"), DomainFrontHeader, DefaultSleep, KillDate, HTTPResponse, PoshProjectDirectory, PayloadCommsPort, QuickCommand, DownloadURI, "", "", "", Sounds, ClockworkSMS_APIKEY, ClockworkSMS_MobileNumbers, URLS, SocksURLS, Insecure, UserAgent, Referrer, Pushover_APIToken, Pushover_APIUser, EnableNotifications)
        rewriteFile = "%s/rewrite-rules.txt" % directory
        print("Creating Rewrite Rules in: " + rewriteFile)
        print("")
        rewriteHeader = ["RewriteEngine On", "SSLProxyEngine On", "SSLProxyCheckPeerCN Off", "SSLProxyVerify none", "SSLProxyCheckPeerName off", "SSLProxyCheckPeerExpire off", "# Change IPs to point at C2 infrastructure below", "Define PoshC2 10.0.0.1", "Define SharpSocks 10.0.0.1"]
        rewriteFileContents = rewriteHeader + urlConfig.fetchRewriteRules() + urlConfig.fetchSocksRewriteRules()
        with open(rewriteFile, 'w') as outFile:
            for line in rewriteFileContents:
                outFile.write(line)
                outFile.write('\n')

        C2 = get_c2server_all()
        newPayload = Payloads(C2[5], C2[2], C2[1], C2[3], C2[8], C2[12],
                              C2[13], C2[11], "", "", C2[19], C2[20],
                              C2[21], get_newimplanturl(), PayloadsDirectory)

        new_urldetails("default", C2[1], C2[3], "", "", "", "")
        newPayload.CreateRaw()
        newPayload.CreateDlls()
        newPayload.CreateShellcode()
        newPayload.CreateSCT()
        newPayload.CreateHTA()
        newPayload.CreateCS()
        newPayload.CreateMacro()
        newPayload.CreateEXE()
        newPayload.CreateMsbuild()

        create_self_signed_cert(PoshProjectDirectory)
        newPayload.CreatePython()
        newPayload.WriteQuickstart(directory + '/quickstart.txt')

    print("")
    print("CONNECT URL: " + select_item("PayloadCommsHost", "C2Server") + get_newimplanturl() + Colours.GREEN)
    print("WEBSERVER Log: %swebserver.log" % PoshProjectDirectory)
    global KEY
    KEY = get_baseenckey()
    print("")
    print(time.asctime() + " PoshC2 Server Started - %s:%s" % (BindIP, BindPort))
    from datetime import date, datetime
    killdate = datetime.strptime(C2[5], '%d/%m/%Y').date()
    datedifference = number_of_days(date.today(), killdate)
    if datedifference < 8:
        print (Colours.RED+("\nKill Date is - %s - expires in %s days" % (C2[5],datedifference)))
    else:
        print (Colours.GREEN+("\nKill Date is - %s - expires in %s days" % (C2[5],datedifference)))
    print(Colours.END)

    protocol = urlparse(PayloadCommsHost).scheme

    if protocol == 'https':
        if (os.path.isfile("%sposh.crt" % PoshProjectDirectory)) and (os.path.isfile("%sposh.key" % PoshProjectDirectory)):
            try:
                httpd.socket = ssl.wrap_socket(httpd.socket, keyfile="%sposh.key" % PoshProjectDirectory, certfile="%sposh.crt" % PoshProjectDirectory, server_side=True, ssl_version=ssl.PROTOCOL_TLS)
            except Exception:
                httpd.socket = ssl.wrap_socket(httpd.socket, keyfile="%sposh.key" % PoshProjectDirectory, certfile="%sposh.crt" % PoshProjectDirectory, server_side=True, ssl_version=ssl.PROTOCOL_TLSv1)
        else:
            raise ValueError("Cannot find the certificate files")

    c2_message_thread = threading.Thread(target=log_c2_messages, daemon=True)
    c2_message_thread.start()

    try:
        httpd.serve_forever()
    except (KeyboardInterrupt, EOFError):
        httpd.server_close()
        print(time.asctime() + " PoshC2 Server Stopped - %s:%s" % (BindIP, BindPort))
        sys.exit(0)
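
# number_of_days and validate_sleep_time are imported from elsewhere in PoshC2;
# the sketches below are minimal versions consistent with how main() calls them
# (assumptions, not the project's actual implementations):
# def number_of_days(d1, d2):
#     return (d2 - d1).days
# def validate_sleep_time(s):
#     import re
#     return bool(re.match(r'^\d+[smh]$', s.strip()))  # e.g. 50s, 10m, 1h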
Exemple #36
0
def on_message(ws, message):
    global judul
    global nama
    global response3
    global status
    global timeh
    global timehl2
    global xz
    global z
    chat = json.loads(message)
    uid = chat['data']['author']['id']
    nick = chat['data']['author']['nickname']
    evn = chat['event']
    kesurupan = '{"appversion":"4.3.16","event":"live_message","token":"","useragent":"Android","message":"Hallo..All Of You How To The Good News Ia Always Yes!!😣"}'
    if z == 0:
        ws.send(kesurupan)
        z = 1
    if evn == 'live_message':
        psn = chat['data']['message']
        tag = chat['data']['author']['tag']
        print(chat['data']['author']['tag'])
    emot = ['🤭🤭🤭', '🙄🙄🙄', '😝😝😝', '😇😇😇', '😌😌😌', '😔😔😔', '🥺🥺🥺']
    ya = '{"appversion":"4.3.16","event":"live_message","token":"","useragent":"Android","message":"YA ' + nick + random.choice(
        emot) + '"}'
    tidak = '{"appversion":"4.3.16","event":"live_message","token":"","useragent":"Android","message":"TIDAK ' + nick + random.choice(
        emot) + '"}'
    bisajadi = '{"appversion":"4.3.16","event":"live_message","token":"","useragent":"Android","message":"SOKAP IH ' + nick + random.choice(
        emot) + '"}'
    bodoamat = '{"appversion":"4.3.16","event":"live_message","token":" ","useragent":"Android","message":"ANDA KEPO ' + nick + random.choice(
        emot) + '"}'
    listjawaban = [ya, tidak, bisajadi, bodoamat]
    if evn == 'live_message' and psn[:1].lower() == 'a' and psn[-1:] == '?':
        kambeng = random.choice(listjawaban)
        print(kambeng)
        ws.send(kambeng)
    sapa = [
        ' .Wellcome You😊 ', ' .How Are You😋 ',
        ' .Use Your Earphone For Qouality Music🎧 ',
        ' .We Wish You Always Heppiness☺️ ', ' .Dont Forget To Love Big Sis❣ ',
        ' .Always Smiling !! Ya😊 ', ' .Youre Funny Big Sis😁 ',
        ' .How Is Today?☺️ ', ' .Take Care Of Your Health🤭', ' .You Are Cute😁'
    ]
    ljoin = '{"appversion":"4.3.16","event":"live_message","token":"","useragent":"Android","message":" |USER|. ' + nick + random.choice(
        sapa) + '"}'
    lsjoin = '{"appversion":"4.3.16","event":"live_message","token":" ","useragent":"Android","message":"' + nick + ' Open The Spoon Radio Application Dont Fores Bro!! So 👻|GHOST|"}'
    likee = [
        ' .Thenks For Teplove❣️', ' .Thank You For Sprinkling Love☺️❣ ',
        ' .Thank You The Fake Love😁❣ ',
        ' .Uwu Thank You For Wanting To Love😳❣ '
    ]
    llike = '{"appversion":"4.3.16","event":"live_message","token":" ","useragent":"Android","message":" |USER|.' + nick + random.choice(
        likee) + '"}'
    tidur = '{"appversion":"4.3.16","event":"live_message","token":" ","useragent":"Android","message":"l Take A Break Yeah !! 😳"}'
    bangun = '{"appversion":"4.3.16","event":"live_message","token":" ","useragent":"Android","message":"Hello Everyone.How Are You? I Came To Help !! 😳"}'
    likes = '{"appversion":"4.3.16","event":"live_message","token":" ","useragent":"Android","message":"I Help You With A Tap Love Yeah!!!😊"}'
    ping = '{"appversion":"4.3.16","event":"live_message","token":"","useragent":"Android","message":"HAHAHAHAHAHAHAHAHAHA !! 😂😂"}'
    makasih = '{"appversion":"4.3.16","event":"live_message","token":"","useragent":"Android","message":"' + nick + ' Waalaikumsalam Wr. Wb. !! 😇"}'
    jawab = '{"appversion":"4.3.16","event":"live_message","token":"","useragent":"Android","message":"Pagi Juga ' + nick + 'Jangan Lupa Sarapan Ya kak !! 😋"}'
    sokap = '{"appversion":"4.3.16","event":"live_message","token":"","useragent":"Android","message":"Siang Juga  ' + nick + 'Jangan Lupa Bahagia Selalu Ya !! 😳"}'
    sendiri = '{"appversion":"4.3.16","event":"live_message","token":"","useragent":"Android","message":"Sore Juha ' + nick + ' Jangan Lupa Mandi Ya !!😒"}'
    jee = '{"appversion":"4.3.16","event":"live_message","token":"","useragent":"Android","message":"Hai Juga Akak ' + nick + ' Jangan Lupa Pencet Kado Kiri Paling bawah Buat Gift😳"}'
    ljee = '{"appversion":"4.3.16","event":"live_message","token":"","useragent":"Android","message":"Malam Juga  ' + nick + ' Jangan Bergadang Yaa Jaga Kesehatan Akak !! 😇"}'
    promot = '{"appversion":"4.3.16","event":"live_message","token":"","useragent":"Android","message":" Apa Sih Uwu Uwu Akak ' + nick + 'Kek Orang Gila !!😳"}'
    rank = '{"appversion":"4.3.16","event":"live_message","token":" ","useragent":"Android","message":"|INFO| Rank Has Been Updated ' + nick + ' Beloved 😚"}'
    if evn == 'live_message' and (psn == '!status' or psn == '!durasi'
                                  or psn == '!info'):
        headers2 = {'User-Agent': 'Mozilla/5.0'}
        response2 = requests.get(
            ('https://id-api.spooncast.net/lives/' + slink + ''),
            headers=headers2)
        createdl = response2.json()['results'][0]['created']
        tanggal = datetime.today()
        cre = createdl[:-17]
        crea = createdl[11:-8]
        creat = cre + ' ' + crea
        creatdt = datetime.strptime(creat, '%Y-%m-%d %H:%M:%S')
        selisih = tanggal - creatdt
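        # The broadcast 'created' timestamp appears to be UTC while the local
        # clock here is UTC+7 (WIB), so a fixed 7-hour offset is subtracted below.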
        s1 = '07:00:00'
        s2 = str(selisih)[:-7]
        formatss = '%H:%M:%S'
        timehl2 = datetime.strptime(s2, formatss) - datetime.strptime(
            s1, formatss)
        ws.send(
            '{"appversion":"4.3.16","event":"live_message","token":" ","useragent":"Android","message":"|INFO|ROOM THIS HASS BEEN BROADCAST FOR:🔥🔥'
            + str(timehl2) + '🔥🔥' + ' "}')
    if evn == 'live_join':
        if status == 'bangun':
            ws.send(ljoin)
    if evn == 'live_shadowjoin':
        if status == 'bangun':
            ws.send(lsjoin)
    if evn == 'live_message' and psn == '!like' and status == 'bangun':
        status = 'likes'
        ws.send(likes)
    if evn == 'live_message' and psn == '!off' and status == 'bangun':
        status = 'tidur'
        ws.send(tidur)
    if evn == 'live_message' and psn == '!rank':
        headers3 = {'User-Agent': 'Mozilla/5.0'}
        response3 = requests.get('https://id-api.spooncast.net/lives/popular/',
                                 headers=headers3)
        ws.send(rank)
    if evn == 'live_message' and psn[:-3] == '!rank':  # handles '!rank NN' (two-digit rank)
        zz = psn[6:]
        xz = int(zz) - 1
        tanggal = datetime.today()
        nama = response3.json()['results'][xz]['author']['nickname']
        judul = response3.json()['results'][xz]['title']
        created = response3.json()['results'][int(xz)]['created']
        cre = created[:-17]
        crea = created[11:-8]
        creat = cre + ' ' + crea
        creatdt = datetime.strptime(creat, '%Y-%m-%d %H:%M:%S')
        selisih = tanggal - creatdt
        s1 = '07:00:00'
        s2 = str(selisih)[:-7]
        formatss = '%H:%M:%S'
        timeh = datetime.strptime(s2, formatss) - datetime.strptime(
            s1, formatss)
        ws.send(
            '{"appversion":"4.3.16","event":"live_message","token":" ","useragent":"Android","message":"|INFO|Rank '
            + str(xz + 1) + '  Name: ' + nama + ' Title: ' + judul +
            '  Duration -> ' + str(timeh) + ' "}')
    if evn == 'live_message' and psn[:-2] == '!rank':  # handles '!rank N' (one-digit rank)
        zz = psn[6:]
        xz = int(zz) - 1
        tanggal = datetime.today()
        nama = response3.json()['results'][xz]['author']['nickname']
        judul = response3.json()['results'][xz]['title']
        created = response3.json()['results'][int(xz)]['created']
        cre = created[:-17]
        crea = created[11:-8]
        creat = cre + ' ' + crea
        creatdt = datetime.strptime(creat, '%Y-%m-%d %H:%M:%S')
        selisih = tanggal - creatdt
        s1 = '07:00:00'
        s2 = str(selisih)[:-7]
        formatss = '%H:%M:%S'
        timeh = datetime.strptime(s2, formatss) - datetime.strptime(
            s1, formatss)
        ws.send(
            '{"appversion":"4.3.16","event":"live_message","token":" ","useragent":"Android","message":"|INFO|rank '
            + str(xz + 1) + '  Name: ' + nama + ' Title: ' + judul +
            '  durasi -> ' + str(timeh) + ' "}')
    if evn == 'live_message' and psn == '!me':
        cid = tag
        headers4 = {'User-Agent': 'Mozilla/5.0'}
        response4 = requests.get(
            ('https://id-api.spooncast.net/search/user/?keyword=' + cid + ''),
            headers=headers4)
        idd = response4.json()['results'][0]['id']
        headers5 = {'User-Agent': 'Mozilla/5.0'}
        response5 = requests.get(
            ('https://id-api.spooncast.net/users/' + str(idd) + '/notice/'),
            headers=headers5)
        nn = response5.json()['results'][0]['bj']['nickname']
        tg = str(response5.json()['results'][0]['bj']['date_joined'])
        tan = tg[:-17]
        tang = tg[11:-8]
        tangg = tan + ' ' + tang
        tangga = datetime.strptime(tangg, '%Y-%m-%d %H:%M:%S')
        ws.send(
            '{"appversion":"4.3.16","event":"live_message","token":" ","useragent":"Android","message":"|INFO|Username '
            + nn + ' The date the account was created -> ' + str(tangga) +
            ' GMT +0 "}')
    if evn == 'live_message' and psn[:4] == '!cek':
        cid = psn[5:]
        headers4 = {'User-Agent': 'Mozilla/5.0'}
        response4 = requests.get(
            ('https://id-api.spooncast.net/search/user/?keyword=' + cid + ''),
            headers=headers4)
        idd = response4.json()['results'][0]['id']
        headers5 = {'User-Agent': 'Mozilla/5.0'}
        response5 = requests.get(
            ('https://id-api.spooncast.net/users/' + str(idd) + '/notice/'),
            headers=headers5)
        nn = response5.json()['results'][0]['bj']['nickname']
        tg = str(response5.json()['results'][0]['bj']['date_joined'])
        tan = tg[:-17]
        tang = tg[11:-8]
        tangg = tan + ' ' + tang
        tangga = datetime.strptime(tangg, '%Y-%m-%d %H:%M:%S')
        ws.send(
            '{"appversion":"4.3.16","event":"live_message","token":" ","useragent":"Android","message":"|INFO|Username '
            + nn + ' The date the account was created -> ' + str(tangga) +
            ' GMT +0 "}')
    if evn == 'live_message' and psn == '!on' and status == 'tidur':
        status = 'bangun'
        ws.send(bangun)
    if evn == 'live_message' and psn[:1].lower() == 'w' and psn[-1:] == 'k':
        ws.send(ping)
    if evn == 'live_message' and psn == 'uwu':
        ws.send(promot)
    if evn == 'live_message' and psn == 'hai':
        ws.send(jee)
    if evn == 'live_message' and psn == 'malam':
        ws.send(ljee)
    if evn == 'live_message' and psn == 'assalamualaikum':
        ws.send(makasih)
    if evn == 'live_message' and psn == 'pagi':
        ws.send(jawab)
    if evn == 'live_message' and psn == 'siang':
        ws.send(sokap)
    if evn == 'live_message' and psn == 'sore':
        ws.send(sendiri)
    if evn == 'live_message':
        if psn == 'non keluar' and uid == '210900010':
            ws.close()
Exemple #37
0
import pandas as pd
import matplotlib.pyplot as plt
# Only the datetime class and timedelta are used below; a bare "import datetime"
# here would be shadowed by this from-import anyway.
from datetime import datetime, timedelta

data = pd.read_csv("../data/training_20min_avg_volume_original.csv")

data["start_time"] = data["time_window"].apply(lambda x: datetime.strptime(
    x.lstrip("[").split(",")[0], "%Y-%m-%d %H:%M:%S"))
data["date"] = data["start_time"].apply(lambda x: datetime.date(x))
data["time"] = data["start_time"].apply(lambda x: datetime.time(x))
data["day_of_week"] = data["start_time"].apply(lambda x: x.isoweekday())
data["is_weekend"] = data["day_of_week"].apply(lambda x: 1 if x >= 6 else 0)
data = data.sort_values(["date", "time"])  # DataFrame.sort() was removed from pandas; sort_values is its replacement
drop_date_start = datetime.strptime('2016-09-30', '%Y-%m-%d').date()
data_drop = data
# Drop the National Day window (2016-09-30 plus the next 8 days). The filtering
# must be cumulative; the original re-filtered data_drop on every pass, so only
# the last day was actually removed.
data = data_drop
for i in range(9):
    data = data[data["date"] != drop_date_start + timedelta(days=i)]
print(data.columns)
#data.to_csv("drop_national_day_training.csv", index=False)

id1 = data[data["tollgate_id"] == 1]
id1out = id1[id1["direction"] == 1]
id1in = id1[id1["direction"] == 0]
id2 = data[data["tollgate_id"] == 2]
id2in = id2[id2["direction"] == 0]
id3 = data[data["tollgate_id"] == 3]
id3out = id3[id3["direction"] == 1]
id3in = id3[id3["direction"] == 0]
print(id1in["volume"].values.shape)
Exemple #38
0
    path = path_base % (index)
    print('Downloading AVHRR_OI_SST >>>  date: %s,  DayNumber: %s' % (datetime.strftime(dt, '%Y-%m-%d'),index))
    req.urlretrieve(url,path)
    #decompress(path)
    #os.remove(path)
    return





if len(sys.argv)!=3:
  print('Enter 2 arguments as follows: Year-Month-Day Year-Month-Day')
  exit()

dt1 = datetime.strptime(sys.argv[1], '%Y-%m-%d')
dt2 = datetime.strptime(sys.argv[2], '%Y-%m-%d')

dt = dt1

while dt <= dt2:
    try:
        get_nrt_avhrr_oi(dt.year, dt.month, dt.day)
    except Exception as e:
        print('================================')
        print('Error on %s' % datetime.strftime(dt, '%Y-%m-%d'))
        print('Error Message: %s' % e)
        print('Short wait ....')
        sleep(10)
        print('================================')
        get_nrt_avhrr_oi(dt.year, dt.month, dt.day)
    # Advance one day (assumes timedelta is imported alongside datetime);
    # without this increment the loop never terminates.
    dt = dt + timedelta(days=1)
for i, tds in enumerate(trs):
    assert (len(ths) == len(tds))
    rowlabel = tds[0]
    if 'Davon PCR' in rowlabel or 'Davon Antigen' in rowlabel:
        continue
    mo = re.search(
        r'Stand (\d\d\.\d\d\.\d\d\d\d)[,\.][ \xa0]*(\d?\d[:.]\d\d) ?Uhr',
        tds[0])
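    # e.g. matches "Stand 24.03.2021, 9:15 Uhr" -> groups ("24.03.2021", "9:15")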
    if mo is None:
        print("cannot parse date:", tds[0], file=sys.stderr)
        sys.exit(1)
    sdate = mo.group(1)
    stime = mo.group(2).replace('.', ':')
    if len(stime) == 4:
        stime = '0' + stime
    parse = fetchhelper.ParseData(update, labels[i])
    datadate = parse.parsedtime = datetime.strptime(
        sdate + ' ' + stime, '%d.%m.%Y %H:%M').replace(tzinfo=datatz)
    with open(parse.parsedfile, 'w') as f:
        cw = csv.writer(f)
        cw.writerow(['Area', 'Date', 'Value'])
        for col in range(1, len(tds) - 1):
            area = areas[strip_footnote(ths[col])]
            count = cleannum(tds[col])
            cw.writerow([area, datadate.isoformat(), count])
    parse.deploy_timestamp()
    parses.append(parse)

fetchhelper.git_commit(parses, args)
def load_data(vertexdata_path):
    # read data from vertex.csv and cluster the events
    print "events clustering..."
    vertexcsv_r = csv.reader(open(vertexdata_path, 'r'))
    mindatediff = 999999
    mintimediff = 999999
    maxdatediff = 0
    maxtimediff = 0
    data = []
    time = []
    latitude = []
    longitude = []
    vertexdata = []
    for lineindex, line in enumerate(vertexcsv_r):
        vertexdata.append(line)
        if line[1] == 'image' and line[4] != '' and line[5] != '-999' and line[
                6] != '-999':
            #print line[3],line[4],line[5],line[6]
            #print '/home/zz/album_graph_data/photos/'+line[3][36:]
            if line[4][11:13] == '24':
                line[4] = line[4][:11] + '00' + line[4][13:]
            #print datetime.strptime(str(line[4]),"%Y/%m/%d %H:%M:%S")
            a = datetime.strptime(str(line[4]), "%Y/%m/%d %H:%M:%S")
            b = datetime.strptime(str("1970/01/01 00:00:00"),
                                  "%Y/%m/%d %H:%M:%S")
            if (a - b).days < mindatediff or ((a - b).days == mindatediff and
                                              (a - b).seconds <= mintimediff):
                mindatediff = (a - b).days
                mintimediff = (a - b).seconds
                mindate = a
            # Mirror the min comparison above: a strictly later day, or the same
            # day with a later time of day (the original AND-ed days and seconds,
            # which misses later dates whose time of day is smaller).
            if (a - b).days > maxdatediff or ((a - b).days == maxdatediff and
                                              (a - b).seconds >= maxtimediff):
                maxdatediff = (a - b).days
                maxtimediff = (a - b).seconds
                maxdate = a
            # image_path,datetime,latitude,longitude,lineindex(for rewrite)
            data.append(
                [line[3], a,
                 float(line[5]),
                 float(line[6]), lineindex])
    # clear the data
    newdata = []
    rawdata = []
    for d in data:
        newdata.append([
            d[0], (d[1] - mindate).days + float(
                (d[1] - mindate).seconds) / (24 * 60 * 60), d[2], d[3]
        ])
        time.append((d[1] - mindate).days + float((d[1] - mindate).seconds) /
                    (24 * 60 * 60))
        latitude.append(d[2])
        longitude.append(d[3])
        time_weight = 1
        latitude_weight = 10
        longitude_weight = 10
        rawdata.append([(d[1] - mindate).days + float(
            (d[1] - mindate).seconds) / (24 * 60 * 60) * time_weight,
                        d[2] * latitude_weight, d[3] * longitude_weight])
    time = np.array(time)
    latitude = np.array(latitude)
    longitude = np.array(longitude)
    rawdata = np.array(rawdata)
    return vertexdata, data, time, latitude, longitude, rawdata
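
# The clustering step itself is not part of this snippet. A minimal sketch of
# how the weighted rawdata matrix could be clustered (assuming scikit-learn,
# which this project may not actually use):
# from sklearn.cluster import MeanShift
# vertexdata, data, time, latitude, longitude, rawdata = load_data('vertex.csv')
# labels = MeanShift(bandwidth=2.0).fit(rawdata).labels_  # one event id per photo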
def no2nwb(NOData, session_use, subjects_ini, path_to_data):
    '''
       Purpose:
           Import the data and associated meta-data from the new/old recognition dataset into an
           NWB file. Each of the features of the dataset, such as the events (i.e., TTLs) or mean waveform, are
           compartmentalized to the appropriate component of the NWB file.


    '''

    # Time scaling (convert µs -> s for the NWB file)
    TIME_SCALING = 10**6

    # Prepare the NO data that will be converted to the NWB format

    session = NOData.sessions[session_use]
    events = NOData._get_event_data(session_use, experiment_type='All')
    cell_ids = NOData.ls_cells(session_use)
    experiment_id_learn = session['experiment_id_learn']
    experiment_id_recog = session['experiment_id_recog']
    task_descr = session['task_descr']

    # Get the metadata for the subject
    # ============ Read the config file (subjects.ini) ============
    # (the subjects file is an INI-style config, one section per session)

    #  Check config file path
    filename = subjects_ini
    if not os.path.exists(filename):
        print('This file does not exist: {}'.format(filename))
        print("Check filename/and or directory")

    # Read the config file
    try:
        # initialize the ConfigParser() class
        config = configparser.ConfigParser()
        # read .ini file
        config.read(filename)
    except Exception:
        print('Failed to read the config file..')
        print('Does this file exist: {}'.format(os.path.exists(filename)))

    #  Read Meta-data from INI file.
    for section in config.sections():
        if session_use == int(section):
            session_id = int(section)  #  The New/Old ID for the session
            #Get the session ID
            for value in config[section]:
                if value.lower() == 'nosessions.age':
                    age = int(config[section][value])
                if value.lower() == 'nosessions.diagnosiscode':
                    epilepsyDxCode = config[section][value]
                    epilepsyDx = getEpilepsyDx(int(epilepsyDxCode))
                if value.lower() == 'nosessions.sex':
                    sex = config[section][value].strip("'")
                if value.lower() == 'nosessions.id':
                    ID = config[section][value].strip("'")
                if value.lower() == 'nosessions.session':
                    pt_session = config[section][value].strip("'")
                if value.lower() == 'nosessions.date':
                    unformattedDate = config[section][value].strip("'")
                    date = datetime.strptime(unformattedDate, '%Y-%m-%d')
                    finaldate = date.replace(hour=0, minute=0)
                if value.lower() == 'nosessions.institution':
                    institution = config[section][value].strip("'")
                if value.lower() == 'nosessions.la':
                    LA = config[section][value].strip("'").split(',')
                    if LA[0] == 'NaN':
                        LA_x = np.nan
                        LA_y = np.nan
                        LA_z = np.nan
                    else:
                        LA_x = float(LA[0])
                        LA_y = float(LA[1])
                        LA_z = float(LA[2])
                if value.lower() == 'nosessions.ra':
                    RA = config[section][value].strip("'").split(',')
                    if RA[0] == 'NaN':
                        RA_x = np.nan
                        RA_y = np.nan
                        RA_z = np.nan
                    else:
                        RA_x = float(RA[0])
                        RA_y = float(RA[1])
                        RA_z = float(RA[2])
                if value.lower() == 'nosessions.lh':
                    LH = config[section][value].strip("'").split(',')
                    if LH[0] == 'NaN':
                        LH_x = np.nan
                        LH_y = np.nan
                        LH_z = np.nan
                    else:
                        LH_x = float(LH[0])
                        LH_y = float(LH[1])
                        LH_z = float(LH[2])
                if value.lower() == 'nosessions.rh':
                    RH = config[section][value].strip("'").split(',')
                    if RH[0] == 'NaN':
                        RH_x = np.nan
                        RH_y = np.nan
                        RH_z = np.nan
                    else:
                        RH_x = float(RH[0])
                        RH_y = float(RH[1])
                        RH_z = float(RH[2])
                if value.lower() == 'nosessions.system':
                    signalSystem = config[section][value].strip("'")

    # =================================================================

    print(
        '======================================================================='
    )
    print('session use: {}'.format(session_id))
    print('age: {}'.format(age))
    print('epilepsy_diagnosis: {}'.format(epilepsyDx))

    nwb_subject = Subject(age=str(age),
                          description=epilepsyDx,
                          sex=sex,
                          species='Human',
                          subject_id=pt_session)

    # Create the NWB file
    nwbfile = NWBFile(
        #source='https://datadryad.org/bitstream/handle/10255/dryad.163179/RecogMemory_MTL_release_v2.zip',
        session_description='New/Old recognition task for ID: {}. '.format(
            session_id),
        identifier='{}_{}'.format(ID, session_use),
        session_start_time=finaldate,  #default session start time
        file_create_date=datetime.now(),
        experiment_description=
        'The data contained within this file describes a new/old recognition task performed in '
        'patients with intractable epilepsy implanted with depth electrodes and Behnke-Fried '
        'microwires in the human Medical Temporal Lobe (MTL).',
        institution=institution,
        keywords=[
            'Intracranial Recordings', 'Intractable Epilepsy',
            'Single-Unit Recordings', 'Cognitive Neuroscience', 'Learning',
            'Memory', 'Neurosurgery'
        ],
        related_publications=
        'Faraut et al. 2018, Scientific Data; Rutishauser et al. 2015, Nat Neurosci;',
        lab='Rutishauser',
        subject=nwb_subject,
        data_collection='learning: {}, recognition: {}'.format(
            session['experiment_id_learn'], session['experiment_id_recog']))

    # Add events and experiment_id acquisition
    events_description = (
        """ The events coorespond to the TTL markers for each trial. For the learning trials, the TTL markers 
            are the following: 55 = start of the experiment, 1 = stimulus ON, 2 = stimulus OFF, 3 = Question Screen Onset [“Is this an animal?”], 
            20 = Yes (21 = NO) during learning, 6 = End of Delay after Response, 66 = End of Experiment. For the recognition trials, 
            the TTL markers are the following: 55 = start of experiment, 1 = stimulus ON, 2 = stimulus OFF, 3 = Question Screen Onset [“Have you seen this image before?”], 
            31:36 = Confidence (Yes vs. No) response [31 (new, confident), 32 (new, probably), 33 (new, guess), 34 (old, guess), 
            35 (old, probably), 36 (old, confident)], 66 = End of Experiment"""
    )

    event_ts = AnnotationSeries(name='events',
                                data=np.asarray(events[1].values).astype(str),
                                timestamps=np.asarray(events[0].values) /
                                TIME_SCALING,
                                description=events_description)

    experiment_ids_description = (
        """The experiment_ids coorespond to the encoding (i.e., learning) or recogniton trials. The learning trials are demarcated by: {}. The recognition trials are demarcated by: {}. """
        .format(experiment_id_learn, experiment_id_recog))

    experiment_ids = TimeSeries(name='experiment_ids',
                                unit='NA',
                                data=np.asarray(events[2]),
                                timestamps=np.asarray(events[0].values) /
                                TIME_SCALING,
                                description=experiment_ids_description)

    nwbfile.add_acquisition(event_ts)
    nwbfile.add_acquisition(experiment_ids)

    # Add stimuli to the NWB file
    # Get the first cell from the cell list
    cell = NOData.pop_cell(session_use,
                           NOData.ls_cells(session_use)[0], path_to_data)
    trials = cell.trials
    stimuli_recog_path = [trial.file_path_recog for trial in trials]
    stimuli_learn_path = [trial.file_path_learn for trial in trials]

    # Add epochs and trials: storing start and end times for a stimulus

    # First extract the category ids and names that we need
    # The metadata for each trials will be store in a trial table

    cat_id_recog = [trial.category_recog for trial in trials]
    cat_name_recog = [trial.category_name_recog for trial in trials]
    cat_id_learn = [trial.category_learn for trial in trials]
    cat_name_learn = [trial.category_name_learn for trial in trials]

    # Extract the event timestamps
    events_learn_stim_on = events[(events[2] == experiment_id_learn) &
                                  (events[1] == NOData.markers['stimulus_on'])]
    events_learn_stim_off = events[(events[2] == experiment_id_learn) & (
        events[1] == NOData.markers['stimulus_off'])]
    events_learn_delay1_off = events[(events[2] == experiment_id_learn) & (
        events[1] == NOData.markers['delay1_off'])]
    events_learn_delay2_off = events[(events[2] == experiment_id_learn) & (
        events[1] == NOData.markers['delay2_off'])]
    events_learn = events[(events[2] == experiment_id_learn)]
    events_learn_response = []
    events_learn_response_time = []
    for i in range(len(events_learn[0])):
        if (events_learn.iloc[i, 1]
                == NOData.markers['response_learning_animal']) or (
                    events_learn.iloc[i, 1]
                    == NOData.markers['response_learning_non_animal']):
            events_learn_response.append(events_learn.iloc[i, 1] - 20)
            events_learn_response_time.append(events_learn.iloc[i, 0])

    events_recog_stim_on = events[(events[2] == experiment_id_recog) &
                                  (events[1] == NOData.markers['stimulus_on'])]
    events_recog_stim_off = events[(events[2] == experiment_id_recog) & (
        events[1] == NOData.markers['stimulus_off'])]
    events_recog_delay1_off = events[(events[2] == experiment_id_recog) & (
        events[1] == NOData.markers['delay1_off'])]
    events_recog_delay2_off = events[(events[2] == experiment_id_recog) & (
        events[1] == NOData.markers['delay2_off'])]
    events_recog = events[(events[2] == experiment_id_recog)]
    events_recog_response = []
    events_recog_response_time = []
    for i in range(len(events_recog[0])):
        if ((events_recog.iloc[i, 1] == NOData.markers['response_1'])
                or (events_recog.iloc[i, 1] == NOData.markers['response_2'])
                or (events_recog.iloc[i, 1] == NOData.markers['response_3'])
                or (events_recog.iloc[i, 1] == NOData.markers['response_4'])
                or (events_recog.iloc[i, 1] == NOData.markers['response_5'])
                or (events_recog.iloc[i, 1] == NOData.markers['response_6'])):
            events_recog_response.append(events_recog.iloc[i, 1])
            events_recog_response_time.append(events_recog.iloc[i, 0])

    # Extract new_old label
    new_old_recog = [trial.new_old_recog for trial in trials]
    # Create the trial tables

    nwbfile.add_trial_column('stim_on_time',
                             'The Time when the Stimulus is Shown')
    nwbfile.add_trial_column('stim_off_time',
                             'The Time when the Stimulus is Off')
    nwbfile.add_trial_column('delay1_time', 'The Time when Delay1 is Off')
    nwbfile.add_trial_column('delay2_time', 'The Time when Delay2 is Off')
    nwbfile.add_trial_column('stim_phase',
                             'Learning/Recognition Phase During the Trial')
    nwbfile.add_trial_column('stimCategory', 'The Category ID of the Stimulus')
    nwbfile.add_trial_column('category_name',
                             'The Category Name of the Stimulus')
    nwbfile.add_trial_column('external_image_file',
                             'The File Path to the Stimulus')
    nwbfile.add_trial_column(
        'new_old_labels_recog',
        '''The ground-truth labels for new vs. old stimuli. 0 == old stimuli
                            (presented during the learning phase), 1 == new stimuli (not seen
                            during the learning phase)'''
    )
    nwbfile.add_trial_column('response_value',
                             'The Response for Each Stimulus')
    nwbfile.add_trial_column('response_time',
                             'The Response Time for each Stimulus')

    range_recog = np.amin([
        len(events_recog_stim_on),
        len(events_recog_stim_off),
        len(events_recog_delay1_off),
        len(events_recog_delay2_off)
    ])
    range_learn = np.amin([
        len(events_learn_stim_on),
        len(events_learn_stim_off),
        len(events_learn_delay1_off),
        len(events_learn_delay2_off)
    ])

    # Iterate the event list and add information into each epoch and trial table
    for i in range(range_learn):

        nwbfile.add_trial(
            start_time=(events_learn_stim_on.iloc[i][0]) / (TIME_SCALING),
            stop_time=(events_learn_delay2_off.iloc[i][0]) / (TIME_SCALING),
            stim_on_time=(events_learn_stim_on.iloc[i][0]) / (TIME_SCALING),
            stim_off_time=(events_learn_stim_off.iloc[i][0]) / (TIME_SCALING),
            delay1_time=(events_learn_delay1_off.iloc[i][0]) / (TIME_SCALING),
            delay2_time=(events_learn_delay2_off.iloc[i][0]) / (TIME_SCALING),
            stim_phase='learn',
            stimCategory=cat_id_learn[i],
            category_name=cat_name_learn[i],
            external_image_file=stimuli_learn_path[i],
            new_old_labels_recog='NA',
            response_value=events_learn_response[i],
            response_time=(events_learn_response_time[i]) / (TIME_SCALING))

    for i in range(range_recog):

        nwbfile.add_trial(
            start_time=events_recog_stim_on.iloc[i][0] / (TIME_SCALING),
            stop_time=events_recog_delay2_off.iloc[i][0] / (TIME_SCALING),
            stim_on_time=events_recog_stim_on.iloc[i][0] / (TIME_SCALING),
            stim_off_time=events_recog_stim_off.iloc[i][0] / (TIME_SCALING),
            delay1_time=events_recog_delay1_off.iloc[i][0] / (TIME_SCALING),
            delay2_time=events_recog_delay2_off.iloc[i][0] / (TIME_SCALING),
            stim_phase='recog',
            stimCategory=cat_id_recog[i],
            category_name=cat_name_recog[i],
            external_image_file=stimuli_recog_path[i],
            new_old_labels_recog=new_old_recog[i],
            response_value=events_recog_response[i],
            response_time=events_recog_response_time[i] / (TIME_SCALING))

    # Add the waveform clustering and the spike data.
    # Get the unique channel id that we will be iterate over
    channel_ids = np.unique([cell_id[0] for cell_id in cell_ids])

    # unique unit id
    unit_id = 0

    # Create unit columns
    nwbfile.add_unit_column('origClusterID', 'The original cluster id')
    nwbfile.add_unit_column('waveform_mean_encoding',
                            'The mean waveform for encoding phase.')
    nwbfile.add_unit_column('waveform_mean_recognition',
                            'The mean waveform for the recognition phase.')
    nwbfile.add_unit_column('IsolationDist', 'IsolDist')
    nwbfile.add_unit_column('SNR', 'SNR')
    nwbfile.add_unit_column('waveform_mean_sampling_rate',
                            'The Sampling Rate of Waveform')

    #Add Stimuli
    stimuli_presentation = []

    # Add stimuli learn
    counter = 1
    for path in stimuli_learn_path:
        if path == 'NA':
            continue
        folders = path.split('\\')

        path = os.path.join(path_to_data, 'Stimuli', folders[0], folders[1],
                            folders[2])
        img = cv2.imread(path)
        resized_image = cv2.resize(img, (300, 400))
        stimuli_presentation.append(resized_image)

    # Add stimuli recog
    counter = 1
    for path in stimuli_recog_path:
        folders = path.split('\\')
        path = os.path.join(path_to_data, 'Stimuli', folders[0], folders[1],
                            folders[2])
        img = cv2.imread(path)
        resized_image = cv2.resize(img, (300, 400))
        stimuli_presentation.append(resized_image)
        name = 'stimuli_recog_' + str(counter)

    # Add stimuli to OpticalSeries
    stimulus_presentation_on_time = []

    for n in range(0, len(events_learn_stim_on)):
        stimulus_presentation_on_time.append(events_learn_stim_on.iloc[n][0] /
                                             (TIME_SCALING))

    for n in range(0, len(events_recog_stim_on)):
        stimulus_presentation_on_time.append(events_recog_stim_on.iloc[n][0] /
                                             (TIME_SCALING))

    name = 'StimulusPresentation'
    stimulus = OpticalSeries(name=name,
                             data=stimuli_presentation,
                             timestamps=stimulus_presentation_on_time[:],
                             orientation='lower left',
                             format='raw',
                             unit='meters',
                             field_of_view=[.2, .3, .7],
                             distance=0.7,
                             dimension=[300, 400, 3])

    nwbfile.add_stimulus(stimulus)

    # Get Unit data
    all_spike_cluster_ids = []
    all_selected_time_stamps = []
    all_IsolDist = []
    all_SNR = []
    all_selected_mean_waveform_learn = []
    all_selected_mean_waveform_recog = []
    all_mean_waveform = []
    all_channel_id = []
    all_oriClusterIDs = []
    all_channel_numbers = []
    all_brain_area = []
    # Iterate the channel list

    # load brain area file
    brain_area_file_path = os.path.join(path_to_data, 'Data', 'events',
                                        session['session'], task_descr,
                                        'brainArea.mat')

    try:
        brain_area_mat = loadmat(brain_area_file_path)
    except FileNotFoundError:
        print("brain_area_mat file not found")

    for channel_id in channel_ids:
        cell_name = 'A' + str(channel_id) + '_cells.mat'
        cell_file_path = os.path.join(path_to_data, 'Data', 'sorted',
                                      session['session'], task_descr,
                                      cell_name)

        try:
            cell_mat = loadmat(cell_file_path)
        except FileNotFoundError:
            print("cell mat file not found")
            continue

        spikes = cell_mat['spikes']
        meanWaveform_recog = cell_mat['meanWaveform_recog']
        meanWaveform_learn = cell_mat['meanWaveform_learn']
        IsolDist_SNR = cell_mat['IsolDist_SNR']

        spike_cluster_id = np.asarray([spike[1] for spike in spikes
                                       ])  # Each Cluster ID of the spike
        spike_timestamps = np.asarray(
            [spike[2]
             for spike in spikes])  # Timestamps of spikes for each ClusterID
        unique_cluster_ids = np.unique(spike_cluster_id)

        # If there are more than one cluster.
        for id in unique_cluster_ids:

            # Grab brain area
            brain_area = extra_brain_area(brain_area_mat, channel_id)

            selected_spike_timestamps = spike_timestamps[spike_cluster_id ==
                                                         id]
            IsolDist, SNR = extract_IsolDist_SNR_by_cluster_id(
                IsolDist_SNR, id)
            selected_mean_waveform_learn = extra_mean_waveform(
                meanWaveform_learn, id)
            selected_mean_waveform_recog = extra_mean_waveform(
                meanWaveform_recog, id)

            # If the mean waveform does not have 256 elements, we set the mean wave form to all 0
            if len(selected_mean_waveform_learn) != 256:
                selected_mean_waveform_learn = np.zeros(256)
            if len(selected_mean_waveform_recog) != 256:
                selected_mean_waveform_recog = np.zeros(256)

            mean_waveform = np.hstack(
                [selected_mean_waveform_learn, selected_mean_waveform_recog])

            # Append unit data
            all_spike_cluster_ids.append(id)
            all_selected_time_stamps.append(selected_spike_timestamps)
            all_IsolDist.append(IsolDist)
            all_SNR.append(SNR)
            all_selected_mean_waveform_learn.append(
                selected_mean_waveform_learn)
            all_selected_mean_waveform_recog.append(
                selected_mean_waveform_recog)
            all_mean_waveform.append(mean_waveform)
            all_channel_id.append(channel_id)
            all_oriClusterIDs.append(int(id))
            all_channel_numbers.append(channel_id)
            all_brain_area.append(brain_area)

            unit_id += 1

    nwbfile.add_electrode_column(
        name='origChannel',
        description='The original channel ID for the channel')

    #Add Device
    device = nwbfile.create_device(name=signalSystem)

    # Add Electrodes (brain Area Locations, MNI coordinates for microwires)
    length_all_spike_cluster_ids = len(all_spike_cluster_ids)
    for electrodeNumber in range(0, len(channel_ids)):

        brainArea_location = extra_brain_area(brain_area_mat,
                                              channel_ids[electrodeNumber])

        # One consolidated block replaces four near-identical RH/LH/RA/LA
        # branches. Note: the original indexed all_channel_id (a per-unit list)
        # by electrodeNumber for LH/RA/LA but used channel_ids for RH, which
        # could misalign electrodes; channel_ids is used consistently here.
        area_names = {
            'RH': 'Right Hippocampus',
            'LH': 'Left Hippocampus',
            'RA': 'Right Amygdala',
            'LA': 'Left Amygdala',
        }

        if brainArea_location in area_names:
            full_brainArea_Location = area_names[brainArea_location]
            # Pick the MNI coordinates parsed from the config for this area.
            if brainArea_location == 'RH':
                area_x, area_y, area_z = RH_x, RH_y, RH_z
            elif brainArea_location == 'LH':
                area_x, area_y, area_z = LH_x, LH_y, LH_z
            elif brainArea_location == 'RA':
                area_x, area_y, area_z = RA_x, RA_y, RA_z
            else:
                area_x, area_y, area_z = LA_x, LA_y, LA_z

            electrode_name = '{}-microwires-{}'.format(
                signalSystem, channel_ids[electrodeNumber])
            description = "Behnke Fried/Micro Inner Wire Bundle (Behnke-Fried BF08R-SP05X-000 and WB09R-SP00X-0B6; Ad-Tech Medical)"
            location = full_brainArea_Location

            # Add electrode group
            electrode_group = nwbfile.create_electrode_group(
                electrode_name,
                description=description,
                location=location,
                device=device)

            # Add electrode
            nwbfile.add_electrode([channel_ids[electrodeNumber]],
                                  x=area_x,
                                  y=area_y,
                                  z=area_z,
                                  imp=np.nan,
                                  location=full_brainArea_Location,
                                  filtering='300-3000Hz',
                                  group=electrode_group,
                                  origChannel=channel_ids[electrodeNumber])

    # Create Channel list index
    channel_list = list(range(0, length_all_spike_cluster_ids))
    unique_channel_ids = np.unique(all_channel_id)
    length_ChannelIds = len(np.unique(all_channel_id))
    for yy in range(0, length_ChannelIds):
        a = np.array(np.where(unique_channel_ids[yy] == all_channel_id))
        b = a[0]
        c = b.tolist()
        for i in c:
            channel_list[i] = yy

    #Add WAVEFORM Sampling RATE
    waveform_mean_sampling_rate = [98.4 * 10**3]
    waveform_mean_sampling_rate_matrix = [waveform_mean_sampling_rate
                                          ] * (length_all_spike_cluster_ids)

    # Add Units to NWB file
    for index_id in range(0, length_all_spike_cluster_ids):
        nwbfile.add_unit(
            id=index_id,
            spike_times=all_selected_time_stamps[index_id],
            origClusterID=all_oriClusterIDs[index_id],
            IsolationDist=all_IsolDist[index_id],
            SNR=all_SNR[index_id],
            waveform_mean_encoding=all_selected_mean_waveform_learn[index_id],
            waveform_mean_recognition=all_selected_mean_waveform_recog[
                index_id],
            electrodes=[channel_list[index_id]],
            waveform_mean_sampling_rate=waveform_mean_sampling_rate_matrix[
                index_id])

    return nwbfile
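
# A minimal usage sketch (hypothetical session number and paths; NOData and
# subjects.ini come from the Rutishauser new/old recognition release):
# from pynwb import NWBHDF5IO
# nwbfile = no2nwb(NOData, 132, 'subjects.ini', '/path/to/RecogMemory_MTL_release')
# with NWBHDF5IO('session_132.nwb', 'w') as io:
#     io.write(nwbfile)
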
#find the prior month row to be updated
Fidelity_last_row = len(fidelity_sheet.col_values(8)) + 1  #this is the row after the last row with data in the Actual Value column
Vanguard_last_row = len(vanguard_sheet.col_values(8)) + 1




#this is the month of the prior month that needs to be updated
prior_month = (datetime.today() + timedelta(days=-datetime.today().day)).date().replace(day=1)
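# e.g. run on 2024-02-15: today minus 15 days is 2024-01-31, and replace(day=1)
# yields date(2024, 1, 1) -- the first of the prior month.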




#update the 8th column with the new value for actual and the 3rd column for deposits
#check if the row for last month has already been populated
if (datetime.strptime(fidelity_sheet.row_values(Fidelity_last_row)[1],'%b-%y').date() == prior_month): 
    fidelity_sheet.update_cell(Fidelity_last_row,8,value_dict['Ending_Balance']); #update the ending balance
    fidelity_sheet.update_cell(Fidelity_last_row,3,value_dict['Total_Contributions']); #update the total deposit
    print('Fidelity Updated')
else:
    print('No update to Fidelity')
if (datetime.strptime(vanguard_sheet.row_values(Vanguard_last_row)[1],'%b-%y').date() == prior_month):
    vanguard_sheet.update_cell(Vanguard_last_row,8,Vanguard_PriorMonthEndingBalance); #update the ending balance
    vanguard_sheet.update_cell(Vanguard_last_row,3,Vanguard_PriorMonthDeposit); #update the total deposit
    print('Vanguard Updated')
else: #if it has, then no update is to be done
    print('No update to Vanguard')
Exemple #43
0
def timestr_to_epoch(s1):
    fmt = '%Y-%m-%d %H:%M:%S'
    return datetime.strptime(s1, fmt).timestamp()
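# e.g. timestr_to_epoch('2021-01-01 00:00:00') returns the POSIX timestamp for
# that wall-clock time interpreted in the machine's local timezone (the parsed
# datetime is naive).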
Exemple #44
0
def valid_date(s):
    try:
        return datetime.strptime(s, "%Y-%m-%d")
    except ValueError:
        msg = "Not a valid date: '{0}'.".format(s)
        raise argparse.ArgumentTypeError(msg)
Exemple #45
0
def parse_time(time_string):
    return time.mktime(datetime.strptime(time_string, "%Y/%m/%d").timetuple())
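# e.g. parse_time("2021/03/05") -> seconds since the epoch (float, local time)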
Exemple #46
0
def sort_(profile):
    return datetime.strptime(profile['birthdate'], '%d/%m/%Y')
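# Intended as a sort key, e.g. sorted(profiles, key=sort_) orders profiles by
# their 'birthdate' field (DD/MM/YYYY).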
Exemple #47
0
def date(datestr="", format="%Y-%m-%d"):
    from datetime import datetime
    if not datestr:
        return datetime.today().date()
    return datetime.strptime(datestr, format).date()
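# e.g. date() -> today's date; date('2021-05-04') -> datetime.date(2021, 5, 4)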
Exemple #48
0
async def checkin_setup(client, message, **kwargs):
    """Setup the checkin process for participants
    Required Arguments:
    date -- date of the tournament: YYYY/MM/DD
    time -- time of the tournament: HH:MM (24h format)
    duration -- length of the participant check-in window in minutes.
    """
    # verify date
    date = get_date(kwargs.get('date'))
    if not date:
        await client.send_message(
            message.channel, '❌ Wrong date format. Must be `YYYY/MM/DD`.')
        return

    # verify time
    time = get_time(kwargs.get('time'))
    if not time:
        await client.send_message(
            message.channel,
            '❌ Wrong time format. Must be `HH:MM` (24h format).')
        return

    # verify duration
    try:
        duration = int(kwargs.get('duration'))
    except ValueError:
        await client.send_message(message.channel,
                                  '❌ Duration must be an integer')
        return
    else:
        if duration <= 0:
            await client.send_message(message.channel,
                                      '❌ Duration must be a positive integer')
            return

    # combine date & time
    full_date_time = datetime.strptime(
        kwargs.get('date') + ' ' + kwargs.get('time'), '%Y/%m/%d %H:%M')

    try:
        await kwargs.get('account').tournaments.update(
            kwargs.get('tournament_id'),
            start_at=full_date_time,
            check_in_duration=duration)
    except ChallongeException as e:
        await client.send_message(message.author,
                                  T_OnChallongeException.format(e))
    else:
        await client.send_message(
            message.channel,
            '✅ Start date and check-in duration have been processed')

        try:
            t = await kwargs.get('account').tournaments.show(
                kwargs.get('tournament_id'),
                include_participants=1,
                include_matches=1)
        except ChallongeException:
            log_commands_def.exception('')
        else:
            await update_channel_topic(kwargs.get('account'), t, client,
                                       message.channel)
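The get_date and get_time helpers are not shown in this snippet; a minimal sketch consistent with the formats in the docstring, assuming they return None on invalid input, might look like:

from datetime import datetime

def get_date(s):
    # parse 'YYYY/MM/DD'; None signals invalid or missing input
    try:
        return datetime.strptime(s, '%Y/%m/%d').date() if s else None
    except ValueError:
        return None

def get_time(s):
    # parse 'HH:MM' (24h); None signals invalid or missing input
    try:
        return datetime.strptime(s, '%H:%M').time() if s else None
    except ValueError:
        return None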
Exemple #49
0
    @classmethod
    def get_cn_now(cls):
        utc_dt = datetime.utcnow().replace(tzinfo=timezone.utc)
        format_pattern = '%H:%M'
        cn_dt = utc_dt.astimezone(timezone(timedelta(hours=8)))
        now = cn_dt.strftime(format_pattern)
        return now

    @classmethod
    def str_time_to_float(cls, str_time):
        str_hour = str_time[0:2]
        str_min = str_time[3:]
        hour = float(str_hour)
        if str_min == '30':
            hour = hour + 0.5
        return hour

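# str_time_to_float only recognizes a ':30' half hour; a more general variant
# (an assumption about the intent) converts any HH:MM string to a float hour:
def str_time_to_float_general(str_time):
    hour, minute = str_time.split(':')
    return float(hour) + float(minute) / 60.0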

if __name__ == '__main__':

    while True:
        nowDate = datetime.now().strftime('%Y/%m/%d ')
        rsv_time = datetime.strptime(nowDate + "23:40:00", "%Y/%m/%d %H:%M:%S")
        nowTime = datetime.now()
        if nowTime > rsv_time:  # busy-wait until 23:40 local time has passed
            break

    print(rsv_time)
    print(nowTime)
    print(nowTime > rsv_time)
Exemple #50
0
def todatetime(timestr):
    return datetime.strptime(timestr, '%H:%M:%S').time()
Exemple #51
0
def GetDateInt(date):
    # Convert String format to DateTime (input format): datetime.strptime(date_str, '%Y-%m-%d')
    # Convert DateTime back to String: dateTime.strftime('%Y%m%d')
    date = datetime.strptime(date, '%Y-%m-%d').date()
    date_str = date.strftime('%Y%m%d')
    return int(date_str)
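Usage sketch: the round trip turns an ISO date into a sortable integer.

assert GetDateInt('2021-04-01') == 20210401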
Exemple #52
0
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 25 15:06:58 2019

@author: snoone
"""

import datetime
basedate = "1"
formatfrom = "%H"
formatto = " %H GMT"
print(datetime.datetime.strptime(basedate, formatfrom).strftime(formatto))

from datetime import datetime
from pytz import timezone

date_str = "10"
datetime_obj_naive = datetime.strptime(date_str, "%H")

datetime_obj_pacific = timezone('US/Pacific').localize(datetime_obj_naive)
print(datetime_obj_pacific.strftime("%H"))
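On Python 3.9+, the standard-library zoneinfo module can stand in for pytz here; a sketch of the same conversion:

from datetime import datetime
from zoneinfo import ZoneInfo

naive = datetime.strptime("10", "%H")
pacific = naive.replace(tzinfo=ZoneInfo("US/Pacific"))  # attach the zone directly
print(pacific.strftime("%H"))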
Exemple #53
0
    def get(self, request, *args, **kwargs):

        compras_servicios = ComprasServicios.objects.get(pk=kwargs['pk'])
        detalle = DetalleRequerido.objects.filter(
            compras_servicios_id=kwargs['pk'])

        buffer = BytesIO()
        c = canvas.Canvas(buffer, pagesize=A4)

        PAGE_WIDTH = defaultPageSize[0]
        PAGE_HEIGHT = defaultPageSize[1]

        # Header
        logo = ImageReader(STATICFILES_DIRS[0] + '/dist/img/logoApe.png')
        c.drawImage(logo, 30, 757, 0.75 * inch, 0.75 * inch)

        text = 'Administracion Provincial de Energia'

        c.setFont('Helvetica', 8)
        c.drawString(90, 790, text)
        c.drawString(90, 780, 'Falucho n°585 - Santa Rosa - La Pampa')
        c.drawString(90, 770, 'Tel: (02954) 429590')

        # Top-right corner
        # Vertical lines
        c.line(400, 780, 400, 810)
        c.line(570, 810, 570, 780)
        # Horizontal lines
        c.line(400, 810, 570, 810)
        c.line(570, 780, 400, 780)

        c.setFont('Helvetica', 10)
        c.drawString(410, 790,
                     'N° ....................... / .......................')

        # Convert the date from YYYY-MM-DD to DD/MM/YYYY
        fecha_pedido = datetime.strptime(str(compras_servicios.fecha_pedido),
                                         "%Y-%m-%d").strftime("%d/%m/%Y")

        c.drawString(440, 760, 'Fecha: ' + str(fecha_pedido))

        # Main box
        c.line(30, 750, 570, 750)
        c.line(570, 750, 570, 625)
        c.line(570, 625, 30, 625)
        c.line(30, 625, 30, 750)

        c.drawString(
            35, 737,
            'REQUISICIÓN DE COMPRA DE BIENES Y/O SERVICIOS (COMPRA DIRECTA) / PRESUPUESTO'
        )
        c.drawString(35, 723, 'MOTIVO DEL REQUERIMIENTO:')

        # Insert line breaks into 'motivo' so it fits when drawn on the PDF
        j = 0
        n = 88
        story = ''
        for i in range(len(compras_servicios.motivo)):
            if compras_servicios.motivo[i] == '\n':
                n = i + 88
            if i == n:
                story = story + compras_servicios.motivo[j:n] + '\n'
                j = n
                n = n + 88
        story = story + \
            compras_servicios.motivo[j:len(compras_servicios.motivo)]

        # Text rendered inside the work-details area
        textobject = c.beginText()
        textobject.setTextOrigin(35, 710)
        textobject.setFont("Courier", 10)
        textobject.textLines(story)
        c.drawText(textobject)

        # Vehicles box
        c.setFont('Helvetica', 10)
        c.line(30, 667, 570, 666)
        c.drawString(35, 655, 'Sólo para reparación de vehículos:')

        # Insert line breaks into 'rep_vehículo' so it fits when drawn on the PDF
        j = 0
        n = 61
        story = ''
        for i in range(len(compras_servicios.rep_vehículo)):
            if compras_servicios.rep_vehículo[i] == '\n':
                n = i + 61
            if i == n:
                story = story + compras_servicios.rep_vehículo[j:n] + '\n'
                j = n
                n = n + 61
        story = story + \
            compras_servicios.rep_vehículo[j:len(
                compras_servicios.rep_vehículo)]

        # Text rendered inside the work-details area
        textobject = c.beginText()
        textobject.setTextOrigin(198, 655)
        textobject.setFont("Courier", 10)
        textobject.textLines(story)
        c.drawText(textobject)

        c.setFont('Helvetica', 10)
        if compras_servicios.transporte:
            c.drawString(
                200, 632,
                'N° de legajo: ' + compras_servicios.transporte.num_legajo)
            c.drawString(380, 632,
                         'Dominio: ' + compras_servicios.transporte.patente)
        else:
            c.drawString(200, 632, 'N° de legajo: ')
            c.drawString(380, 632, 'Dominio: ')

        # Table header
        styles = getSampleStyleSheet()
        styleBH = styles["Normal"]
        styleBH.fontSize = 10

        nombre = Paragraph('''DETALLE DE LO REQUERIDO''', styleBH)
        dia = Paragraph('''MONTO''', styleBH)

        data = []
        data.append([nombre, dia])

        styleN = styles["BodyText"]
        styleN.alignment = TA_CENTER
        styleN.fontSize = 7

        # Table body
        contenido = []
        # The range sets how many empty rows are drawn in the document
        for i in range(13):
            contenido.append({
                'name': '',
                'b1': '',
            })

        hight1 = 607  # top of the table

        for part in contenido:
            this_part = [
                part['name'],
                part['b1'],
            ]
            data.append(this_part)
            hight1 = hight1 - 18

        width, height = A4
        table = Table(data, colWidths=[
            15 * cm,
            4.05 * cm,
        ])
        table.setStyle(
            TableStyle([
                ('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
                ('BOX', (0, 0), (-1, -1), 0.25, colors.black),
            ]))

        table.wrapOn(c, width, height)
        table.drawOn(c, 30, hight1)

        hight1 = 594
        delta = 18
        c.setFont('Helvetica', 9)
        monto_total = 0

        for i in range(len(detalle)):
            c.drawString(35, hight1, detalle[i].detalle_requerido)
            c.drawString(460, hight1, str(detalle[i].monto))
            hight1 = hight1 - delta
            monto_total = monto_total + detalle[i].monto

        c.setFont('Helvetica', 10)
        c.drawString(355, 378, 'TOTAL $ IVA incluido')
        c.drawString(460, 378,
                     str("{:.2f}".format(monto_total + (monto_total * 0.21))))

        # Destination / locality / requester
        c.drawString(147, 355, 'DESTINO: ')
        c.drawString(210, 355, compras_servicios.destino)
        c.line(200, 370, 570, 370)  # horizontal line
        c.line(570, 370, 570, 350)  # vertical line
        c.line(570, 350, 200, 350)  # horizontal line
        c.line(200, 370, 200, 350)  # vertical line

        c.drawString(135, 330, 'LOCALIDAD: ')
        c.drawString(210, 330, compras_servicios.localidad.ciudad)
        c.line(200, 342, 570, 342)  # horizontal line
        c.line(570, 342, 570, 322)  # vertical line
        c.line(570, 322, 200, 322)  # horizontal line
        c.line(200, 322, 200, 342)  # vertical line

        c.drawString(126, 300, 'REQUIRENTE:')
        c.drawString(210, 300, compras_servicios.user.get_full_name())
        c.line(200, 315, 570, 315)  # horizontal line
        c.line(570, 315, 570, 295)  # vertical line
        c.line(570, 295, 200, 295)  # horizontal line
        c.line(200, 295, 200, 315)  # vertical line

        # Signatures
        firm = 'Nombre y firma jefe departamento'
        width = stringWidth(firm, 'Helvetica', 10)
        c.setFont('Helvetica', 10)
        x = (PAGE_WIDTH / 2) - (width / 2)
        c.drawString(x, 225, firm)
        c.line(220, 240, 380, 240)
        c.drawString(240, 210, 'Legajo n° .........................')
        # c.showPage()

        c.line(30, 240, 190, 240)
        c.drawString(60, 225, 'Firma del requirente')
        c.drawString(50, 210, 'Legajo n° .........................')

        c.line(410, 240, 570, 240)
        c.drawString(440, 225, 'Gerente/Adm. Gral.')
        c.drawString(430, 210, 'Legajo n° .........................')

        # Procedure box
        c.drawString(35, 188, 'Procedimiento:')

        c.setFont('Helvetica', 8)
        c.drawString(
            35, 175,
            '1) SOLICITUD DEL BIEN O PRESTACIÓN DE SERVICIO. 2) PRESUPUESTO/S REQUERIDO/S. 3) AUTORIZACION JEFE DE DEPARTAMENTO'
        )
        c.drawString(
            35, 165,
            'Y GERENTE/ADMINISTRADOR GENERAL. 4) CONSTATACION DE LA EFECTIVA PRESTACION DE LOS SERVICIOS Y/O REMITO DE'
        )
        c.drawString(
            35, 155,
            'RECEPCIÓN DE LOS BIENES. CONFECCIÓN DE PLANILLA DE CARGO, CUANDO CORRESPONDA. 5) FACTURA DEL PROVEEDOR.'
        )
        c.drawString(
            35, 145,
            '6) ORDEN DE PROVISION (CONTADURÍA) CON TODO LO ANTERIOR ANEXADO.')

        c.line(30, 200, 570, 200)  # horizontal line
        c.line(570, 200, 570, 130)  # vertical line
        c.line(570, 130, 30, 130)  # horizontal line
        c.line(30, 130, 30, 200)  # vertical line

        # Final signature box
        c.setFont('Helvetica', 10)
        c.drawString(
            35, 118,
            'RECIBI DE CONFORMIDAD LOS BIENES ADQUIRIDOS Y/O HE VERIFICADO LA PRESTACIÓN EFECTIVA DEL'
        )
        c.drawString(35, 103, 'SERVICIO EL DÍA: ……/……/……')
        c.line(570, 130, 570, 20)  # vertical line
        c.line(570, 20, 30, 20)  # horizontal line
        c.line(30, 20, 30, 130)  # vertical line

        c.line(400, 50, 560, 50)  # signature line
        c.drawString(410, 40, 'Nombre y firma del Requirente/ ')
        c.drawString(415, 25, 'Jefe depto./Gte./Adm. Gral.')

        c.setFont('Helvetica', 12)

        # Line-break helper variables (set but not used below)
        j = 0
        n = 87
        story = ''

        # Text rendered inside the work-details area
        textobject = c.beginText()
        textobject.setTextOrigin(35, 208)
        textobject.setFont("Courier", 10)

        c.drawText(textobject)

        c.save()
        pdf = buffer.getvalue()
        buffer.close()
        response = HttpResponse(pdf, content_type='application/pdf')
        response['Content-Disposition'] = 'attachment; filename=Reporte-Anticipo.pdf'
        return response
Exemple #54
0
def convert_time(tm):
    tm = datetime.strptime(tm, "%Y-%m-%dT%H:%M:%S.%f")
    return tm
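On Python 3.7+, datetime.fromisoformat parses this exact shape without a format string:

from datetime import datetime

tm = datetime.fromisoformat("2021-04-01T12:30:45.123456")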
Exemple #55
0
def date_validate(self, v_date):
    try:
        datetime.strptime(v_date, '%Y-%m-%d')
    except ValueError:
        sys.exit("Incorrect date format, check your dates; they should look like YYYY-MM-DD")
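Calling sys.exit from a validator kills the whole process; a gentler variant (a design assumption, not the original behavior) raises so the caller decides what to do:

from datetime import datetime

def date_validate_strict(v_date):
    try:
        return datetime.strptime(v_date, '%Y-%m-%d').date()
    except ValueError as exc:
        raise ValueError(f"Incorrect date format: {v_date!r}, expected YYYY-MM-DD") from exc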
Exemple #56
0
def main():

    #df = pandas.DataFrame(columns=["_id","department", "account_name"])

    documents = []

    new_row = {"_id": "1" , \
                            "department": "account.department", \
                            "account_name":"account.account_name"
                             }

    #df = df.append(new_row, ignore_index=True)

    documents.append(new_row)

    new_row = {"_id": "2" , \
                            "department": "account.department", \
                            "account_name":"account.account_name"
                             }

    documents.append(new_row)
    #df = df.append(new_row, ignore_index=True)

    #print(df)

    print("***************************************************************")

    #documents = df.to_dict(orient='records')

    print(documents)
    print(type(documents))

    return

    group = {
        'Keys': ['AWS CloudTrail'],
        'Metrics': {
            'AmortizedCost': {
                'Amount': '0.108152',
                'Unit': 'USD'
            },
            'BlendedCost': {
                'Amount': '0.108152',
                'Unit': 'USD'
            }
        }
    }

    print(group['Metrics'])

    for metric in group['Metrics']:
        print(group['Metrics'][metric]['Amount'])

    return

    # Create CloudWatch client
    cloudwatch = boto3.client('cloudwatch')

    # List metrics through the pagination interface
    paginator = cloudwatch.get_paginator('list_metrics')
    for response in paginator.paginate(  #Dimensions=[{'Name': 'LogGroupName'}],
            #MetricName='IncomingLogEvents',
            Namespace='AWS/EC2'):
        for row in response['Metrics']:
            print(row['MetricName'])
            for row2 in row['Dimensions']:
                print(row2['Name'])
                print(row2['Value'])

    return

    cloudwatch = boto3.client('cloudwatch')
    start_time = "2021-04-01"
    end_time = "2021-04-05"
    start_time = start_time + 'T00:00:00Z'
    end_time = end_time + 'T00:00:00Z'

    start_time = datetime.strptime(start_time, '%Y-%m-%dT%H:%M:%SZ')
    end_time = datetime.strptime(end_time, '%Y-%m-%dT%H:%M:%SZ')

    response = cloudwatch.get_metric_statistics(Namespace='AWS/EC2',
                                                Dimensions=[{
                                                    'Name':
                                                    'InstanceId',
                                                    'Value':
                                                    ec2.instance_id
                                                }],
                                                MetricName='CPUUtilization',
                                                StartTime=start_time,
                                                EndTime=end_time,
                                                Period=3600,
                                                Statistics=['Average'])

    datapoints = response["Datapoints"]

    print(datapoints)

    return

    ec2_list = []
    session = boto3.Session()
    ec2 = session.resource('ec2')
    instances = ec2.instances.filter()

    for instance in instances:
        availability_zone = instance.placement["AvailabilityZone"]
        state = instance.state['Name']
        account_number = instance.network_interfaces_attribute[0]['OwnerId']
        account_name = Utility.map_account_name_to_account_number(
            account_number)
        department = Utility.map_department_to_account(account_number)

    return

    print(Utility.get_service_namespace('Amazon API Gateway2'))
    return

    client = boto3.client('sts')
    response = client.get_caller_identity()

    print(response)

    return

    aws_service = AwsService()

    ec2_list = aws_service.get_aws_describe_instances()

    print(ec2_list)

    ec2 = ec2_list[0]

    df_cost = aws_service.get_aws_cost_and_usage_with_resources(
        ec2=ec2,
        start_time='2021-02-14T00:00:00Z',
        end_time='2021-02-15T00:00:00Z',
        granularity="HOURLY",
        metrics="AmortizedCost")

    print(df_cost)

    return

    #get_aws_cost_and_usage_with_resources

    targetES = Elasticsearch(
        "https://*****:*****@c11f5bc9787c4c268d3b960ad866adc2.eu-central-1.aws.cloud.es.io:9243"
    )

    p = IngestClient(targetES)

    if not p.get_pipeline(id="account-cost-threshold"):
        p.put_pipeline(
            id='account-cost-threshold_2',
            body={
                'description':
                "add threshold",
                'processors': [{
                    "set": {
                        "field": "_source.ingest_time",
                        "value": "{{_ingest.timestamp}}"
                    }
                }, {
                    "script": {
                        "lang":
                        "painless",
                        "source":
                        "\r\n          if (ctx.containsKey(\"pu\")) { \r\n            String unit = ctx['pu'];\r\n            int value;\r\n            if (unit == \"SAST\") { \r\n              value = 25000; \r\n            } \r\n            else if (unit == \"CxGo\") { \r\n              value = 15000; \r\n            } \r\n            else if (unit == \"AST\") { \r\n              value = 7000; \r\n            } \r\n            else if (unit == \"AST Integration\") { \r\n              value = 1000; \r\n            } \r\n            else if (unit == \"CB\") { \r\n              value = 5000; \r\n            } \r\n            else if (unit == \"SCA\") {\r\n              value = 85000; \r\n            } \r\n            else {\r\n              value = 20000; \r\n            }\r\n            ctx['threshold_value'] = value;\r\n          }\r\n        "
                    }
                }]
            })

    if not p.get_pipeline(id="ec2-cost-idle-cpu"):
        p.put_pipeline(
            id='ec2-cost-idle-cpu',
            body={
                'description':
                "add idle_text, cpu_percent fields",
                'processors': [{
                    "script": {
                        "lang":
                        "painless",
                        "source":
                        "\r\n          if (ctx.containsKey(\"is_idle\")) { \r\n            String text;\r\n            int idle;\r\n            if (ctx.is_idle instanceof byte ||\r\n                ctx.is_idle instanceof short ||\r\n                ctx.is_idle instanceof int ||\r\n                ctx.is_idle instanceof long ||\r\n                ctx.is_idle instanceof float ||\r\n                ctx.is_idle instanceof double)\r\n            {\r\n                idle = (int)ctx.is_idle;\r\n            } else {  \r\n                idle = Integer.parseInt(ctx['is_idle']);\r\n            }\r\n            if (idle == 0) { \r\n              text = \"In Use\";\r\n            } else if (idle == 1) {\r\n              text = \"Potential Waste\";\r\n            } else {\r\n              text = \"\";\r\n            }\r\n            ctx['idle_text'] = text;\r\n          }\r\n          float cpu;\r\n          if (ctx.containsKey(\"cpu_utilization\")) {\r\n            if (ctx.cpu_utilization instanceof byte ||\r\n                ctx.cpu_utilization instanceof short ||\r\n                ctx.cpu_utilization instanceof int ||\r\n                ctx.cpu_utilization instanceof long ||\r\n                ctx.cpu_utilization instanceof float ||\r\n                ctx.cpu_utilization instanceof double)\r\n            {\r\n                cpu = (float)ctx.cpu_utilization/100;\r\n            } else {   \r\n              cpu = Float.parseFloat(ctx['cpu_utilization'])/100;\r\n            }\r\n            ctx['cpu_percent'] = cpu;\r\n          }\r\n        "
                    }
                }]
            })

    return

    request_body = {
        "index_patterns": ["ec2-cost-*"],
        "settings": {
            "number_of_shards": 1,
            "number_of_replicas": 1
        },
        'mappings': {
            'properties': {
                'start_time': {
                    'format': 'dateOptionalTime',
                    'type': 'date'
                },
                'cpu_utilization': {
                    'type': 'float'
                },
                'network_in': {
                    'type': 'float'
                },
                'network_out': {
                    'type': 'float'
                },
                'disk_write_ops': {
                    'type': 'float'
                },
                'disk_read_ops': {
                    'type': 'float'
                },
                'disk_write_bytes': {
                    'type': 'float'
                },
                'disk_read_bytes': {
                    'type': 'float'
                },
                'ebs_write_bytes': {
                    'type': 'float'
                },
                'ebs_read_bytes': {
                    'type': 'float'
                },
                'is_idle': {
                    'type': 'short'
                },
                'availability_zone': {
                    'type': 'keyword'
                },
                'instance_id': {
                    'type': 'keyword'
                },
                'instance_type': {
                    'type': 'keyword'
                },
                'launch_time': {
                    'format': 'dateOptionalTime',
                    'type': 'date'
                },
                'state': {
                    'type': 'keyword'
                },
                'ebs_optimized': {
                    'type': 'keyword'
                },
                'account_number': {
                    'type': 'keyword'
                },
                'pu': {
                    'type': 'keyword'
                },
                'account_name': {
                    'type': 'keyword'
                },
                'cost': {
                    'type': 'float'
                },
            }
        }
    }

    if not targetES.indices.exists_template("ec2-cost-template"):
        targetES.indices.put_template("ec2-cost-template",
                                      request_body,
                                      create=True)

    return

    ec2_list = []
    session = boto3.Session()
    ec2 = session.resource('ec2')
    instances = ec2.instances.filter()

    for instance in instances:
        availability_zone = instance.placement["AvailabilityZone"]
        state = instance.state['Name']

    return

    client = boto3.client('cloudwatch')

    metric_list = [
        'CPUUtilization', 'NetworkOut', 'NetworkIn', 'EBSWriteBytes',
        'EBSReadBytes', 'DiskReadBytes', 'DiskWriteBytes'
    ]  #'NetworkPacketsOut','NetworkPacketsIn','DiskWriteOps','DiskReadOps']

    aws_service = AwsService()

    start_date = '2021-01-17'
    end_date = '2021-01-18'
    instance_id = 'i-0d4dc0ddfe07c9259'

    ec2_list = aws_service.get_aws_describe_instances()

    ec2 = ec2_list[0]

    response = client.list_metrics(Namespace='AWS/EC2',
                                   Dimensions=[
                                       {
                                           'Name': 'InstanceId',
                                           'Value': ec2.instance_id
                                       },
                                   ])

    for metric in response['Metrics']:
        if metric['MetricName'] in metric_list:
            print(metric['MetricName'])

    #pprint.pprint(response['Metrics'])

    return

    frames = []

    for metric_name in metric_list:
        statistics = 'Average'
        namespace = 'AWS/EC2'
        instance_id = ec2.instance_id
        period = 3600
        start_time = start_date
        end_time = end_date

        df = aws_service.get_aws_metric_statistics(ec2, metric_name, period,
                                                   start_time, end_time,
                                                   namespace, statistics)

        if not df.empty:
            frames.append(df)

    print(frames)

    return

    cloudwatch = boto3.client('cloudwatch')

    response = cloudwatch.get_metric_statistics(Namespace='AWS/EC2',
                                                Dimensions=[{
                                                    'Name':
                                                    'InstanceId',
                                                    'Value':
                                                    'i-0c825168d7ad6093a'
                                                }],
                                                MetricName="CPUUtilization",
                                                StartTime='2021-01-16',
                                                EndTime='2021-01-19',
                                                Period=3600,
                                                Statistics=['Average'])

    pprint.pprint(response)
    return

    dt = '2021-01-01T00:00:00Z'
    dt_time = datetime.datetime.strptime(dt, '%Y-%m-%dT%H:%M:%SZ')
    dt_time = dt_time.astimezone()
    print(dt_time)
    return

    client = boto3.client('ce')

    response = client.get_cost_and_usage_with_resources(
        TimePeriod={
            'Start': '2021-01-01',
            'End': '2021-01-02'
        },
        Metrics=['AmortizedCost'],
        Granularity='HOURLY',
        Filter={
            "And": [{
                "Dimensions": {
                    "Key": "SERVICE",
                    "MatchOptions": ["EQUALS"],
                    "Values": ["Amazon Elastic Compute Cloud - Compute"]
                }
            }, {
                "Dimensions": {
                    "Key": "RESOURCE_ID",
                    "MatchOptions": ["EQUALS"],
                    "Values": ["i-0d4dc0ddfe07c9259"]
                }
            }]
        })

    datapoints = response["ResultsByTime"]

    for datapoint in datapoints:
        dt = datapoint['TimePeriod']['Start']
        dt_time = datetime.datetime.strptime(dt, '%Y-%m-%dT%H:%M:%SZ')
        dt_time = dt_time.astimezone()
        print(dt_time)
        #print(datapoint['TimePeriod']['Start'])
        #print(datapoint['Total']['AmortizedCost']['Amount'])

    #pprint.pprint(response)

    return

    session = boto3.Session()
    ec2 = session.resource('ec2')
    instances = ec2.instances.filter()

    client = boto3.client('lambda', region_name='eu-west-1')
    response = client.list_functions()

    pprint.pprint(response)
    return

    id = boto3.client('sts').get_caller_identity().get('Account')

    name = boto3.client('organizations').describe_account(
        AccountId=id).get('Account').get('Name')

    print(name)

    return

    client = boto3.client('ce')
    '''
    response = client.get_cost_and_usage(
        TimePeriod={
            'Start': '2021-01-02',
            'End': '2021-01-02'
        },
        Granularity='DAILY',
        Metrics=['AMORTIZED_COST'],
        GroupBy=[
        {
            'Type': 'DIMENSION',
            'Key': 'SERVICE'
        }]
    )

    pprint.pprint(response)
    return
    '''

    response = client.get_cost_forecast(
        TimePeriod={
            'Start': '2021-01-06',
            'End': '2021-02-01'
        },
        Metric='AMORTIZED_COST',
        Granularity='MONTHLY',
        PredictionIntervalLevel=80,
    )

    print(response['ForecastResultsByTime'][0]['MeanValue'])
    print(response['ForecastResultsByTime'][0]['PredictionIntervalLowerBound'])
    print(response['ForecastResultsByTime'][0]['PredictionIntervalUpperBound'])

    return

    given_date = '2020-05-05'

    date_time_obj = datetime.datetime.strptime(given_date, '%Y-%m-%d')

    first_day_of_month = date_time_obj.replace(day=1)

    next_month = first_day_of_month + relativedelta(months=+1)

    first_day_of_month_str = first_day_of_month.strftime('%Y-%m-%d')

    print(first_day_of_month)
    print(first_day_of_month_str)
    print(next_month)
    return

    client = boto3.client('sts')

    response = client.get_caller_identity()

    print(response['Account'])

    return

    client = boto3.client('lambda')

    response = client.update_function_configuration(
        FunctionName='billingoptimizations-prod-calcBillingOptimizations',
        Environment={'Variables': {
            'TEST': '11111'
        }},
    )

    response = client.get_function_configuration(
        FunctionName='Cx-CircleCi-Pipeliene-Status-Shipper')

    print(response)
    return

    client = boto3.client('ce')
    '''
    response = client.get_cost_and_usage(
        TimePeriod={
            'Start': '2020-09-01',
            'End': '2020-12-01'
        },
        Granularity='MONTHLY',
        Metrics=['AMORTIZED_COST'],
        GroupBy=[
        {
            'Type': 'DIMENSION',
            'Key': 'SERVICE'
        }]
    )

    print(response)
    return
    '''

    response = client.get_cost_forecast(
        TimePeriod={
            'Start': '2021-01-05',
            'End': '2021-01-06'
        },
        Metric='AMORTIZED_COST',
        Granularity='DAILY',
        PredictionIntervalLevel=80,
    )
    '''
        Filter = {      
            "Dimensions": {
            "Key": "SERVICE",
            "Values": ["AWS CloudTrail","EC2 - Other"]
            }
        }
    '''

    pprint.pprint(response)

    return

    targetES = Elasticsearch(
        "https://*****:*****@c11f5bc9787c4c268d3b960ad866adc2.eu-central-1.aws.cloud.es.io:9243"
    )

    now = datetime.datetime.now()
    target_index_name = "account-billing-" + now.strftime("%m-%Y")

    request_body = {
        "settings": {
            "number_of_shards": 5,
            "number_of_replicas": 1
        },
        'mappings': {
            'properties': {
                'account': {
                    'type': 'text'
                },
                'keys': {
                    'type': 'text'
                },
                'amount': {
                    'type': 'float'
                },
                'start_time': {
                    'format': 'dateOptionalTime',
                    'type': 'date'
                },
                'end_time': {
                    'format': 'dateOptionalTime',
                    'type': 'date'
                },
                'metrics': {
                    'type': 'text'
                },
            }
        }
    }

    targetES.indices.create(index=target_index_name, body=request_body)

    return

    response = client.get_cost_and_usage(TimePeriod={
        'Start': '2020-09-01',
        'End': '2020-12-01'
    },
                                         Granularity='MONTHLY',
                                         Metrics=['AMORTIZED_COST'],
                                         GroupBy=[{
                                             'Type': 'DIMENSION',
                                             'Key': 'SERVICE'
                                         }])

    pprint.pprint(response)

    for row in response['ResultsByTime']:
        pprint.pprint(row['TimePeriod']['Start'])
        pprint.pprint(row['TimePeriod']['End'])
        for group in row['Groups']:
            pprint.pprint(group['Keys'][0])
            pprint.pprint(group['Metrics']['AmortizedCost']['Amount'])
            key_list = list(group['Metrics'].keys())
            pprint.pprint(key_list[0])

        print("************************************")

    return

    result = response['ResultsByTime'][0]['Groups']
    pprint.pprint(result)

    return

    for row in result:
        print(row['Keys'][0])
        print(row['Metrics']['AmortizedCost']['Amount'])
        print("********************")
    #print(result)

    #pprint.pprint(result)

    #pprint.pprint(response)

    return

    #print(response['Environment']['Variables'])

    results = response['Environment']['Variables']

    for key in results.keys():
        print(results[key])

    run_parallel()
Exemple #57
0
    
s = time.mktime(st)    # convert struct_time to wall-clock (epoch) time
st = time.gmtime(s)
for s in st:
    print(s)

st = time.localtime()  # local time as a struct_time; the local timezone follows the system environment
print(st)


t = datetime.datetime(2012,9,3,21,30)
print(t)

t      = datetime.datetime(2012,9,3,21,30)
t_next = datetime.datetime(2012,9,5,23,30)
delta1 = datetime.timedelta(seconds = 600)
delta2 = datetime.timedelta(weeks = 3)
print(t + delta1)
print(t + delta2)
print(t_next - t)


from datetime import datetime
fmt = "output-%Y-%m-%d-%H%M%S.txt"
s   = "output-1997-12-23-030000.txt"
t   = datetime.strptime(s, fmt)
print(t)



Exemple #58
0
def get_last(date_list):
    x = sorted(date_list,
               key=lambda xx: datetime.strptime(xx, '%Y-%m-%d %H:%M'))
    return x[-1]
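Sorting the whole list just to take the last element is O(n log n); max does the same job in a single pass:

def get_last_max(date_list):
    return max(date_list, key=lambda s: datetime.strptime(s, '%Y-%m-%d %H:%M'))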
Exemple #59
0
def excheck_table():
    try:
        from os import listdir
        from pathlib import PurePath, Path
        from datetime import datetime
        from flask import request
        if request.method == "GET":
            jsondata = {"ExCheck": {'Templates': [], 'Checklists': []}}
            from FreeTAKServer.controllers.ExCheckControllers.templateToJsonSerializer import templateSerializer
            excheckTemplates = DatabaseController().query_ExCheck()
            for template in excheckTemplates:
                templateData = template.data
                templatejson = {
                    "filename":
                    templateData.filename,
                    "name":
                    templateData.keywords.name,
                    "submissionTime":
                    templateData.submissionTime,
                    "submitter":
                    str(
                        dbController.query_user(
                            query=f'uid == "{template.creatorUid}"',
                            column=['callsign'])),
                    "uid":
                    templateData.uid,
                    "hash":
                    templateData.hash,
                    "size":
                    templateData.size,
                    "description":
                    templateData.keywords.description
                }
                jsondata["ExCheck"]['Templates'].append(templatejson)
            excheckChecklists = DatabaseController().query_ExCheckChecklist()
            for checklist in excheckChecklists:
                try:
                    templatename = checklist.template.data.name
                except AttributeError:
                    templatename = "template removed"
                checklistjson = {
                    "filename":
                    checklist.filename,
                    "name":
                    checklist.name,
                    "startTime":
                    datetime.strftime(checklist.startTime,
                                      "%Y-%m-%dT%H:%M:%S.%fZ"),
                    "submitter":
                    checklist.callsign,
                    "uid":
                    checklist.uid,
                    "description":
                    checklist.description,
                    "template":
                    templatename
                }
                jsondata["ExCheck"]['Checklists'].append(checklistjson)
            return json.dumps(jsondata), 200

        elif request.method == "DELETE":
            jsondata = request.data
            ExCheckArray = json.loads(jsondata)["ExCheck"]
            for item in ExCheckArray["Templates"]:
                templateitem = DatabaseController().query_ExCheck(
                    f'ExCheckData.uid == "{item["uid"]}"', verbose=True)[0]
                os.remove(
                    str(
                        PurePath(Path(MainConfig.ExCheckFilePath),
                                 Path(templateitem.data.filename))))
                DatabaseController().remove_ExCheck(
                    f'PrimaryKey == "{templateitem.PrimaryKey}"')
            for item in ExCheckArray["Checklists"]:
                checklistitem = DatabaseController().query_ExCheckChecklist(
                    f'uid == "{item["uid"]}"')[0]
                os.remove(
                    str(
                        PurePath(Path(MainConfig.ExCheckChecklistFilePath),
                                 Path(checklistitem.filename))))
                DatabaseController().remove_ExCheckChecklist(
                    f'uid == "{item["uid"]}"')
            return 'success', 200
        elif request.method == "POST":
            try:
                import uuid
                from FreeTAKServer.controllers.ExCheckControllers.templateToJsonSerializer import templateSerializer
                xmlstring = f'<?xml version="1.0"?><event version="2.0" uid="{uuid.uuid4()}" type="t-x-m-c" time="2020-11-28T17:45:51.000Z" start="2020-11-28T17:45:51.000Z" stale="2020-11-28T17:46:11.000Z" how="h-g-i-g-o"><point lat="0.00000000" lon="0.00000000" hae="0.00000000" ce="9999999" le="9999999" /><detail><mission type="CHANGE" tool="ExCheck" name="exchecktemplates" authorUid="S-1-5-21-2720623347-3037847324-4167270909-1002"><MissionChanges><MissionChange><contentResource><filename>61b01475-ad44-4300-addc-a9474ebf67b0.xml</filename><hash>018cd5786bd6c2e603beef30d6a59987b72944a60de9e11562297c35ebdb7fd6</hash><keywords>test init</keywords><keywords>dessc init</keywords><keywords>FEATHER</keywords><mimeType>application/xml</mimeType><name>61b01475-ad44-4300-addc-a9474ebf67b0</name><size>1522</size><submissionTime>2020-11-28T17:45:47.980Z</submissionTime><submitter>wintak</submitter><tool>ExCheck</tool><uid>61b01475-ad44-4300-addc-a9474ebf67b0</uid></contentResource><creatorUid>S-1-5-21-2720623347-3037847324-4167270909-1002</creatorUid><missionName>exchecktemplates</missionName><timestamp>2020-11-28T17:45:47.983Z</timestamp><type>ADD_CONTENT</type></MissionChange></MissionChanges></mission></detail></event>'
                # this is where the client will post the xmi of a template
                from datetime import datetime
                from lxml import etree
                import hashlib
                # possibly the uid of the client submitting the template
                authoruid = request.args.get('clientUid')
                if not authoruid:
                    authoruid = 'server-uid'
                XMI = request.data.decode()
                serializer = templateSerializer(XMI)
                object = serializer.convert_template_to_object()
                object.timestamp = datetime.strptime(object.timestamp,
                                                     "%Y-%m-%dT%H:%M:%S.%fZ")
                serializer.create_DB_object(object)
                xml = etree.fromstring(XMI)
                path = str(
                    PurePath(Path(MainConfig.ExCheckFilePath),
                             Path(f'{object.data.uid}.xml')))
                with open(path, 'w+') as file:
                    file.write(XMI)
                    file.close()

                uid = object.data.uid
                temp = etree.fromstring(XMI)
                cot = etree.fromstring(xmlstring)
                cot.find('detail').find('mission').set("authorUid", authoruid)
                resources = cot.find('detail').find('mission').find(
                    'MissionChanges').find('MissionChange').find(
                        'contentResource')
                resources.find('filename').text = temp.find(
                    'checklistDetails').find('uid').text + '.xml'
                resources.findall('keywords')[0].text = temp.find(
                    'checklistDetails').find('name').text
                resources.findall('keywords')[1].text = temp.find(
                    'checklistDetails').find('description').text
                resources.findall('keywords')[2].text = temp.find(
                    'checklistDetails').find('creatorCallsign').text
                resources.find('uid').text = temp.find(
                    'checklistDetails').find('uid').text
                resources.find('name').text = temp.find(
                    'checklistDetails').find('uid').text
                resources.find('size').text = str(len(XMI))
                resources.find('hash').text = str(
                    hashlib.sha256(str(XMI).encode()).hexdigest())
                z = etree.tostring(cot)
                from FreeTAKServer.model.testobj import testobj
                object = testobj()
                object.xmlString = z
                APIPipe.send(object)
                return str(uid), 200
            except Exception as e:
                print(str(e))
    except Exception as e:
        return str(e), 500
Exemple #60
0
def parser(x):
    return datetime.strptime(x, '%Y %m %d %H')
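Usage sketch: the format string expects space-separated year, month, day, and hour.

print(parser('2021 04 01 12'))  # 2021-04-01 12:00:00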