Example #1
 def poser(self, objet, personnage, qtt=1):
     """On pose l'objet."""
     Salle = importlib.import_module("primaires.salle.salle").Salle
     if isinstance(objet.contenu, Salle) and \
             (datetime.now() - objet.apparition).seconds < 100:
         objet.apparition = datetime.now()
     BaseType.poser(self, objet, personnage)
Example #2
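    # Inner function of a timing decorator; the enclosing wrapper that supplies
    # `func` is not shown in this excerpt.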
    def new_func():

        starttime = datetime.now()

        func()

        print("exectime: ", datetime.now() - starttime)
Example #3
def index(request):
    context = RequestContext(request)

    category_list = Category.objects.all()
    context_dict = {'categories': category_list}

    for category in category_list:
        category.url = encode_url(category.name)

    page_list = Page.objects.order_by('-views')[:5]
    context_dict['pages'] = page_list

    #### NEW CODE ####
    if request.session.get('last_visit'):
        # The session has a value for the last visit
        last_visit_time = request.session.get('last_visit')
        visits = request.session.get('visits', 0)

        if (datetime.now() - datetime.strptime(last_visit_time[:-7], "%Y-%m-%d %H:%M:%S")).days > 0:
            request.session['visits'] = visits + 1
            request.session['last_visit'] = str(datetime.now())
    else:
        # The get returns None, and the session does not have a value for the last visit.
        request.session['last_visit'] = str(datetime.now())
        request.session['visits'] = 1
    #### END NEW CODE ####

    # Render and return the rendered response back to the user.
    return render_to_response('rango/index.html', context_dict, context)
Example #4
def time_tensorflow_run(session, target, info_string):
  num_steps_burn_in = 10
  total_duration = 0.0
  total_duration_squared = 0.0
  for i in xrange(FLAGS.num_batches + num_steps_burn_in):
    run_options = None
    run_metadata = None
    if FLAGS.enable_trace and i == num_steps_burn_in - 1:
      run_options = config_pb2.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
      run_metadata = config_pb2.RunMetadata()
      merged = tf.merge_all_summaries()
      writer = tf.train.SummaryWriter('/home/minjie/tmp/pipeline', session.graph)

    # Run session
    start_time = time.time()
    _ = session.run(target, options=run_options, run_metadata=run_metadata)
    duration = time.time() - start_time

    if FLAGS.enable_trace and i == num_steps_burn_in - 1:
      tl = tf.python.client.timeline.Timeline(run_metadata.step_stats)
      ctf = tl.generate_chrome_trace_format()
      with open('tf_trace.ctf', 'w') as f:
        f.write(ctf)

    if i >= num_steps_burn_in:
      if not i % 10:
        print ('%s: step %d, duration = %.3f speed = %.3f images/sec' %
               (datetime.now(), i - num_steps_burn_in, duration, FLAGS.batch_size / duration))
      total_duration += duration
      total_duration_squared += duration * duration
  mn = total_duration / FLAGS.num_batches
  vr = total_duration_squared / FLAGS.num_batches - mn * mn
  sd = math.sqrt(vr)
  print ('%s: %s across %d steps, %.3f +/- %.3f sec / batch' %
         (datetime.now(), info_string, FLAGS.num_batches, mn, sd))
Example #5
    def _clickedCb(self, unused_actor, event):
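        # Remove this item on Ctrl+click, or on a double click (two clicks within
        # half a second); always record the time of this click.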
        if (event.modifier_state & Clutter.ModifierType.CONTROL_MASK):
            self.remove()
        elif (datetime.now() - self.lastClick).total_seconds() < 0.5:
            self.remove()

        self.lastClick = datetime.now()
Example #6
    def handle(self, *args, **options):

        scriptStartTime = datetime.now()

        db1 = psycopg2.connect (database="klpmis", user="******", password="******")
        curAcObj = current_academic()
        currentAcademicYearSql="select id,name from schools_academic_year where id=%s" %(curAcObj.id)
        
        currentAcademicYearRec = run_query(db1,currentAcademicYearSql)
        currentAcademicId = currentAcademicYearRec[0][0]
        currentAcademicYearName = currentAcademicYearRec[0][1]
        currentYear = int(currentAcademicYearName.split('-')[1])
        nextAcademicYear = str(currentYear) + '-' + str(currentYear + 1)
        print 'Current Academic Year :', currentAcademicYearName
        print 'Next Academic Year  : ', nextAcademicYear
        
        nextAcademicYearSql="select id,name from schools_academic_year where name='%s'"%(nextAcademicYear)
        print nextAcademicYearSql
        nextAcademicRec = run_query(db1,nextAcademicYearSql)
        nextAcademicId = nextAcademicRec[0][0]

        move_class_sql(currentAcademicId, nextAcademicId)
        totaltime = datetime.now() - scriptStartTime
        print totaltime
        print 'Preschool students have been promoted to', nextAcademicYear
Example #7
def time_tensorflow_run(session, target, info_string):
  """Run the computation to obtain the target tensor and print timing stats.

  Args:
    session: the TensorFlow session to run the computation under.
    target: the target Tensor that is passed to the session's run() function.
    info_string: a string summarizing this run, to be printed with the stats.

  Returns:
    None
  """
  num_steps_burn_in = 10
  total_duration = 0.0
  total_duration_squared = 0.0
  for i in xrange(FLAGS.num_batches + num_steps_burn_in):
    start_time = time.time()
    _ = session.run(target)
    duration = time.time() - start_time
    if i >= num_steps_burn_in:
      if not i % 10:
        print ('%s: step %d, duration = %.3f' %
               (datetime.now(), i - num_steps_burn_in, duration))
      total_duration += duration
      total_duration_squared += duration * duration
  mn = total_duration / FLAGS.num_batches
  vr = total_duration_squared / FLAGS.num_batches - mn * mn
  sd = math.sqrt(vr)
  print ('%s: %s across %d steps, %.3f +/- %.3f sec / batch' %
         (datetime.now(), info_string, FLAGS.num_batches, mn, sd))
Example #8
    def test_08_publishing(self):
        self.create_default_page_set()

        page = Page.objects.get(pk=1)
        page2 = Page.objects.get(pk=2)
        self.is_published(page.get_absolute_url(), should_be=False)
        self.is_published(page2.get_absolute_url(), should_be=False)

        page.active = True
        page.save()
        page2.active = True
        page2.save()
        self.is_published(page.get_absolute_url(), should_be=True)
        self.is_published(page2.get_absolute_url(), should_be=True)

        old_publication = page.publication_date
        page.publication_date = datetime.now() + timedelta(days=1)
        page.save()
        self.is_published(page.get_absolute_url(), should_be=False)

        # Should be not accessible because of its parent's inactivity
        self.is_published(page2.get_absolute_url(), should_be=False)

        page.publication_date = old_publication
        page.publication_end_date = datetime.now() - timedelta(days=1)
        page.save()
        self.is_published(page.get_absolute_url(), should_be=False)

        # Should be not accessible because of its parent's inactivity
        self.is_published(page2.get_absolute_url(), should_be=False)

        page.publication_end_date = datetime.now() + timedelta(days=1)
        page.save()
        self.is_published(page.get_absolute_url(), should_be=True)
        self.is_published(page2.get_absolute_url(), should_be=True)
Example #9
 def handle_noargs(self, **options):
     articles_of_interest = ArticleOfInterest.objects.all()
     for article in articles_of_interest:
         article_dict = query_text_rendered(article.title,
                                            language=article.title_language)
         # don't import articles we already have
         if SourceArticle.objects.filter(doc_id__exact='%s' % article_dict['revid'],
                                         language=article.title_language):
             continue
         try:
             source_article = SourceArticle(title=article.title,
                                            language=article.title_language,
                                            source_text=article_dict['html'],
                                            timestamp=datetime.now(),
                                            doc_id=article_dict['revid'])
             source_article.save()
             tr = TranslationRequest(article=source_article,
                                      target_language=article.target_language,
                                      date=datetime.now(),
                                      translator=DEFAULT_TRANNY)
             tr.save()
         except Exception as e:
             print type(e)
             print e.args
             try:
                 source_article.delete()
                 tr.delete()
             except:
                 pass
Example #10
def index(request):
    category_list = Category.objects.order_by('-likes')[:5]
    page_list = Page.objects.order_by('-views')[:5]
    context_dict = {'categories': category_list, 'pages': page_list}

    visits = request.session.get('visits')
    if not visits:
        visits = 1
    reset_last_visit_time = False

    last_visit = request.session.get('last_visit')
    if last_visit:
        last_visit_time = datetime.strptime(last_visit[:-7], "%Y-%m-%d %H:%M:%S")
        if (datetime.now() - last_visit_time).seconds > 0:
            visits += 1
            reset_last_visit_time = True
    else:
        reset_last_visit_time = True

    if reset_last_visit_time:
        request.session['last_visit'] = str(datetime.now())
        request.session['visits'] = visits
    context_dict['visits'] = visits

    response = render(request, 'rango/index.html', context_dict)
    return response
Example #11
def checkTask(tile):
    session = DBSession()
    if tile.username is not None:
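        # Release the tile if its current assignment has expired, and refresh
        # its update timestamp.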
        if datetime.now() > tile.update + EXPIRATION_DURATION:
            tile.username = None 
            tile.update = datetime.now()
            session.add(tile)
Example #12
	def run(self):
		spider_start_time = str(datetime.now()).split('.')[0]
		print spider_start_time, 'time to spider start!'
		proxy_manager = ProxyManager()
		page = get_html(BASE_URL)
		page = unicode(page,'GBK').encode('UTF-8')
		page_count = self.get_page_count(page)
		page_count_time = str(datetime.now()).split('.')[0]
		print page_count_time, 'get page count:', page_count
		default_ip = get_default_ip()
		if page_count != 0:
			last_proxy = None
			for i in xrange(1, page_count):
				page = get_html(URL_HEADER + str(i) + URL_END, last_proxy)
				proxy_list = filte(page)
				for proxy in proxy_list:
					if proxy.anonymous_type == '高匿':
						check_result = check_anonymous(proxy, default_ip)
						spider_time = str(datetime.now()).split('.')[0]
						if check_result[0]:
							proxy.delay_time = check_result[1]
							proxy.created_time = str(datetime.now()).split('.')[0]
							proxy.is_in_china = 2
							proxy_manager.add_proxy(proxy, spider_time)
							last_proxy = proxy
						else:
							pass
Example #13
def main():
    serverName = 'localhost'
    serverPort = 12000 #port number from server program
    clientSocket = socket(AF_INET, SOCK_DGRAM) #create client socket
    message = 'ping' #message to be capitalized by server
    i = 0
    while i <= 10:
        start = datetime.now() #start time for rtt calc
        clientSocket.sendto(message,(serverName, serverPort)) #send to server
        clientSocket.settimeout(0.01) #timeout length
        try:
            newMess, serverAddress = clientSocket.recvfrom(2048)
            end = datetime.now() #end time for rtt calc
            rtt = (end - start)
            #printing of information
            print("{0}: Message:{1}\nRTT:{2}\n ".format(i, newMess, rtt))
        except timeout:
            #different printout if timeout occurs
            print("{0}: Timeout\n".format(i))
            start = 0
            rtt = 0
        i += 1 #iteration
        
    clientSocket.close() #close socket at end

    pass
Example #14
def index(request):
    # Request the context of the request.
    # The context contains information such as the client's machine details, for example.
    context = RequestContext(request)

    # construct a dictionary to pass to the template engine as its context. 
    
    category_list = Category.objects.order_by('-likes')[:5]
    page_list = Page.objects.order_by('-views')[:5]

    context_dict = {'categories': category_list, 'pages': page_list, 'cat_list': get_category_list()}
    
    for category in category_list:
        category.url = category.name.replace(' ', '_')

    if request.session.get('last_visit'):
        last_visit = request.session.get('last_visit')
        visits = request.session.get('visits', 0)
        print last_visit
        last_visit_time = datetime.strptime(last_visit[:-7], "%Y-%m-%d %H:%M:%S")

        if (datetime.now() - last_visit_time).days > 0:
            request.session['visits'] = visits + 1
            request.session['last_visit'] = str(datetime.now())
    else:
        request.session['last_visit'] = str(datetime.now())
        request.session['visits'] = 1

    return render_to_response('rango/index.html', context_dict, context)
Example #15
    def test_linked_in_add_to_profile_btn_not_appearing_without_config(self):
        # Without linked-in config don't show Add Certificate to LinkedIn button
        self.client.login(username="******", password="******")

        CourseModeFactory.create(
            course_id=self.course.id,
            mode_slug='verified',
            mode_display_name='verified',
            expiration_datetime=datetime.now(pytz.UTC) - timedelta(days=1)
        )

        CourseEnrollment.enroll(self.user, self.course.id, mode='honor')

        self.course.start = datetime.now(pytz.UTC) - timedelta(days=2)
        self.course.end = datetime.now(pytz.UTC) - timedelta(days=1)
        self.course.display_name = u"Omega"
        self.course = self.update_course(self.course, self.user.id)

        download_url = 'www.edx.org'
        GeneratedCertificateFactory.create(
            user=self.user,
            course_id=self.course.id,
            status=CertificateStatuses.downloadable,
            mode='honor',
            grade='67',
            download_url=download_url
        )
        response = self.client.get(reverse('dashboard'))

        self.assertEquals(response.status_code, 200)
        self.assertNotIn('Add Certificate to LinkedIn', response.content)

        response_url = 'http://www.linkedin.com/profile/add?_ed='
        self.assertNotContains(response, escape(response_url))
Example #16
def _run_changed(msgs, chan):
    '''Consume the cloudsearch_changes queue, and print reporting information
    on how long it took and how many remain
    
    '''
    start = datetime.now(g.tz)
    
    changed = [pickle.loads(msg.body) for msg in msgs]
    
    fullnames = set()
    fullnames.update(LinkUploader.desired_fullnames(changed))
    fullnames.update(SubredditUploader.desired_fullnames(changed))
    things = Thing._by_fullname(fullnames, data=True, return_dict=False)
    
    link_uploader = LinkUploader(g.CLOUDSEARCH_DOC_API, things=things)
    subreddit_uploader = SubredditUploader(g.CLOUDSEARCH_SUBREDDIT_DOC_API,
                                           things=things)
    
    link_time = link_uploader.inject()
    subreddit_time = subreddit_uploader.inject()
    cloudsearch_time = link_time + subreddit_time
    
    totaltime = (datetime.now(g.tz) - start).total_seconds()
    
    print ("%s: %d messages in %.2fs seconds (%.2fs secs waiting on "
           "cloudsearch); %d duplicates, %s remaining)" %
           (start, len(changed), totaltime, cloudsearch_time,
            len(changed) - len(things),
            msgs[-1].delivery_info.get('message_count', 'unknown')))
Example #17
    def run(self, command):
        self.set_state(9)
        self.log("Starting... %s\n" % self.id)
        self.log("Command %s\n" % command)

        store = []
        last_send_time = datetime.now()

        command = shlex.split(command)
        proc = subprocess.Popen(command, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

        while proc.poll() is None:
            output = proc.stdout.readline()
            store.append(output)
            print output
            print store
            if datetime.now() > timedelta(seconds=settings.REDIS_FLUSH_TIME) + last_send_time:
                if len(store) > 0:
                    print "SAVING"
                    self.log("".join(store))

                store = []
                last_send_time = datetime.now()

        self.log("".join(store))
        result_code = proc.poll()
        return result_code
Example #18
def migrate_instance_task(origCls, orig_creds, migrateCls, migrate_creds, **imaging_args):
    logger.info("migrate_instance_task task started at %s." % datetime.now())
    new_image_id = migrate_instance(origCls, orig_creds,
                     migrateCls, migrate_creds,
                     **imaging_args)
    logger.info("migrate_instance_task task finished at %s." % datetime.now())
    return new_image_id
Example #19
def machine_imaging_task(managerCls, manager_creds, create_img_args):
    logger.info("machine_imaging_task task started at %s." % datetime.now())
    manager = managerCls(**manager_creds)
    manager.hook = create_img_args.pop('machine_request', None)
    new_image_id = manager.create_image(**create_img_args)
    logger.info("machine_imaging_task task finished at %s." % datetime.now())
    return new_image_id
Example #20
def wait_for_success_events(request_id, timeout_in_minutes=None,
                            sleep_time=5, stream_events=True, can_abort=False):
    if timeout_in_minutes == 0:
        return
    if timeout_in_minutes is None:
        timeout_in_minutes = 10

    start = datetime.now()
    timediff = timedelta(seconds=timeout_in_minutes * 60)

    last_time = None

    streamer = io.get_event_streamer()
    if can_abort:
        streamer.prompt += strings['events.abortmessage']

    events = []
    try:
        # Get first event in order to get start time
        while not events:
            events = elasticbeanstalk.get_new_events(
                None, None, request_id, last_event_time=None
            )

            if len(events) > 0:
                event = events[-1]
                app_name = event.app_name
                env_name = event.environment_name

                if stream_events:
                    streamer.stream_event(get_event_string(event))
                # Test event message for success string
                if _is_success_string(event.message):
                    return
                last_time = event.event_date
            else:
                time.sleep(sleep_time)

        # Get remaining events without request id
        while (datetime.now() - start) < timediff:
            time.sleep(sleep_time)

            events = elasticbeanstalk.get_new_events(
                app_name, env_name, None, last_event_time=last_time
            )

            for event in reversed(events):
                if stream_events:
                    streamer.stream_event(get_event_string(event))
                    # We don't need to update last_time if we are not printing.
                    # This can solve timing issues
                    last_time = event.event_date

                # Test event message for success string
                if _is_success_string(event.message):
                    return
    finally:
        streamer.end_stream()
    # We have timed out
    raise TimeoutError('Timed out while waiting for command to Complete. The timeout can be set using the --timeout option.')
Example #21
 def run_round(self, 
               round_name,
               min_doge=5,
               max_doge=6):
     dogeline = "    such appreciation\n\n\n\n+/u/dogetipbot %s doge"
     round_data = {"round_name": round_name,
                   "users": [],
                   "timestamp":datetime.now()}
     submission = self.reddit.get_submission(submission_id=self.current_thread)
     for comment in submission.comments:
         try:
             if comment.author.name in round_data["users"] or comment.author.name in self.blacklist:
                 pass
                 #TODO: maybe respond to the user explaining why this comment won't work
             else:
                 user = self._get_user_from_comment(comment)
                 if user['age'] > 25:
                     tip_ammount = self._get_doge_tip(min_doge, max_doge, user['gifted'])
                     tip_comment = dogeline %(str(tip_ammount)[:8])
                     user['gifts'].append({"thread":self.current_thread, "ammount":tip_ammount, "timestamp":datetime.now()})
                     user['gifted'] += tip_ammount
                     round_data["users"].append(comment.author.name)
                     comment.reply("              "+round_name+"\n\n"+tip_comment)
                     comment.upvote()
                     self.mc.dogelympics.users.save(user)
         except AttributeError:
             pass #user probably deleted their post
     self.mc.dogelympics.rounds.save(round_data)
     print datetime.now() 
Example #22
 def button_confirm(self):
     taskcode = self._get_mfg_wip_taskcode()
     if self.order_type == 'Upgrade':
         if not self.result_products:
             raise Warning("Please select resulting products for the upgrade")
         for line in self.result_products:
             # create quants for upgrade
             vals = {
                 'product_id': line.product_id.id,
                 'routing_id': taskcode.routing_id.id,
                 'routing_line_id': taskcode.routing_line_id.id,
                 'routing_subrouting_id': taskcode.id,
                 'location_id': taskcode.location_id.id,
                 'qty': 1,
                 'lot_id': line.lot_id.id,
                 'in_date': datetime.now().strftime(DEFAULT_SERVER_DATETIME_FORMAT),
                 'create_date': datetime.now().strftime(DEFAULT_SERVER_DATETIME_FORMAT),
                 'propagated_from_id': line.source_quant.id,
                 'wip_mod_order_id': self.id,
             }
             self.env['stock.quant'].sudo().create(vals)
     elif self.order_type == 'Repair':
         self.production_quants = self.source_quants
     else:
         raise Warning("Unknown order type")
     self.write({'state':'ready', 'date_start': datetime.today()})
Example #23
 def make_transfer(self):
     self.sock_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
     # SO_REUSEADDR: http://stackoverflow.com/questions/3229860/what-is-the-meaning-of-so-reuseaddr-setsockopt-option-linux
     s = self.sock_client
     s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
     s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 50688) # 49.5 KB
     s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 87380) # 85.3 KB
     s.bind(self.client_addr)
     s.connect(self.server_addr)
     start = datetime.now()
     
     ## client -> server
     sent = 0
     while sent < self.filesize:
         sent += s.send(self.randomfile[sent:sent+4096])
     
     # We have to wait until the server finishes reading data from its socket
     # and closes the connection.
     rcvd = s.recv(1)
     time_up = total_seconds(datetime.now() - start)
     
     time.sleep(getattr(self, 'sleep', 0)) # wait for a bucket to fill again
     
     ## server -> client
     start = datetime.now()
     
     while len(rcvd) < self.filesize:
         rcvd += s.recv(1024)
     
     time_down = total_seconds(datetime.now() - start)
     s.close()
     return time_up, time_down
Example #24
def timed(msg, func):
    t0 = datetime.now()

    func()

    dt = datetime.now() - t0
    print("{} Time: {:,.3f} ms".format(msg, dt.total_seconds() * 1000.0), flush=True)
Example #25
	def __init__(self, logfile='history.log'):
		# Initialize the scheduler
		SchedulerPolicy.__init__(self) # super()
		
		self.t = 0
		# Nodes
		self.nodes = {}
		# History
		self.logfile = logfile
		self.history = History(filename=self.logfile)
		# Job submission
		self.lastJobId = 1
		# Simulation
		self.maxTime = None
		# Id for jobs
		if sys.platform == 'win32':
			self.trackerId = datetime.now().strftime('%Y%m%d%H%M')
		else:
			self.trackerId = datetime.now().strftime('%4Y%2m%2d%2H%2M')
		# Specify if the nodes are sent to sleep when there's no load
		self.nodeManagement = True
		
		# Outputs
		self.energy = None
		
		# Step length
		self.STEP = 1
Example #26
def popula_dados():
    # DEBUG
    count = 0
    x = datetime.now()
    print x
    # /DEBUG
    ultimo_idx = politicos.find_one(sort=[('idx', -1)],limit=1)
    if ultimo_idx:
        ultimo_idx = ultimo_idx.get('idx', 1)
    else:
        ultimo_idx = 1
    total = len(range(ultimo_idx,100000))
    for i in range(ultimo_idx,100000):
        # DEBUG
        if count % 1 == 0:
            percent = (float(count) / total) * 100.0
            z = datetime.now()
            delta = z-x
            timestamp_delta = delta.total_seconds()
            if percent > 5:
                eta_secs = (100.0/percent)*timestamp_delta
                eta = timedelta(seconds=eta_secs-timestamp_delta)
            else:
                eta = '--CALCULANDO--'
            sys.stdout.write("\r%d%% - %s - ETA %s - %s" %(percent, delta, eta, i))
            sys.stdout.flush()
        count += 1
        # /DEBUG
        politico = politicos.find_one({'idx':i})
        if not politico:
            dados = carrega_dados_politico(i)
            if not dados:
                continue
            salvar_dados(dados)
Example #27
def bootstrap(request):
    # Don't create dummy playlist tracks if playlist tracks already exist!
    pl_tracks = PlaylistTrack.all().fetch(1)
    if len(pl_tracks) > 0:
        return HttpResponse(status=404)

    playlist = ChirpBroadcast()

    minutes = 0
    tracks = Track.all().fetch(100)
    for track in tracks:
        pl_track = PlaylistTrack(
            playlist=playlist,
            selector=request.user,
            established=datetime.now() - timedelta(minutes=minutes),
            artist=track.album.album_artist,
            album=track.album,
            track=track,
        )
        pl_track.put()
        if minutes > 0 and minutes % 25 == 0:
            pl_break = PlaylistBreak(playlist=playlist, established=datetime.now() - timedelta(minutes=minutes - 1))
            pl_break.put()
        minutes += 5

    return HttpResponseRedirect("/playlists/")
Example #28
    def action_confirm(self):
        for production in self:
            taskcode = production._get_mfg_wip_taskcode()
            for num in xrange(int(production.product_qty)):
                # auto-create serial number
                vals = {
                    'production_id': production.id,
                    'product_id': production.product_id.id,
                    'base_cost': 0.0,
                }
                lot_id = self.env['stock.production.lot'].create(vals)

                # create quants for production
                vals = {
                    'product_id': production.product_id.id,
                    'routing_id': taskcode.routing_id.id,
                    'routing_line_id': taskcode.routing_line_id.id,
                    'routing_subrouting_id': taskcode.id,
                    'location_id': taskcode.location_id.id,
                    'qty': 1,
                    'lot_id': lot_id.id,
                    'in_date': datetime.now().strftime(DEFAULT_SERVER_DATETIME_FORMAT),
                    'create_date': datetime.now().strftime(DEFAULT_SERVER_DATETIME_FORMAT),
                    # 'mfg_order_id': production.id,
                    'wip_mfg_order_id': production.id,
                }
                self.env['stock.quant'].sudo().create(vals)
            production.write({'state': 'confirmed'})
Example #29
def make_twitter_connection(context, twitter_pub_addr):
    """ setup twitter and ZeroMQ connections. """
    client = StreamClient(CONSUMER_KEY,
                    CONSUMER_SECRET,
                    ACCESS_TOKEN,
                    ACCESS_TOKEN_SECRET)

    #setup listener for twitter
    follow_syms = gen_follow_symbols()
    response = client.stream.statuses.filter.post(track=follow_syms)
    l_string = str(datetime.now()) + (
        " Thread called twitter agent ")
    logging.info(l_string)

    try:
        context = context
        tweet_pub_socket = context.socket(zmq.PUB)
        tweet_pub_socket.connect(twitter_pub_addr)
        sys.stdout.write("Initialized twitter_zmq_push \n")
        sys.stdout.flush()
        l_string = str(datetime.now()) + (
            "init twitter_zmq_push success")
        logging.info(l_string)

    except Exception as error:
        l_string = str(datetime.now()) + (
            "Failed to init twitter_zmq_push error: " + str(error))
        logging.info(l_string)

    process_tweets(response, tweet_pub_socket)

    return
Example #30
    def _test_file_time_getter_tz_handling_off(self, getter):
        # Django's TZ (and hence the system TZ) is set to Africa/Algiers which
        # is UTC+1 and has no DST change. We can set the Django TZ to something
        # else so that UTC, Django's TIME_ZONE, and the system timezone are all
        # different.
        now_in_algiers = timezone.make_aware(datetime.now())

        with timezone.override(timezone.get_fixed_timezone(-300)):
            # At this point the system TZ is +1 and the Django TZ
            # is -5.
            self.assertFalse(self.storage.exists('test.file.tz.off'))

            f = ContentFile('custom contents')
            f_name = self.storage.save('test.file.tz.off', f)
            self.addCleanup(self.storage.delete, f_name)
            dt = getter(f_name)
            # dt should be naive, in system (+1) TZ
            self.assertTrue(timezone.is_naive(dt))

            # The three timezones are indeed distinct.
            naive_now = datetime.now()
            algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
            django_offset = timezone.get_current_timezone().utcoffset(naive_now)
            utc_offset = timezone.utc.utcoffset(naive_now)
            self.assertGreater(algiers_offset, utc_offset)
            self.assertLess(django_offset, utc_offset)

            # dt and naive_now should be the same effective time.
            self.assertLess(abs(dt - naive_now), timedelta(seconds=2))
            # If we convert dt to an aware object using the Algiers
            # timezone then it should be the same effective time to
            # now_in_algiers.
            _dt = timezone.make_aware(dt, now_in_algiers.tzinfo)
            self.assertLess(abs(_dt - now_in_algiers), timedelta(seconds=2))
Example #31
  def save(self, *args, **kwargs):
    # Update the field with every change
    self.lastmodified = datetime.now()

    # Call the real save() method
    super(AuditModel, self).save(*args, **kwargs)
Example #32
 def reset_timer(self):
     self.start_time = datetime.now()
     self.last_round = datetime.now()
Example #33
from pykronos.client import KronosClient
from pykronos.client import ResultOrder
from pykronos.common.time import datetime_to_kronos_time
from datetime import datetime
from datetime import timedelta
from dateutil.tz import tzutc

"""
## Creating A Client

Create a Kronos client with the URL of a running server.  Optionally
provide a `namespace` to explicitly work with events in a particular
namespace.
"""
kc = KronosClient('http://localhost:8151', namespace='kronos')
start = datetime.now(tz=tzutc())

"""
### A Non-blocking Client

Pass a `blocking=False` to the KronosClient constructor for a client
that won't block on the Kronos server when you insert data.  A
background thread will batch up data and send it to the server.  This
is useful for logging/metrics collection on machines that can't wait
for a write acknowledgement.  An optional `sleep_block` argument,
defaulting to `0.1`, specifies how many seconds to wait between batches
to the server.  If the process running the client crashes before
flushing events, those events will be lost.
"""
nonblocking = KronosClient('http://localhost:8151', namespace='kronos',
                           blocking=False)
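
"""
### Writing An Event (illustrative)

A minimal sketch of sending one event through the non-blocking client. It
assumes, as in the pykronos examples, that `put` takes a mapping from stream
name to a list of event dicts and that the optional `@time` field holds a
Kronos timestamp; the stream name below is made up for illustration.
"""
nonblocking.put({'demo.tutorial.events': [
    {'@time': datetime_to_kronos_time(start), 'source': 'readme-example'}]})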
Example #34
TCP_IP = '127.0.0.1'
TCP_PORT = 5005
BUFFER_SIZE = 1024  

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)

conn, addr = s.accept()

conn.setblocking(0)
print ('Connection address:', addr)
print(conn.setblocking(0))
tm = -1
while True:
    t = datetime.now()
    print(t.second)
    if t.minute > tm + 1:
        conn.send(K3)
        tm = t.minute
    if keyboard.is_pressed('s'):
        print('Sending Roll ID!')
        conn.send(A1)
    if keyboard.is_pressed('a'):
        print('Sending Roll ID!')
        conn.send(A1_2)
    if keyboard.is_pressed('d'):
        print('Sending Roll ID!')
        conn.send(A1_3)
    ready = select.select([conn], [], [], 1)
    if ready[0]:
Example #35
    def control_driving(self, car_controls, sensing_info):
        # =========================================================== #
        # Area for writing code about driving rule ================= #
        # =========================================================== #
        # Editing area starts from here
        #

        logging.debug(
            "=========================================================")
        logging.debug("to middle: {}".format(sensing_info.to_middle))

        logging.debug("collided: {}".format(sensing_info.collided))
        logging.debug("car speed: {} km/h".format(sensing_info.speed))

        logging.debug("is moving forward: {}".format(
            sensing_info.moving_forward))
        logging.debug("moving angle: {}".format(sensing_info.moving_angle))
        logging.debug("lap_progress: {}".format(sensing_info.lap_progress))

        logging.debug("track_forward_angles: {}".format(
            sensing_info.track_forward_angles))
        logging.debug("track_forward_obstacles: {}".format(
            sensing_info.track_forward_obstacles))
        logging.debug("opponent_cars_info: {}".format(
            sensing_info.opponent_cars_info))
        logging.debug(
            "=========================================================")

        ###########################################################################

        self.speed_list.append(sensing_info.speed)
        self.speed_list = self.speed_list[1:] if len(
            self.speed_list) > 10 else self.speed_list

        # Determine whether this is a collision situation
        if not self.collision_flag:
            if sensing_info.collided \
                    and len(list(speed for speed in self.speed_list[-10:] if abs(speed) < 1)) > 9:
                self.collision_flag = True
                self.collision_time = datetime.now()

        # If collided - reverse for 2 seconds / check direction
        if self.collision_flag:
            if self.collision_time + timedelta(
                    milliseconds=2000) > datetime.now():
                car_controls.throttle = -0.5
            else:
                # After 2 seconds, escape / check direction (verify the car was actually reversing)
                if sensing_info.moving_forward:
                    self.moving_backward = True
                    self.moving_backward_time = datetime.now()
                self.collision_flag = False
                self.escape_flag = True
                self.escape_time = datetime.now()

        # Determine whether the car is driving the wrong way
        if not self.collision_flag and not self.moving_backward:
            if not sensing_info.moving_forward and sensing_info.speed > 0:
                self.moving_backward = True
                self.moving_backward_time = datetime.now()

        # If the car is driving the wrong way
        if not self.collision_flag and self.moving_backward:
            if self.moving_backward_time + timedelta(
                    milliseconds=1000) > datetime.now():
                # Recover from wrong-way driving
                car_controls.steering = 1 if car_controls.steering > 0 else -1
                car_controls.throttle = 0.3
            else:
                car_controls.steering = 1 if car_controls.steering < 0 else -1
                car_controls.throttle = 0.3
                if sensing_info.speed > 0:
                    self.moving_backward = False
                else:
                    self.moving_backward_time = datetime.now()

        # Escaped / normal driving situation
        if not self.collision_flag and not self.moving_backward:
            if self.escape_flag:
                if self.escape_time + timedelta(
                        milliseconds=1000) > datetime.now():
                    weight = 2
                else:
                    weight = 1
                    self.escape_flag = False
            else:
                weight = 1
            # Assign the steering value
            car_controls.steering = weight * calculate_steering(
                self, sensing_info)
            # Adjust the speed
            car_controls.throttle = 1 if sensing_info.speed < get_limit_speed(
                sensing_info) else 0

        logging.debug("steering:{}, throttle:{}, brake:{}".format(
            car_controls.steering, car_controls.throttle, car_controls.brake))

        #
        # Editing area ends
        # ==========================================================#
        return car_controls
Example #36
from drive_controller import DrivingController
from datetime import datetime, timedelta
import logging
import math

logging.basicConfig(filename='{}.log'.format(
    datetime.now().strftime('%Y-%m-%d-%H-%M')),
                    level=logging.DEBUG)


class DrivingClient(DrivingController):
    def __init__(self):
        # =========================================================== #
        #  Area for member variables =============================== #
        # =========================================================== #
        # Editing area starts from here
        #
        self.is_debug = False
        self.collision_flag = False
        self.collision_time = 0
        self.escape_flag = False
        self.escape_time = 0
        self.moving_backward = False
        self.moving_backward_time = 0
        #
        #         # Editing area ends
        #         # ==========================================================#
        super().__init__()
        self.speed_list = []
        # Assign the road range
        track_range = int((self.half_road_limit - 1.25) * 2 / 3)
Example #37
def get_data():
    """Get the data from various sources
    In : -
    Out : df        : dataframe
         UPDATETIME : Date and time from the last update"""
    with st.spinner(f"GETTING ALL DATA ..."):
        init()
        # #CONFIG

        if platform.processor() != "":
            data = [

                {
                    "url": "C:\\Users\\rcxsm\\Documents\\phyton_scripts\\covid19_seir_models\\input_local\\owid-covid-data.csv",
                    "name": "owid",
                    "delimiter": ",",
                    "key": "date",
                    "key2": "location",
                    "dateformat": "%Y-%m-%d",
                    "groupby": None,
                    "fileformat": "csv",
                    "where_field": None,
                    "where_criterium": None
                },



            ]

        else:


            data = [

                {
                    "url": "https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv",
                    "name": "owid",
                    "delimiter": ",",
                    "key": "date",
                    "key2": "location",
                    "dateformat": "%Y-%m-%d",
                    "groupby": None,
                    "fileformat": "csv",
                    "where_field": None,
                    "where_criterium": None
                },




            ]

        type_of_join = "outer"
        d = 0

        # Read first datafile
        df_temp_x = download_data_file(
            data[d]["url"], data[d]["name"], data[d]["delimiter"], data[d]["fileformat"]
        )
        # df_temp_x = df_temp_x.replace({pd.np.nan: None})
        df_temp_x[data[d]["key"]] = pd.to_datetime(
            df_temp_x[data[d]["key"]], format=data[d]["dateformat"]
        )
        firstkey = data[d]["key"]
        firstkey2 = data[d]["key2"]


        if data[d]["where_field"] != None:
            where_field = data[d]["where_field"]
            df_temp_x = df_temp_x.loc[df_temp_x[where_field] == data[d]["where_criterium"]]

        if data[d]["groupby"] is None:
            df_temp_x = df_temp_x.sort_values(by=firstkey)
            df_ungrouped = None

        else:
            df_temp_x = (
                df_temp_x.groupby([data[d]["key"]], sort=True).sum().reset_index()
            )
            df_ungrouped = df_temp_x.reset_index()
            firstkey_ungrouped = data[d]["key"]
        df = (
            df_temp_x  # df_temp_x is the base onto which the other data sources are merged
        )
        # Read the other files

        # for d in range(1, len(data)):

        #     df_temp_x = download_data_file(
        #         data[d]["url"],
        #         data[d]["name"],
        #         data[d]["delimiter"],
        #         data[d]["fileformat"],
        #     )
        #     # df_temp_x = df_temp_x.replace({pd.np.nan: None})
        #     oldkey = data[d]["key"]
        #     newkey = "key" + str(d)
        #     oldkey2 = data[d]["key2"]
        #     newkey2 = "key2_" + str(d)
        #     df_temp_x = df_temp_x.rename(columns={oldkey: newkey})
        #     df_temp_x = df_temp_x.rename(columns={oldkey2: newkey2})
        #     #st.write (df_temp_x.dtypes)
        #     try:
        #         df_temp_x[newkey] = pd.to_datetime(df_temp_x[newkey], format=data[d]["dateformat"]           )
        #     except:
        #         st.error(f"error in {oldkey} {newkey}")
        #         st.stop()

        #     if data[d]["where_field"] != None:
        #         where_field = data[d]["where_field"]
        #         df_temp_x = df_temp_x.loc[df_temp_x[where_field] == data[d]["where_criterium"]]

        #     if data[d]["groupby"] != None:
        #         if df_ungrouped is not None:
        #             df_ungrouped = df_ungrouped.append(df_temp_x, ignore_index=True)
        #             print(df_ungrouped.dtypes)
        #             print(firstkey_ungrouped)
        #             print(newkey)
        #             df_ungrouped.loc[
        #                 df_ungrouped[firstkey_ungrouped].isnull(), firstkey_ungrouped
        #             ] = df_ungrouped[newkey]

        #         else:
        #             df_ungrouped = df_temp_x.reset_index()
        #             firstkey_ungrouped = newkey
        #         df_temp_x = df_temp_x.groupby([newkey], sort=True).sum().reset_index()

        #     df_temp = pd.merge(
        #         df_temp, df_temp_x, how=type_of_join, left_on=[firstkey, firstkey2], right_on=[newkey, newkey2]
        #     )
        #     df_temp.loc[df_temp[firstkey].isnull(), firstkey] = df_temp[newkey]
        #     df_temp = df_temp.sort_values(by=firstkey)
        # # the tool is built around "date"
        # df = df_temp.rename(columns={firstkey: "date"})

        UPDATETIME = datetime.now()


        return df,  UPDATETIME
Example #38
BOLD = ',,1,,,,,,,,,,,,,'  # bold
NONE = ',,,,,,,,,,,,,,,'  # no formatting
"""
Format Descriptor Table
Position	Lookup Property		Example Value		Format String
0			fontFamily			0 = Arial, default	"0,,,,,,,,,,,,,,,"
1			fontSize			0 = 8 pt, default	",0,,,,,,,,,,,,,,"
2			bold				1 = on				",,1,,,,,,,,,,,,,"
3			italic				1 = on				",,,1,,,,,,,,,,,,"
4			underline			1 = on				",,,,1,,,,,,,,,,,"
5			strikethrough		1 = on				",,,,,1,,,,,,,,,,"
6			horizontalAlign		2 = center			",,,,,,2,,,,,,,,,"
7			verticalAlign		2 = middle			",,,,,,,2,,,,,,,,"
8			color (text)		4 = #FEEEF0			",,,,,,,,4,,,,,,,"
9			color (background)	8 = #E6F5FE			",,,,,,,,,8,,,,,,"
10			color (taskbar)		9 = #F3E5FA			",,,,,,,,,,9,,,,,"
11			currency			13 = USD			",,,,,,,,,,,13,,,,"
12			decimalCount		3 = 3 decimal places	",,,,,,,,,,,,3,,,"
13			thousandsSeparator	1 = on				",,,,,,,,,,,,,1,,"
14			numberFormat		2 = currency		",,,,,,,,,,,,,,2,"
15			textWrap			1 = on				",,,,,,,,,,,,,,,1"
"""

# myTable = get_2D_array()
# for entry in myTable:
# 	print(entry)

weekday = datetime.now().weekday()
print(weekday)

print("Done")
Example #39
    def test(self):
        model_dict = self.generator_init()
        model, image_crit = model_dict['model'], model_dict['crit']
        test_data, test_loader = self.get_test_data(self.test_data_dir)
        data_name = self.args.test_data_dir.split('/')[-1]
        print('evaling %s' % data_name)
        if self.args.rec == 'moran':
            moran = self.MORAN_init()
            moran.eval()
        elif self.args.rec == 'aster':
            aster, aster_info = self.Aster_init()
            aster.eval()
        elif self.args.rec == 'crnn':
            crnn = self.CRNN_init()
            crnn.eval()
        # print(sum(p.numel() for p in moran.parameters()))
        if self.args.arch != 'bicubic':
            for p in model.parameters():
                p.requires_grad = False
            model.eval()
        n_correct = 0
        sum_images = 0
        metric_dict = {'psnr': [], 'ssim': [], 'accuracy': 0.0, 'psnr_avg': 0.0, 'ssim_avg': 0.0}
        current_acc_dict = {data_name: 0}
        time_begin = time.time()
        sr_time = 0
        for i, data in (enumerate(test_loader)):
            images_hr, images_lr, label_strs = data
            val_batch_size = images_lr.shape[0]
            images_lr = images_lr.to(self.device)
            images_hr = images_hr.to(self.device)
            sr_beigin = time.time()
            images_sr = model(images_lr)

            # images_sr = images_lr
            sr_end = time.time()
            sr_time += sr_end - sr_beigin
            metric_dict['psnr'].append(self.cal_psnr(images_sr, images_hr))
            metric_dict['ssim'].append(self.cal_ssim(images_sr, images_hr))

            if self.args.rec == 'moran':
                moran_input = self.parse_moran_data(images_sr[:, :3, :, :])
                moran_output = moran(moran_input[0], moran_input[1], moran_input[2], moran_input[3], test=True,
                                     debug=True)
                preds, preds_reverse = moran_output[0]
                _, preds = preds.max(1)
                sim_preds = self.converter_moran.decode(preds.data, moran_input[1].data)
                pred_str_sr = [pred.split('$')[0] for pred in sim_preds]
            elif self.args.rec == 'aster':
                aster_dict_sr = self.parse_aster_data(images_sr[:, :3, :, :])
                aster_output_sr = aster(aster_dict_sr)
                pred_rec_sr = aster_output_sr['output']['pred_rec']
                pred_str_sr, _ = get_str_list(pred_rec_sr, aster_dict_sr['rec_targets'], dataset=aster_info)

                aster_dict_lr = self.parse_aster_data(images_lr[:, :3, :, :])
                aster_output_lr = aster(aster_dict_lr)
                pred_rec_lr = aster_output_lr['output']['pred_rec']
                pred_str_lr, _ = get_str_list(pred_rec_lr, aster_dict_lr['rec_targets'], dataset=aster_info)
            elif self.args.rec == 'crnn':
                crnn_input = self.parse_crnn_data(images_sr[:, :3, :, :])
                crnn_output = crnn(crnn_input)
                _, preds = crnn_output.max(2)
                preds = preds.transpose(1, 0).contiguous().view(-1)
                preds_size = torch.IntTensor([crnn_output.size(0)] * val_batch_size)
                pred_str_sr = self.converter_crnn.decode(preds.data, preds_size.data, raw=False)
            for pred, target in zip(pred_str_sr, label_strs):
                if str_filt(pred, 'lower') == str_filt(target, 'lower'):
                    n_correct += 1
            sum_images += val_batch_size
            torch.cuda.empty_cache()
            print('Evaluation: [{}][{}/{}]\t'
                  .format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                          i + 1, len(test_loader), ))
            # self.test_display(images_lr, images_sr, images_hr, pred_str_lr, pred_str_sr, label_strs, str_filt)
        time_end = time.time()
        psnr_avg = sum(metric_dict['psnr']) / len(metric_dict['psnr'])
        ssim_avg = sum(metric_dict['ssim']) / len(metric_dict['ssim'])
        acc = round(n_correct / sum_images, 4)
        fps = sum_images/(time_end - time_begin)
        psnr_avg = round(psnr_avg.item(), 6)
        ssim_avg = round(ssim_avg.item(), 6)
        current_acc_dict[data_name] = float(acc)
        # result = {'accuracy': current_acc_dict, 'fps': fps}
        result = {'accuracy': current_acc_dict, 'psnr_avg': psnr_avg, 'ssim_avg': ssim_avg, 'fps': fps}
        print(result)
Example #40
if __name__ == '__main__':
    argv = sys.argv[1:]
    try:
        opts, args = getopt.getopt(argv, '', [
            'save', 'plan', 'init', 'no-ts', 'inittime=', 'logging=', 'parent='
        ])
    except getopt.GetoptError:
        print('startup: Could not parse parameters.')
        sys.exit(2)
    initialization_time = None
    logging = 0
    parent = None
    for opt, arg in opts:
        if opt == '--save':
            _save_log = True
        elif opt == '--init':
            rospy.on_shutdown(rospy_exit_handler)
        elif opt == '--inittime':
            initialization_time = arg
        elif opt in ['--logging']:
            l = int(arg)
            if l in [0, 1, 2]:
                logging = l
        elif opt in ['--parent']:
            parent = int(arg)

    if initialization_time is None:
        initialization_time = datetime.now().isoformat('-')
    print('startup: Going to execute main.')
    main(initialization_time, logging)
Example #41
def get_fecha():
    fecha = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    return fecha
Example #42
    def train(self):
        cfg = self.config.TRAIN
        train_dataset, train_loader = self.get_train_data()
        val_dataset_list, val_loader_list = self.get_val_data()
        model_dict = self.generator_init()
        model, image_crit = model_dict['model'], model_dict['crit']

        aster, aster_info = self.Aster_init()
        optimizer_G = self.optimizer_init(model)

        if not os.path.exists(cfg.ckpt_dir):
            os.makedirs(cfg.ckpt_dir)
        best_history_acc = dict(
            zip([val_loader_dir.split('/')[-1] for val_loader_dir in self.config.TRAIN.VAL.val_data_dir],
                [0] * len(val_loader_list)))
        best_model_acc = copy.deepcopy(best_history_acc)
        best_model_psnr = copy.deepcopy(best_history_acc)
        best_model_ssim = copy.deepcopy(best_history_acc)
        best_acc = 0
        converge_list = []

        for epoch in range(cfg.epochs):
            for j, data in (enumerate(train_loader)):
                model.train()

                for p in model.parameters():
                    p.requires_grad = True
                iters = len(train_loader) * epoch + j + 1

                images_hr, images_lr, label_strs = data
                if self.args.syn:
                    images_lr = nn.functional.interpolate(images_hr, (self.config.TRAIN.height // self.scale_factor,
                                                                      self.config.TRAIN.width // self.scale_factor),
                                                          mode='bicubic')
                    images_lr = images_lr.to(self.device)
                else:
                    images_lr = images_lr.to(self.device)
                images_hr = images_hr.to(self.device)

                image_sr = model(images_lr)
                loss_im = image_crit(image_sr, images_hr).mean() * 100

                optimizer_G.zero_grad()
                loss_im.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), 0.25)
                optimizer_G.step()

                # torch.cuda.empty_cache()
                if iters % cfg.displayInterval == 0:
                    print('[{}]\t'
                          'Epoch: [{}][{}/{}]\t'
                          'vis_dir={:s}\t'
                          '{:.3f} \t'
                          .format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                                  epoch, j + 1, len(train_loader),
                                  self.vis_dir,
                                  float(loss_im.data)))

                if iters % cfg.VAL.valInterval == 0:
                    print('======================================================')
                    current_acc_dict = {}
                    for k, val_loader in enumerate(val_loader_list):
                        data_name = self.config.TRAIN.VAL.val_data_dir[k].split('/')[-1]
                        print('evaling %s' % data_name)
                        metrics_dict = self.eval(model, val_loader, image_crit, iters, aster, aster_info)
                        converge_list.append({'iterator': iters,
                                              'acc': metrics_dict['accuracy'],
                                              'psnr': metrics_dict['psnr_avg'],
                                              'ssim': metrics_dict['ssim_avg']})
                        acc = metrics_dict['accuracy']
                        current_acc_dict[data_name] = float(acc)
                        if acc > best_history_acc[data_name]:
                            best_history_acc[data_name] = float(acc)
                            best_history_acc['epoch'] = epoch
                            print('best_%s = %.2f%%*' % (data_name, best_history_acc[data_name] * 100))

                        else:
                            print('best_%s = %.2f%%' % (data_name, best_history_acc[data_name] * 100))
                    if sum(current_acc_dict.values()) > best_acc:
                        best_acc = sum(current_acc_dict.values())
                        best_model_acc = current_acc_dict
                        best_model_acc['epoch'] = epoch
                        best_model_psnr[data_name] = metrics_dict['psnr_avg']
                        best_model_ssim[data_name] = metrics_dict['ssim_avg']
                        best_model_info = {'accuracy': best_model_acc, 'psnr': best_model_psnr, 'ssim': best_model_ssim}
                        print('saving best model')
                        self.save_checkpoint(model, epoch, iters, best_history_acc, best_model_info, True, converge_list)

                if iters % cfg.saveInterval == 0:
                    best_model_info = {'accuracy': best_model_acc, 'psnr': best_model_psnr, 'ssim': best_model_ssim}
                    self.save_checkpoint(model, epoch, iters, best_history_acc, best_model_info, False, converge_list)
Example #43
def utcTimestampToDatetime(stamp):
	# Second half is to offset for DST since DST info is lost in mktime conversion
	# timetuple().tm_isdst returns 0 if not DST, 1 if it is DST for the given timezone associated with datetime.now()
	return datetime.fromtimestamp(mktime(stamp)).replace(tzinfo=timezone.utc) + timedelta(hours = datetime.now().timetuple().tm_isdst)
Example #44
def train():
    """Train CIFAR-10 for a number of steps."""
    with tf.Graph().as_default(), tf.device('/cpu:0'):
        # Create a variable to count the number of train() calls. This equals the
        # number of batches processed * FLAGS.num_gpus.
        global_step = tf.get_variable('global_step', [],
                                      initializer=tf.constant_initializer(0),
                                      trainable=False)

        # Calculate the learning rate schedule.
        num_batches_per_epoch = (cifar10.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN /
                                 FLAGS.batch_size)
        decay_steps = int(num_batches_per_epoch * cifar10.NUM_EPOCHS_PER_DECAY)

        # Decay the learning rate exponentially based on the number of steps.
        lr = tf.train.exponential_decay(cifar10.INITIAL_LEARNING_RATE,
                                        global_step,
                                        decay_steps,
                                        cifar10.LEARNING_RATE_DECAY_FACTOR,
                                        staircase=True)

        # Create an optimizer that performs gradient descent.
        opt = tf.train.GradientDescentOptimizer(lr)

        # Calculate the gradients for each model tower.
        tower_grads = []
        with tf.variable_scope(tf.get_variable_scope()):
            for i in xrange(FLAGS.num_gpus):
                with tf.device('/gpu:%d' % i):
                    with tf.name_scope('%s_%d' %
                                       (cifar10.TOWER_NAME, i)) as scope:
                        # Calculate the loss for one tower of the CIFAR model. This function
                        # constructs the entire CIFAR model but shares the variables across
                        # all towers.
                        loss = tower_loss(scope)

                        # Reuse variables for the next tower.
                        tf.get_variable_scope().reuse_variables()

                        # Retain the summaries from the final tower.
                        summaries = tf.get_collection(tf.GraphKeys.SUMMARIES,
                                                      scope)

                        # Calculate the gradients for the batch of data on this CIFAR tower.
                        grads = opt.compute_gradients(loss)

                        # Keep track of the gradients across all towers.
                        tower_grads.append(grads)

        # We must calculate the mean of each gradient. Note that this is the
        # synchronization point across all towers.
        grads = average_gradients(tower_grads)

        # Add a summary to track the learning rate.
        summaries.append(tf.summary.scalar('learning_rate', lr))

        # Add histograms for gradients.
        for grad, var in grads:
            if grad is not None:
                summaries.append(
                    tf.summary.histogram(var.op.name + '/gradients', grad))

        # Apply the gradients to adjust the shared variables.
        apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

        # Add histograms for trainable variables.
        for var in tf.trainable_variables():
            summaries.append(tf.summary.histogram(var.op.name, var))

        # Track the moving averages of all trainable variables.
        variable_averages = tf.train.ExponentialMovingAverage(
            cifar10.MOVING_AVERAGE_DECAY, global_step)
        variables_averages_op = variable_averages.apply(
            tf.trainable_variables())

        # Group all updates to into a single train op.
        train_op = tf.group(apply_gradient_op, variables_averages_op)

        # Create a saver.
        saver = tf.train.Saver(tf.global_variables())

        # Build the summary operation from the last tower summaries.
        summary_op = tf.summary.merge(summaries)

        # Build an initialization operation to run below.
        init = tf.global_variables_initializer()

        # Start running operations on the Graph. allow_soft_placement must be set to
        # True to build towers on GPU, as some of the ops do not have GPU
        # implementations.
        sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True,
            log_device_placement=FLAGS.log_device_placement))
        sess.run(init)

        # Start the queue runners.
        tf.train.start_queue_runners(sess=sess)

        summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)

        for step in xrange(FLAGS.max_steps):
            start_time = time.time()
            _, loss_value = sess.run([train_op, loss])
            duration = time.time() - start_time

            assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

            if step % 10 == 0:
                num_examples_per_step = FLAGS.batch_size * FLAGS.num_gpus
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = duration / FLAGS.num_gpus

                format_str = (
                    '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                    'sec/batch)')
                print(format_str % (datetime.now(), step, loss_value,
                                    examples_per_sec, sec_per_batch))

            if step % 100 == 0:
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)

            # Save the model checkpoint periodically.
            if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
Beispiel #45
0
    def parse(self, response):
        record = Selector(response)

        data = record.xpath('//div[contains(@id,"resultBlockWrapper")]')

        for i in data:
            item = MagicbuyhydraItem()

            item['name_lister'] = 'None'
            item['Details'] = 'None'
            item['listing_by'] = 'None'
            item['address'] = 'None'
            item['sublocality'] = 'None'
            item['age'] = '0'
            item['google_place_id'] = 'None'
            item['lat'] = '0'
            item['longt'] = '0'
            item['Possession'] = '0'
            item['Launch_date'] = '0'
            item['mobile_lister'] = 'None'
            item['areacode'] = 'None'
            item['management_by_landlord'] = 'None'
            item['monthly_rent'] = '0'
            item['price_per_sqft'] = '0'

            item['scraped_time'] = dt.now().strftime('%m/%d/%Y %H:%M:%S')

            item['city'] = 'Hyderabad'

            item['Building_name'] = i.xpath('div/input[contains(@id,"projectName")]/@value').extract_first()
            if item['Building_name'] == '':
                item['Building_name'] = 'None'

            try:
                item['longt'] = i.xpath('div/div[@class="srpColm2"]/div[@class="proColmleft"]/div[@class="proNameWrap proNameWrapBuy"]/div[@class="proNameColm1"]/span[@class="seeOnMapLink seeOnMapLinkBuy"]/span[@class="stopParentLink"]/@onclick').extract_first().split('&')[0].split('?')[-1].split("=")[-1] # .xpath('.//input[@itemprop="latitude"]/@value').extract_first()
                if item['longt'] == '':
                    item['longt'] = '0'
            except:
                item['longt'] = '0'

            try:
                item['lat'] = i.xpath('div/div[@class="srpColm2"]/div[@class="proColmleft"]/div[@class="proNameWrap proNameWrapBuy"]/div[@class="proNameColm1"]/span[@class="seeOnMapLink seeOnMapLinkBuy"]/span[@class="stopParentLink"]/@onclick').extract_first().split('&')[1].split("=")[-1]
                if item['lat'] == '':
                    item['lat'] = '0'
            except:
                item['lat'] = '0'

            item['platform'] = 'magicbricks'
            item['carpet_area'] = '0'

            ids = i.xpath('@id').extract_first()
            item['data_id'] = re.findall('[0-9]+', ids)[0]

            item['config_type'] = i.xpath('.//input[contains(@id,"bedroomVal")]/@value').extract_first().replace('>', '')
            item['config_type'] = item['config_type']+'BHK'

            item['property_type'] = i.xpath('.//p[@class="proHeading"]/a/input[2]/@value').extract_first()
            if (item['property_type'] == 'Studio Apartment'):
                item['config_type'] = '1RK'

            ####### config_type is assumed as 'None' for default #########

            if item['config_type'] == 'BHK':
                item['config_type'] = 'None'
            
            try:
                sqf = i.xpath('.//input[contains(@id,"propertyArea")]/@value').extract_first()
                if 'sqft' in sqf:
                    item['Bua_sqft'] = ''.join(re.findall('[0-9]+', sqf))
                elif 'kottah' in sqf:
                    # 1 kottah is converted to 720 sqft
                    item['Bua_sqft'] = str(int(re.findall('[0-9]+', sqf)[0]) * 720)
                else:
                    item['Bua_sqft'] = '0'
            except:
                item['Bua_sqft'] = '0'

            item['Locality'] = i.xpath('div/div[@class="srpColm2"]/div[@class="proColmleft"]/div[1]/div/p/a/abbr/span[1]/span/text()').extract_first()


            stat = i.xpath('.//div[1]/div[2]/div[1]/div[2]/div[1]/text()').extract_first()
            #print("STAT: ", stat)
            try:               
                if 'Under' in stat:
                    item['Status'] = 'Under Construction'
                    poss = stat.split('Ready by ')[-1].replace("'", "").replace(')','').replace('\n', '').replace('. Freehold', '')
                    #print("POSSESSION: ", poss)
                    yr = str(re.findall('[0-9]+', poss))
                    yr = yr.replace('[', '').replace(']', '').replace('u', '').replace("'", "")
                    month_map = {'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04',
                                 'May': '05', 'Jun': '06', 'Jul': '07', 'Aug': '08',
                                 'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12'}
                    for month_abbr, month_num in month_map.items():
                        if month_abbr in poss:
                            item['Possession'] = '01/' + month_num + '/' + yr + ' 00:00:00'
                else:
                    item['Status'] = 'Ready to move'
                    item['Possession'] = '0'
            except:
                item['Possession'] = '0'
                item['Status'] = 'Ready to move'


            price = i.xpath('.//div/div[@class="srpColm2"]/div[@class="proColmRight"]/div/div/div/span/text()').extract_first()
            #print("PRICE: ", price)
            if price is None:
                price = i.xpath('.//span[contains(@id,"sqrFtPriceField")]/text()').extract_first()
                price = ''.join(re.findall('[0-9]+', price))
                #print("PRICE: ", price)
            if price is not None:
                if 'Lac' in price:
                    item['Selling_price'] = str(float(price.split()[0])*100000)
                elif 'Cr' in price:
                    item['Selling_price'] = str(float(price.split()[0])*10000000)
                else:
                    item['Selling_price'] = '0'
                if item['Selling_price'] == 'None':
                    item['Selling_price'] = '0'
            else:
                item['Selling_price'] = '0'

            if item['Selling_price'] == '0':
                item['price_on_req'] = 'true'
            else:
                item['price_on_req'] = 'false'

            try:
                sqft_per = i.xpath('div/div[@class="srpColm2"]/div[@class="proColmRight"]/div[@class="proPriceColm2"]/div[@class="proPriceColm2"]/div[@class="sqrPrice"]/span[@class="sqrPriceField"]/text()').extract_first()
                if sqft_per:
                    item['price_per_sqft'] = ''.join(re.findall('[0-9]+', sqft_per))
                else:
                    item['price_per_sqft'] = '0'
                if 'kottah' in sqf:
                    item['price_per_sqft'] = str(float(item['price_per_sqft']) / 720)
            except:
                item['price_per_sqft'] = '0'
            
            
            try:
                item['listing_by'] = i.xpath('.//div[@class="proAgentWrap"]/div[1]/div/div[1]/text()').extract_first()
                item['name_lister'] = i.xpath('.//div[@class="proAgentWrap"]/div[@class="comNameElip"]/text()').extract_first().replace("\n", "")
            except:
                item['listing_by'] = 'None'
                item['name_lister'] = 'None'

            item['txn_type'] = i.xpath('div/input[contains(@id,"transactionType")]/@value').extract_first()

            day = i.xpath('div/input[contains(@id,"createDate")]/@value').extract_first()
            try:
                item['listing_date'] = dt.strftime(dt.strptime(day, "%b %d, '%y"), '%m/%d/%Y %H:%M:%S')
                item['updated_date'] = item['listing_date']
            except:
                item['listing_date'] = '0'
                item['updated_date'] = item['listing_date']

            if (((not item['Bua_sqft']=='0') and (not item['Building_name']=='None') and (not item['lat']=='0')) or ((not item['Selling_price'] == '0') and (not item['Bua_sqft']=='0') and (not item['Building_name']=='None') and (not item['lat']=='0')) or ((not item['price_per_sqft'] == '0') and (not item['Bua_sqft']=='0') and (not item['Building_name']=='None') and (not item['lat']=='0'))):
                item['quality4'] = 1
            elif (((not item['price_per_sqft'] == '0') and (not item['Building_name']=='None') and (not item['lat']=='0')) or ((not item['Selling_price'] == '0') and (not item['Bua_sqft']=='0') and (not item['lat']=='0')) or ((not item['Bua_sqft']=='0') and (not item['lat']=='0')) or ((not item['Selling_price'] == '0') and (not item['Bua_sqft']=='0') and (not item['Building_name']=='None')) or ((not item['Bua_sqft']=='0') and (not item['Building_name']=='None'))):
                item['quality4'] = 0.5
            else:
                item['quality4'] = 0
            if ((not item['Building_name'] == 'None') and (not item['listing_date'] == '0') and (not item['txn_type'] == 'None') and (not item['property_type'] == 'None') and ((not item['Selling_price'] == '0'))):
                item['quality1'] = 1
            else:
                item['quality1'] = 0

            if ((not item['Launch_date'] == '0') and (not item['Possession'] == '0')):
                item['quality2'] = 1
            else:
                item['quality2'] = 0

            if ((not item['mobile_lister'] == 'None') or (not item['listing_by'] == 'None') or (not item['name_lister'] == 'None')):
                item['quality3'] = 1
            else:
                item['quality3'] = 0
            yield item
Beispiel #46
0
def show_slice(
		ax, 
		filename, 
		axis, 
		where, 
		method='nearest', 
		rsx=100, 
		rsy=100, 
		rsz=100, 
		window=(0., 1.), 
		nlevels=100, 
		vmin=None, 
		vmax=None, 
		extend='neither', 
		logscale=False, 
		save=False, 
		zorder=0, 
		**kwargs
	):
	""" Opens a file 'filename' and shows the data at a slice perpendicular
	to the 'axis' axis at a relative position 'where', where 'where' ranges
	from 0. to 1. """

	# Keyword arguments (fall back to sensible defaults when not supplied)
	xyzunits = kwargs.get('xyzunits', '')
	vunits = kwargs.get('vunits', '')
	cmap = kwargs.get('cmap', plt.cm.jet)
	show_data_points = kwargs.get('show_data_points', False)
	show_intp_points = kwargs.get('show_intp_points', False)

	# Set colors for bad values
	if False:
		cmap.set_under(color='black')
		cmap.set_over(color='black')
	if False:
		cmap.set_bad(color='black')

	# Load data
	data = np.loadtxt(filename, delimiter=',')
	x = data[:,0]
	y = data[:,1]
	z = data[:,2]
	v = data[:,3]

	# Sides
	xspan = x.max() - x.min()
	yspan = y.max() - y.min()
	zspan = z.max() - z.min()

	# Window
	wl, wr = window
	# Crop the window limits just in case
	wl = max(wl, 0.)
	wl = min(wl, 1.)
	wr = max(wr, 0.)
	wr = min(wr, 1.)

	# Regular mesh
	xi_ = np.linspace(x.min(), x.max(), rsx)
	yi_ = np.linspace(y.min(), y.max(), rsy)
	zi_ = np.linspace(z.min(), z.max(), rsz)
	
	if axis == 'x':
		# Indices for the windows
		wl_i = int(rsx * wl)
		wr_i = int(rsx * wr)
		# Create the mask for the data
		xleft = x.min() + xspan * wl
		xrght = x.min() + xspan * wr
		mask = np.ma.masked_where((x >= xleft) & (x <= xrght), x).mask
		# Crop the corresponding array for the regular mesh
		xi_ = xi_[wl_i:wr_i]
	elif axis == 'y':
		# Indices for the windows
		wl_i = int(rsy * wl)
		wr_i = int(rsy * wr)
		# Create the mask for the data
		yleft = y.min() + yspan * wl
		yrght = y.min() + yspan * wr
		mask = np.ma.masked_where((y >= yleft) & (y <= yrght), y).mask
		# Crop the corresponding array for the regular mesh
		yi_ = yi_[wl_i:wr_i]
	elif axis == 'z':
		# Indices for the windows
		wl_i = int(rsz * wl)
		wr_i = int(rsz * wr)
		# Create the mask for the data
		zleft = z.min() + zspan * wl
		zrght = z.min() + zspan * wr
		mask = np.ma.masked_where((z >= zleft) & (z <= zrght), z).mask
		# Crop the corresponding array for the regular mesh
		zi_ = zi_[wl_i:wr_i]

	# 'mesh' the regular mesh
	xi, yi, zi = np.meshgrid(xi_, yi_, zi_)

	# Mask data
	x = x[mask]
	y = y[mask]
	z = z[mask]
	v = v[mask]

	# Interpolation
	t0 = datetime.now()
	vi = griddata((x,y,z), v, (xi,yi,zi), method=method)
	t1 = datetime.now()
	print('Time needed: %f s'%((t1 - t0).total_seconds()))

	# Figure

	# Levels for colouring
	vi_mkd_cpd = np.ma.masked_where(np.isnan(vi), vi).compressed()
	vmin_, vmax_ = vi_mkd_cpd.min(), vi_mkd_cpd.max()
	if vmax is None:
		vmax = vmax_
	if vmin is None:
		vmin = vmin_
	# levels_i = np.linspace(vmin, vmax, nlevels)
	# Avoid an error due to a flat level list
	if vmin == vmax:
		vmin -= 0.1
		vmax += 0.1
	# Re-build the levels
	print('Limits:', vmin, vmax)
	levels_i = np.linspace(vmin, vmax, nlevels)
	loglevels = np.logspace(
				np.log10(min(np.abs(vmin), np.abs(vmax))), 
				np.log10(max(np.abs(vmin), np.abs(vmax))), 
				nlevels
			)
	# Minimum and maximum exponents
	min_exp = int(np.log10(min(np.abs(vmin), np.abs(vmax))))
	max_exp = int(np.log10(max(np.abs(vmin), np.abs(vmax)))) + 1
	# Ticks for the colorbar in case it's logscale
	cbticks = 10. ** np.arange(min_exp, max_exp + 1, 1)
	cbticks_ = cbticks.tolist()
	for i in range(2, 10, 1):
		cbticks_ = cbticks_ + (float(i) * cbticks).tolist()
	cbticks_.sort()
	# print(cbticks_, min_exp, max_exp)
	cbticklabels = []
	for c in cbticks_:
		if c in (20, 50, 100, 200, 500, 1000):
			cbticklabels.append('%i'%c)
		else:
			cbticklabels.append('')

	if axis == 'x':
		s = int(where * rsx) - wl_i
		if not logscale:
			cf = ax.contourf(
					yi[:, s, :], 
					zi[:, s, :], 
					vi[:, s, :], 
					levels_i, 
					cmap=cmap, 
					extend=extend, 
					zorder=zorder
				)
		else:
			cf = ax.contourf(
					yi[:, s, :], 
					zi[:, s, :], 
					np.abs(vi[:, s, :]), 
					loglevels, 
					cmap=cmap, 
					norm=LogNorm(
						vmin=min(np.abs(vmin), np.abs(vmax)), 
						vmax=max(np.abs(vmin), np.abs(vmax))
						), 
					# locator=ticker.LogLocator(), 
					zorder=zorder
				)
		if show_data_points:
			ax.plot(y, z, 'k.', markersize=0.6)
		if show_intp_points:
			ax.scatter(yi, zi, c='k', s=1)
		ax.set_title('x = %f'%xi[s, s, s] + ' ' + xyzunits)
		ax.set_xlabel('y (' + xyzunits + ')')
		ax.set_ylabel('z (' + xyzunits + ')')
	elif axis == 'y':
		s = int(where * rsy) - wl_i
		if not logscale:
			cf = ax.contourf(
					xi[s, :, :], 
					zi[s, :, :], 
					vi[s, :, :], 
					levels_i, 
					cmap=cmap, 
					extend=extend, 
					zorder=zorder
				)
		else:
			cf = ax.contourf(
					xi[s, :, :], 
					zi[s, :, :], 
					np.abs(vi[s, :, :]), 
					loglevels, 
					cmap=cmap, 
					norm=LogNorm(
						vmin=min(np.abs(vmin), np.abs(vmax)), 
						vmax=max(np.abs(vmin), np.abs(vmax))
						), 
					# locator=ticker.LogLocator(), 
					zorder=zorder
				)
		if show_data_points:
			ax.plot(x, z, 'k.', markersize=0.6)
		if show_intp_points:
			ax.scatter(xi, zi, c='k', s=1)
		ax.set_title('y = %f'%yi[s, s, s] + ' ' + xyzunits)
		ax.set_xlabel('x (' + xyzunits + ')')
		ax.set_ylabel('z (' + xyzunits + ')')
	elif axis == 'z':
		s = int(where * rsz) - wl_i
		if not logscale:
			cf = ax.contourf(
					xi[:, :, s], 
					yi[:, :, s], 
					vi[:, :, s], 
					levels_i, 
					cmap=cmap, 
					extend=extend, 
					zorder=zorder
				)
		else:
			cf = ax.contourf(
					xi[:, :, s], 
					yi[:, :, s], 
					np.abs(vi[:, :, s]), 
					loglevels, 
					cmap=cmap, 
					norm=LogNorm(
						vmin=min(np.abs(vmin), np.abs(vmax)), 
						vmax=max(np.abs(vmin), np.abs(vmax))
						), 
					# locator=ticker.LogLocator(), 
					zorder=zorder
				)
		if show_data_points:
			ax.plot(x, y, 'k.', markersize=0.6)
		if show_intp_points:
			ax.scatter(xi, yi, c='k', s=1)
		ax.set_title('z = %f'%zi[s, s, s] + ' ' + xyzunits)
		ax.set_xlabel('x (' + xyzunits + ')')
		ax.set_ylabel('y (' + xyzunits + ')')
		ax.set_aspect('equal')

	if logscale:
		l_f = LogFormatter(10, labelOnlyBase=False)
		# cb = plt.colorbar(cf, ticks=cbticks_, format=l_f)
		cb = plt.colorbar(cf, format=l_f)
		# cb = plt.colorbar(cf, ticks=cbticks)
		# cb = plt.colorbar(cf)
		cb.set_ticks(cbticks_)
		# cb.set_ticklabels(cbticklabels)
		# cb.set_norm(LogNorm())
		# print(cbticklabels)
	else:
		cb = plt.colorbar(cf, ax=ax)

	cb.set_label(vunits)
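A minimal usage sketch for the function above, assuming its own dependencies (numpy, scipy's griddata, matplotlib's LogNorm/LogFormatter) are already imported in the module; 'fields.csv' is a placeholder for a comma-separated file with x, y, z, v columns.

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
# Slice perpendicular to z at mid-height, on a 50x50x50 interpolation grid.
show_slice(ax, 'fields.csv', 'z', 0.5,
           rsx=50, rsy=50, rsz=50, nlevels=50,
           xyzunits='m', vunits='V')
plt.show()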
Beispiel #47
0
def start():
    global counter
    global user_id
    global start
    global end 
    global status2_blink
    global start2_blink
    global stop2_threads
    
    durationStop2 = datetime.now() - timedelta(days = 1)
    duration2 = 0
    duration = 0
    status2_toilet = "free"
    start_d = datetime.now()
    start_time = datetime.now()
    read0 = 1
    log2count = count2_people()
    temp2_count = log2count

    while True:
        time.sleep(0.1)
        read1 = wiringpi.digitalRead(GPIO_SW)
        #        print "read1= %d" % read1

        if status2_blink and (status2_toilet == "free"):
            if (datetime.now() - durationStop2).seconds > 1:
                print("stop blink")
                stop2_threads = True
                start2_blink.join()
                status2_blink = False
                duration = (datetime.now() - start_time).seconds /60  #For 1 min
                start_time = datetime.now()
                duration =str(duration)
                store2_log(duration + "\n 男子トイレ使用終了\n")
                status2("free")
                print (duration)
                print("\n男子トイレ使用終了")
                temp2_count = log2count
                status2_toilet = "free"
            
        elif (not status2_blink) and status2_toilet == "busy":
            if(datetime.now() - start_time).seconds > 60 :
                stop2_threads = False
                print("start blink")
                start2_blink = threading.Thread(target = blink_led, args = ())
                start2_blink.start()
                status2_blink = True
        if read0 == read1:

            continue

        time.sleep(0.05)
        read2 = wiringpi.digitalRead(GPIO_SW)
        now = datetime.now()
        current = datetime.now()
        current_date = now.strftime("%Y/%m/%d %H:%M:%f")
        current_time = now.strftime("%H:%M:%f")
        end = datetime.now()

        #        print "read0= %d" % read0
        if read1 == read2:
            if read1 == 1:
                duration2 = datetime.now() - durationStop2
    #            print(duration2)
                if (duration2.seconds) < 5:
                    thAnn5()
                    start_d = datetime.now()
                    status2_toilet = "busy"
                    wiringpi.digitalWrite(GPIO_LED, 0) # switch on LED. Sets port 18 to 1 (3V3, on)
                    user_id = logTable.insert_table(2, current_date, current_time,2, "Girl Busy", duration = duration)
            #        status2("Busy")
    #                print ("\n 女子トイレUse")

                else:
                #    stop_thAnn5()
                    start_time = datetime.now()
                    start_d = datetime.now()
                    status2_toilet = "busy"
                    log2count = log2count + 1
                    print ("person count:" + str(log2count))
                    start_waiting()
                    wiringpi.digitalWrite(GPIO_LED, 0)  # switch on LED. Sets port 18 to 1 (3V3, on)
                    store2_log(str(log2count) + "女子 トイレBusy\n")
                    
                    status2("Busy")
                    print ("\n 女子トイレBusy\n")
                    user_id = logTable.insert_table(2, current_date, current_time, 1, "Girl Busy", duration=duration)



            else:

                stop_waiting()
                status2_toilet = "free"
                durationStop2 = datetime.now()
                time_end = datetime.now()
                duration = time_end - start_d
                duration = duration.seconds/60.0
                wiringpi.digitalWrite(GPIO_LED, 1) # switch off LED. Sets port 18 to 0 (0V, off)
                pygame.mixer.Channel(1).stop()
            #    store2_log("女子 トイレFree\n")
                user_id = logTable.insert_table(2,current_date, current_time, 2, "Girl Free", duration= duration)
            #    print (duration)
             #   status2("Free")
             #   print ("\n女子トイレFree\n")
                if temp2_count > log2count:
                    logTable.update_table(user_id , duration)
                    temp2_count = log2count


        read0 = read1
Beispiel #48
0
def isValidDate(date):
	# 'interval' is how many hours are between each refresh
	last_refresh = datetime.now(timezone.utc) - timedelta(hours=config["schedule"]["interval"]) 
	return last_refresh < date
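A small usage sketch: isValidDate() reads the refresh interval from the module-level config, so the dictionary below only illustrates the expected layout (the 6-hour value is a made-up example).

from datetime import datetime, timezone, timedelta

config = {"schedule": {"interval": 6}}                   # assumed example value
fresh = datetime.now(timezone.utc) - timedelta(hours=2)
stale = datetime.now(timezone.utc) - timedelta(hours=12)
print(isValidDate(fresh))   # True: newer than the last refresh
print(isValidDate(stale))   # False: older than the refresh interval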
Beispiel #49
0
import openpyxl
import json
from datetime import datetime
import calendar
from Schedule import scheduling
import time
import serial

current_time = datetime.now()

workbook = openpyxl.load_workbook('Schedule.xlsx')
worksheet = workbook[calendar.day_name[current_time.weekday()]]


def reload():
    """ reset all data"""
    outfile = open('availability.json', mode='w')
    status_upload = {}
    for row in worksheet.iter_rows(min_row=2, min_col=0, max_col=1):
        for cell in row:
            status_upload[str(cell.value)] = "absent"
    json.dump(status_upload, outfile)
    outfile.close()


def modify(index):
    """
  enter index of the record to be changed
  """
    file_read = open("availability.json", mode="r")
    check_availability = json.load(file_read)
Beispiel #50
0
                                        title = title.replace(' ','')
                                        checkFlag = 1;
                                    elif checkFlag == 1:
                                        checkFlag = 2;
                                        titleCannotFound += 1
                                        titleNotFoundSet.add(originalTitle)
                                        print ('\t\tCANNOTFOUND '+ file+' title='+title )
                                    elif checkFlag == 9:
                                        break
                        if inplaceFlag != 0:
                            wf.write(line)
                    if inplaceFlag != 0:
                        wf.close()

#==main==================
print ("Start time: " + str(datetime.now()))
print ("inplaceFlag = " + str(inplaceFlag) + "\n")

labelDict = Case00_commonJobs.addDataToDict('label_ko.properties')
run(publichtml_dir, inplaceFlag)
print('\nTotal Title Count = ' + str(titleCount))
print('showLabelFound = ' + str(showLabelFound))
print('Title Changed = ' + str(titleChanged))
print('Title CANNOTFOUND = ' + str(titleCannotFound))
for i, (jsp, lineNo) in enumerate(zip(jspNames, jspLines)):
    print('  '+str(i+1)+'\t'+jsp+'\tat '+str(lineNo)+' : '+jspValues[i]+'-->'+jspKeys[i])
for j, (screen, level) in enumerate(zip(checkFlagScreens, checkFlagLevels)):
    print('    Special CheckFlagNo '+str(j+1)+': '+screen+' with checkFlag=' + str(level))
    
if inplaceFlag != 0:
    print('\n INPLACE APPLIED!!!')
Beispiel #51
0
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0,parentdir)
import pybullet_envs
from datetime import datetime, time

from baselines.common import tf_util as U
from baselines import logger
import gym

dirname = datetime.now().strftime("%y%m%d_%I%M%S")

ENV = 'HumanoidBulletEnv-v0'
NUM = 10000
SEED = 0

def train(env_id, num, seed):
    from baselines.ppo1 import mlp_policy, pposgd_simple
    U.make_session(num_cpu=1).__enter__()
    def policy_fn(name, ob_space, ac_space):
        return mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
            hid_size=64, num_hid_layers=2)
    env = gym.make(env_id)
    env.seed(seed)
    env.render('human')
    pposgd_simple.learn(env, policy_fn,
            max_iters=num,
            timesteps_per_actorbatch=2048,
            clip_param=0.2, entcoeff=0.0,
            optim_epochs=10, optim_stepsize=3e-4, optim_batchsize=64,
Beispiel #52
0
#!/usr/bin/python3
# This script downloads the basis set library data from www.basissetexchange.org
# into the directory $NWCHEM_TOP/src/basis/libraries.bse
# to use, set the env. variable NWCHEM_BASIS_LIBRARY=$NWCHEM_TOP/src/basis/libraries.bse/
# Requires the installation of the python env. from
# https://github.com/MolSSI-BSE/basis_set_exchange
# See https://molssi-bse.github.io/basis_set_exchange/
#
# names changed
# def2-universal-jfit was weigend_coulomb_fitting
# dgauss-a1-dftjfit   was dgauss_a1_dft_coulomb_fitting
# dgauss-a2-dftjfit   was dgauss_a2_dft_coulomb_fitting
#
import basis_set_exchange as bse
from datetime import datetime
today = datetime.now().isoformat(timespec='minutes')
print(today)
all_bs = bse.get_all_basis_names()
md = bse.get_metadata()
for bas_name in all_bs:
    #get version and list of elements
    version_bs = md[bas_name]['latest_version']
    elements_list = md[bas_name]['versions'][version_bs]['elements']
    #open file
    # get rid of asterisks
    file_name = bas_name.replace("*","s")
    #get rid of parenthesis
    file_name = file_name.replace("(","")
    file_name = file_name.replace(")","")
    #replace commas with underscore
    file_name = file_name.replace(",","_")
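A minimal sketch of how one of these basis sets could then be fetched in NWChem format and written out, assuming the documented bse.get_basis call; the example basis name and output file are assumptions for illustration.

import basis_set_exchange as bse

bas_name = 'cc-pvdz'                                  # assumed example basis set
# Clean up the name the same way as above before using it as a file name.
file_name = bas_name.replace("*", "s").replace("(", "").replace(")", "").replace(",", "_")
nwchem_text = bse.get_basis(bas_name, fmt='nwchem')   # NWChem-formatted basis data
with open(file_name, 'w') as f:
    f.write(nwchem_text)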
Beispiel #53
0
 def gen_id(self):
     time_id = str(datetime.now()).split(".")[-1]
     random_id1 = str(random.randrange(0, 9))
     random_id2 = str(random.randrange(0, 9))
     unique_id = time_id + random_id1 + random_id2
     return unique_id
Beispiel #54
0
from datetime import datetime
import os
import sys

gbl_blnResult = ""
g_iPass_Count = 0
g_iFail_Count = 0
g_iScenario_Pass_Count = 0
g_iScenario_Fail_Count = 0
g_tStart_Time = datetime.now()

ioMode_ForWriting = 2
gbl_intScreenCount = 0
g_iImage_Capture = 1
RunMode = "Server"
BlnFailStatue = False
strScriptName = "tc1"
#Name of Report-folders and Report-File-Name for this Run
arrStartTime = str(g_tStart_Time).split(" ")
strname1 = arrStartTime[0]
strname1 = strname1.replace("-", "")
print(strname1)
strname2 = arrStartTime[1]
strname2 = strname2.replace(":", "")
strname2 = strname2.split(".")
strname2 = strname2[0]
strname = strname1 + "_" + strname2
print(strname)
strEnvironment = ""
if not os.path.exists("E:\\actitimeAutomation\\Results"):
    os.mkdir("E:\\actitimeAutomation\\Results")
Beispiel #55
0
def main(n_img):

    ############
    # INITIALIZE
    ############

    # Create unique results folder
    today = datetime.now()
    res_dir = '../results/hp_search_ResUNet53D_' + str(
        n_img) + '/' + today.strftime('%Y%m%d') + '_' + today.strftime(
            '%H%M%S')

    # Create results directory if not already existing
    os.makedirs(res_dir)
    print('Directory ' + res_dir + ' successfully created!')

    # Logger file
    log_file = res_dir + '/log_ResUNet53D_' + str(n_img) + '.txt'
    sys.stdout = Logger(log_file)

    # Print start processing time
    print('Start processing: ' + today.strftime('%Y%m%d') + ' ' +
          today.strftime('%H%M%S'))

    n_epochs = 2500

    # Create dict to store training parameters
    train_log = {}

    # Append training parameters to training log
    train_log["n_img"] = n_img
    train_log["epochs"] = n_epochs

    ##########
    # DATASETS
    ##########

    # Load TRAIN and DEV datasets
    X_train, Y_train = load_dataset('train', n_img, 740)
    X_dev, Y_dev = load_dataset('dev', n_img, 40)

    # Standardize X data
    Xmean = np.mean(X_train)
    Xstd = np.std(X_train)

    X_train = (X_train - Xmean) / Xstd
    X_dev = (X_dev - Xmean) / Xstd

    ###################
    # INSTANTIATE TUNER
    ###################
    hypermodel = myHyperModel(n_img=n_img)

    tuner = kt.BayesianOptimization(hypermodel,
                                    objective=kt.Objective('val_ssim',
                                                           direction='max'),
                                    num_initial_points=15,
                                    max_trials=15,
                                    executions_per_trial=1,
                                    seed=1,
                                    directory=res_dir,
                                    project_name='hp_opt')

    # Print summary of search space
    tuner.search_space_summary()

    ################
    # EXECUTE SEARCH
    ################

    class ClearTrainingOutput(tf.keras.callbacks.Callback):
        def on_train_end(*args, **kwargs):
            IPython.display.clear_output(wait=True)

    results = tuner.search(
        X_train,
        Y_train,
        validation_data=(X_dev, Y_dev),
        epochs=n_epochs,
        batch_size=1,
    )
    # callbacks=[escb]

    # Print end processing time
    today = datetime.now()
    print('End processing: ' + today.strftime('%Y%m%d') + ' ' +
          today.strftime('%H%M%S'))

    # Print summary of results
    tuner.results_summary()

    # Get the optimal hyperparameters
    best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]

    print('Hyperparameter search completed!')
    print('Optimal learning rate: ' + str(best_hps.get('learning_rate')))
    print('Optimal dropout rate: ' + str(best_hps.get('dropout_rate')))
    print('Optimal beta: ' + str(best_hps.get('beta')))
    print('Optimal filt0_k12: ' + str(best_hps.get('filt0_k12')))
    print('Optimal filt0_k3: ' + str(best_hps.get('filt0_k3')))

    # Log train and val losses and metrics
    train_log["learning_rate"] = best_hps.get('learning_rate')
    train_log["dropout_rate"] = best_hps.get('dropout_rate')
    train_log["beta"] = best_hps.get('beta')
    train_log["filt0_k12"] = best_hps.get('filt0_k12')
    train_log["filt0_k3"] = best_hps.get('filt0_k3')

    # Select best model
    model = tuner.get_best_models(num_models=1)[0]

    # Store search and train history to file
    with open(res_dir + '/search_train_history', 'w') as file:
        json.dump(train_log, file)

    # Save trained model
    model.save(res_dir + '/my_model.h5')

    ###########################
    # PREDICT AND PLOT TEST SET
    ###########################

    # Load TEST examples
    X_test, Y_test = load_dataset('test', n_img, 40)
    X_test = (X_test - Xmean) / Xstd

    # Predict TEST examples
    Yhat_test = model.predict(X_test, verbose=0)

    # Plot original and predicted TEST examples
    plot_and_stats(Yhat_test, Y_test, res_dir)

    # Close logger
    sys.stdout.close()

    return
Beispiel #56
0
def train(modelParams, epochNumber):
    # import the corresponding model as model_cnn; the model name is specified in the json file
    model_cnn = importlib.import_module('Model_Factory.' +
                                        modelParams['modelName'])

    if not os.path.exists(modelParams['dataDir']):
        raise ValueError("No such data directory %s" % modelParams['dataDir'])

    _setupLogging(os.path.join(modelParams['logDir'], "genlog"))

    with tf.Graph().as_default():
        # track the number of train calls (basically number of batches processed)
        globalStep = tf.get_variable('globalStep', [],
                                     initializer=tf.constant_initializer(0),
                                     trainable=False)
        # Get images inputs for model_cnn.
        if modelParams['phase'] == 'v':
            filename, pngTemp, targetT = data_input.inputs_vali(**modelParams)
        else:
            input_data = data_input.inputs(**modelParams)
        print('Input        ready')
        #TEST###        filenametest, pngTemptest, targetTtest = data_input.inputs_test(**modelParams)

        # Build a Graph that computes the HAB predictions from the
        # inference model
        output_res = model_cnn.inference_l2reg(input_data['image'],
                                               **modelParams)
        # loss model
        #loss = model_cnn.loss_l2reg(output_res, input_data, **modelParams)

        # Build a Graph that trains the model with one batch of examples and
        # updates the model parameters.
        #opTrain = model_cnn.train(loss, globalStep, **modelParams)
        ##############################
        print('Testing     ready')
        # Create a saver.
        saver = tf.train.Saver(tf.global_variables())
        print('Saver        ready')

        # Build the summary operation based on the TF collection of Summaries.
        summaryOp = tf.summary.merge_all()
        print('MergeSummary ready')
        # Build an initialization operation to run below.
        #init = tf.initialize_all_variables()
        #        init = tf.global_variables_initializer()

        #opCheck = tf.add_check_numerics_ops()
        # Start running operations on the Graph.
        config = tf.ConfigProto(
            log_device_placement=modelParams['logDevicePlacement'])
        config.gpu_options.allow_growth = True
        config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
        sess = tf.Session(config=config)
        print('Session      ready')

        #sess = tf_debug.LocalCLIDebugWrapperSession(sess)
        #sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
        #        sess.run(init)

        # restore a saver.
        print('Loading Ex-Model with epoch number %d ...' % epochNumber)
        print('     ',
              modelParams['trainLogDir'] + '_v/model.ckpt-' + str(epochNumber))
        saver.restore(
            sess,
            (modelParams['trainLogDir'] + '_v/model.ckpt-' + str(epochNumber)))
        #saver.restore(sess, (modelParams['trainLogDir']+'_30k/model.ckpt-29000'))
        print('Ex-Model     loaded')

        if True:
            # if True: freeze graph
            tf.train.write_graph(sess.graph.as_graph_def(),
                                 '.',
                                 modelParams['trainLogDir'] + '_v/model.pbtxt',
                                 as_text=True)
            # Output nodes
            output_node_names = [
                n.name for n in tf.get_default_graph().as_graph_def().node
            ]
            # Freeze the graph
            frozen_graph_def = tf.graph_util.convert_variables_to_constants(
                sess, sess.graph_def, output_node_names)
            # Save the frozen graph
            with open(modelParams['trainLogDir'] + '_v/model.pb', 'wb') as f:
                f.write(frozen_graph_def.SerializeToString())

        # Start the queue runners.
        tf.train.start_queue_runners(sess=sess)
        print('QueueRunner  started')

        summaryWriter = tf.summary.FileWriter(modelParams['logDir'],
                                              sess.graph)
        summaryValiWriter = tf.summary.FileWriter(modelParams['logDir'] + '_v',
                                                  sess.graph)
        #TEST###        summaryValiWriter = tf.summary.FileWriter(modelParams['logDir']+'_test', sess.graph)

        print('Testing     started')
        durationSum = 0
        durationSumAll = 0
        prevLoss = 99999
        prevValiSumLoss = 99999
        prevaccur = 0
        prevLossStep = 0
        prevStep = 21000
        #TEST###        prevTestSumLoss = 99999
        prevStep = int(modelParams['maxSteps'] / 2)
        l = list()
        import cv2
        lossValueSum = 0
        l2regValueSum = 0

        total_parameters = 0
        for variable in tf.trainable_variables():
            # shape is an array of tf.Dimension
            shape = variable.get_shape()
            #print(shape)
            #print(len(shape))
            variable_parameters = 1
            for dim in shape:
                #print(dim)
                variable_parameters *= dim.value
            #print(variable_parameters)
            total_parameters += variable_parameters
        print('-----total parameters-------- ', total_parameters)

        true_counter = 0
        for step in xrange(0, modelParams['maxSteps']):  #(0, 1000):
            startTime = time.time()
            #npfilename, npTargetP, npTargetT, npPng = sess.run([filename, targetP, targetT, pngTemp])
            #np_out, np_inp = sess.run([output_res, input_data])
            #npfilename = np_inp['filename']
            #npTargetP = np_out['clsf']
            #npTargetT = np_inp['clsf']
            #npfilename, npTargetT, npTargetP = sess.run([input_data['filename'], input_data['clsf'], output_res['clsf']])
            npfilename, npTargetT, npTargetP, npImageP = sess.run([
                input_data['filename'], input_data['clsf'], output_res['clsf'],
                output_res['deconv']
            ])
            #print(modelParams['outputDir']+str(step+10000)+'.jpg')
            # 1-mask imwrite
            cv2.imwrite(modelParams['outputDir'] + str(step + 10000) + '.jpg',
                        npImageP[0, :, :, 0])
            # 6-mask imwrite
            #for hemaps in range(modelParams['num_heatmap']):
            #    cv2.imwrite(modelParams['outputDir']+str(step+10000)+'_'+str(hemaps)+'.jpg', npImageP[0,:,:,hemaps])
            duration = time.time() - startTime
            if step != 0:
                l.append(duration)
            print(duration, step, modelParams['maxSteps'])
            #lossValueSum += lossValue

            for i in range(modelParams['activeBatchSize']):
                if np.argmax(npTargetP[i, :]) == np.argmax(npTargetT[i, :]):
                    match = True
                    true_counter += 1
                else:
                    match = False
                print(np.argmax(npTargetP[i, :]), np.argmax(npTargetT[i, :]),
                      match, '----counter:', true_counter)
                #inp_out_img = np.concatenate((np_inp['deconv'][i,:,:], np_out['deconv'][i,:,:]), axis=0)
                #cv2.imshow('in --- out', cv2.resize(inp_out_img,(350,300)))
                #cv2.waitKey(0)

            #print(npfilename)
            #print(npTargetT)
            #print(npTargetP)

            #p1 = npPng[0,:,:,0]
            #p2 = npPng[0,:,:,1]
            #p1 = (p1-np.min(p1)) / (np.max(p1)-np.min(p1))
            #p2 = (p2-np.min(p2)) / (np.max(p2)-np.min(p2))
            #cv2.imshow('img0', p1)
            #cv2.imshow('img1', p2)
            #cv2.waitKey(0)
            #print(npfilename)
            print(duration, step, modelParams['maxSteps'])
            data_output.output(str(10000 + step), npfilename, npTargetP,
                               npTargetT, **modelParams)
            # Print Progress Info
            if ((step % FLAGS.ProgressStepReportStep)
                    == 0) or ((step + 1) == modelParams['maxSteps']):
                print(
                    'Progress: %.2f%%, Elapsed: %.2f mins, Testing Completion in: %.2f mins --- %s'
                    %
                    ((100 * step) / modelParams['maxSteps'], durationSum / 60,
                     (((durationSum * modelParams['maxSteps']) /
                       (step + 1)) / 60) - (durationSum / 60), datetime.now()))
            #if step == 128:
            #    modelParams['phase'] = 'train'
            #
            #if step == 130:
            #    modelParams['phase'] = 'test'
        print(np.array(l).mean())
        #l0 = np.array(l)
        #l1 = np.array(l[1:-1])
        #print(np.average(l0))
        #print(np.average(l1))

        print('----- maxsteps:', modelParams['maxSteps'], '--- step:', step)
        #print('----- maxsteps:', modelParams['maxSteps'], '--- loss avg:', lossValueSum/modelParams['maxSteps'])
        #print('----- train scaled loss:', (lossValueSum/modelParams['maxSteps'])*modelParams['trainBatchSize'])
        print(modelParams['outputDir'])

        sess.close()
    tf.reset_default_graph()
Beispiel #57
0
 def save(self, *args, **kwargs):
     if not self.key:
         self.key = self.generate_key()
     self.expired = datetime.now() + timedelta(minutes=30)
     return super().save(*args, **kwargs)
Beispiel #58
0
 def check_folder(self):
     folder = datetime.now().strftime('%d-%m-%Y')
     listfolder = 'D:\\pythonProjects\\Face-Recognition-Core\\Core\\Face-Recogition\\logs\\'
     if folder not in os.listdir(listfolder):
         os.mkdir(listfolder + folder)
Beispiel #59
0
class Malware(Base):
    __tablename__ = 'malware'

    id = Column(Integer(), primary_key=True)
    name = Column(String(255), nullable=True)
    size = Column(Integer(), nullable=False)
    type = Column(Text(), nullable=True)
    mime = Column(String(255), nullable=True)
    md5 = Column(String(32), nullable=False, index=True)
    crc32 = Column(String(8), nullable=False)
    sha1 = Column(String(40), nullable=False)
    sha256 = Column(String(64), nullable=False, index=True)
    sha512 = Column(String(128), nullable=False)
    ssdeep = Column(String(255), nullable=True)
    created_at = Column(DateTime(timezone=False),
                        # pass the callable so the timestamp is evaluated at insert time
                        default=datetime.now,
                        nullable=False)
    parent_id = Column(Integer(), ForeignKey('malware.id'))
    parent = relationship('Malware', lazy='subquery', remote_side=[id])
    tag = relationship('Tag',
                       secondary=association_table,
                       backref=backref('malware'))
    note = relationship('Note',
                        cascade='all, delete',
                        secondary=association_table,
                        backref=backref('malware'))
    analysis = relationship('Analysis',
                            cascade='all, delete',
                            secondary=association_table,
                            backref=backref('malware'))
    __table_args__ = (Index('hash_index',
                            'md5',
                            'crc32',
                            'sha1',
                            'sha256',
                            'sha512',
                            unique=True), )

    def to_dict(self):
        row_dict = {}
        for column in self.__table__.columns:
            value = getattr(self, column.name)
            row_dict[column.name] = value

        return row_dict

    def __repr__(self):
        return "<Malware ('{0}','{1}')>".format(self.id, self.md5)

    def __init__(self,
                 md5,
                 crc32,
                 sha1,
                 sha256,
                 sha512,
                 size,
                 type=None,
                 mime=None,
                 ssdeep=None,
                 name=None,
                 parent=None):
        self.md5 = md5
        self.sha1 = sha1
        self.crc32 = crc32
        self.sha256 = sha256
        self.sha512 = sha512
        self.size = size
        self.type = type
        self.mime = mime
        self.ssdeep = ssdeep
        self.name = name
        self.parent = parent
Beispiel #60
0
#coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os

st = datetime.now()
conf = SparkConf().setAppName('PROC_A_SUBJECT_D002011').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
if len(sys.argv) > 5 and sys.argv[5] == "hive":
    sqlContext = HiveContext(sc)
else:
    sqlContext = SQLContext(sc)
hdfs = sys.argv[3]
dbname = sys.argv[4]

#Dates used in the processing below
etl_date = sys.argv[1]
#ETL date
V_DT = etl_date
#Previous day
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
#First day of the current month
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
#Last day of the previous month
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
#10-character date (YYYY-MM-DD)
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
V_STEP = 0