Example No. 1
    def compute_hiperspaces(self):
        if not len(self.points) > 0:
            logger.error('No points to compute hull!')
            raise Exception('No points to compute hull!')

        # The heuristic used when searching for connections between
        # different clusters may fail, so we redirect stderr to keep
        # such errors from being visible to the user.
        stderr_fd = sys.stderr.fileno()
        with open('/tmp/qhull-output.log', 'w') as f, stderr_redirected(f):
            points = list(self.points)
            logger.info('Searching for hull in dimension %s based on %s points',
                    len(points[0]),len(points))
            output = qconvex('n',points)
            if len(output) == 1:
                logger.debug('Could not get Hull. Joggle input?')
        try:
            dim, facets_nbr, facets = self.__parse_hs_output(output)
        except IncorrectOutput:
            logger.error('Could not get hull')
            raise CannotGetHull()
        logger.info('Found hull in dimension %s of %s facets',
                dim,facets_nbr)
        self.dim = dim
        self.facets = facets
        return self.dim
Example No. 2
 def save(self):
   """Dump all data we can collect to tmp directory"""
   data_dir_timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
   data_dir_prefix = 'satellite-sanity-save-%s' % data_dir_timestamp
   data_dir_base = tempfile.mkdtemp(suffix='', prefix='%s-' % data_dir_prefix, dir=D_TMP)
   data_dir = os.path.join(data_dir_base, 'satellite-sanity')
   os.makedirs(data_dir)
   logger.debug("Saving to directory %s" % data_dir)
   for key in self.config['commands'].keys():
     data_file = os.path.join(data_dir, key)
     fd = open(data_file, 'w')
     try:
       for row in self[key]:
         fd.write("%s\n" % row)
     except DataNotAvailable:
       logger.warn("Failed when obtaining %s" % key)
     fd.close()
     if self[key] is not None:
         data_file_lines = len(self[key])
     else:
         data_file_lines = -1
     logger.debug("Saved %s lines to %s" % (data_file_lines, data_file))
   data_tarxz = "%s.tar.xz" % data_dir_base
   command = ['tar', '-cJf', data_tarxz, '-C', data_dir_base, 'satellite-sanity']
   logger.debug("Running %s" % command)
   process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
   stdout, stderr = process.communicate()
   assert len(stderr) == 0, "Compress failed with '%s' when running '%s'" % (stderr, command)
   logger.info("Saved to %s" % data_tarxz)
   return data_tarxz
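The assert above disappears under "python -O" and treats any stderr output from tar as fatal. A hedged alternative (a sketch, not the project's code) would check the return code instead:

import subprocess

def compress_dir(data_dir_base):
    """Create <data_dir_base>.tar.xz from the 'satellite-sanity' subdirectory (sketch only)."""
    data_tarxz = "%s.tar.xz" % data_dir_base
    command = ['tar', '-cJf', data_tarxz, '-C', data_dir_base, 'satellite-sanity']
    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    if process.returncode != 0:
        raise RuntimeError("Compress failed with '%s' when running '%s'" % (stderr, command))
    return data_tarxz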
Example No. 3
def main(no_confirm=True):

    src_dump = get_src_dump()
    download_list = get_file_list_for_download()
    if len(download_list) == 0:
        logging.info("No newer file found. Abort now.")
        sys.exit(0)

    doc = src_dump.find_one({'_id': 'ucsc'})
    if not os.path.exists(DATA_FOLDER):
        os.makedirs(DATA_FOLDER)

    logfile = os.path.join(DATA_FOLDER, 'ucsc_dump.log')
    setup_logfile(logfile)

    # mark the download starts
    doc = {'_id': 'ucsc',
           'timestamp': timestamp,
           'data_folder': DATA_FOLDER,
           'lastmodified': latest_lastmodified,
           'logfile': logfile,
           'status': 'downloading'}
    src_dump.save(doc)
    t0 = time.time()
    download(download_list, no_confirm)
    # mark the download finished successfully
    _updates = {
        'status': 'success',
        'time': timesofar(t0),
        'pending_to_upload': True    # a flag to trigger data uploading
    }
    src_dump.update({'_id': 'ucsc'}, {'$set': _updates})
Example No. 4
 def closest_points(self,clusters):
     rel = {}
     closest_tuple = (None,None)
     max_all = 0
     seen = {}
     for key,cluster in clusters.items():
         for point in cluster:
             # Which cluster contains this point
             seen[point] = key
             corr_list = rel.setdefault(point,[])
             for idx,corr in enumerate(self[point,:]):
                 corr = abs(corr)
                 corr_list.append(corr)
                 if idx in cluster:
                     continue
                 if max_all < corr:
                     closest = idx
                     max_all = corr
                     closest_tuple = (point,idx)
     p0,p1 = closest_tuple
     if p0 is not None and p1 is not None:
         for pnbr,p in enumerate([p0,p1]):
             if p in seen:
                 logger.info('Closest point p%d is from cluster %s',
                         pnbr, seen[p])
             else:
                 logger.info('Closest point p%d was disconnected',pnbr)
         cor0 = np.array(rel.get(p0,self[p0,:]))
         cor1 = np.array(rel.get(p1,self[p1,:]))
     else:
         cor0 = cor1 = None
     return p0,p1,cor0,cor1
Example No. 5
def gather_csv_info():
    """
    Reads the input files one by one and stores the data in dictionaries
    containing lists of lists.
    """
    global args
    global w_dict, w_info
    global p_dict, p_headers_list, p_no_sum

    w_dict, p_dict = {}, {}
    p_headers_list = []
    p_no_sum = 0

    for input_file in wimport_lib.find_csv_filenames(args.input_dir):
        # get webinar and participants info
        w_info = wimport_lib.get_webinar_info(input_file, DETAILS_MARK)
        if w_info:
            w_id = wimport_lib.get_parameter('Webinar ID', w_info[0], w_info[1])
            p_info = wimport_lib.get_participants_info(input_file, w_id, DETAILS_MARK)
            p_len = len(p_info[1])
            p_no_sum += p_len
            logger.info("Reading {} \n\t --> {} participants.".format(input_file, p_len))
            # store info for later writing to files and database
            if w_id not in w_dict:
                w_dict[w_id] = [w_info[1]]
            else:
                w_dict[w_id] += [w_info[1]]
            if w_id not in p_dict:
                p_dict[w_id] = p_info[1]
            else:
                p_dict[w_id] += p_info[1]
            p_headers_list += [p_info[0]]
Example No. 6
 def wrapper(*args, **kwargs):
     logger.info("Running %s", func.__name__)
     start = time.time()
     result = func(*args, **kwargs)
     end = time.time()
     logger.info("Execution time: %s", end - start)
     return result
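The wrapper above is the inner function of a timing decorator; a self-contained sketch (the outer name "timed" is an assumption, not taken from the source) might look like this:

import functools
import logging
import time

logger = logging.getLogger(__name__)

def timed(func):
    """Log the wrapped function's name and how long it took to run."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        logger.info("Running %s", func.__name__)
        start = time.time()
        result = func(*args, **kwargs)
        logger.info("Execution time: %s", time.time() - start)
        return result
    return wrapper

@timed
def slow_add(a, b):
    # toy usage example
    time.sleep(0.1)
    return a + b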
Example No. 7
 def send(self, cmd, payload):
     logger.info("send %s, payload:%s", cmd, str(payload))
     req = {
             "cmd":cmd,
             "payload":payload
             }
     self._send(req)
Example No. 8
def resume():
    logger.info('(' + get_addr(request) + ')')
    db.counter.update_one({'counter': 'resume'}, {'$inc': {'value': 1}})
    with codecs.open(app.static_folder + '/files/md/resume.md', 'r', encoding='utf8') as f:
        text = f.read()
    res = m.html(text)
    return render_template('blank.html', html=res, title='Resume')
Example No. 9
def insert_beer(beer_list) :
	for beer in beer_list :
		# print(beer)
		# print("\n")d
		if beer.get("name") is not None :
			string_name = unicodedata.normalize('NFKD', beer.get("name")).encode('ascii','ignore')
		else :
			continue

		if beer.get("description") is not None :
			string_desc = unicodedata.normalize('NFKD', beer.get("description")).encode('ascii','ignore')
		else :
			continue

		if beer.get("labels") is not None :
			beer_image = beer.get("labels").get("large")
		else :
			continue

		if beer.get('isOrganic') is None or beer.get("abv") is None or beer.get("ibu") is None or beer.get("breweryId") is None or beer.get("styleId")  is None or beer_image  is None:
			continue

		b = Beer(name=string_name, description=string_desc, is_organic=beer.get('isOrganic'), abv=beer.get("abv"), ibu=beer.get("ibu"), scrape_brew_id=beer.get("breweryId"), scrape_style_id=beer.get("styleId"), image=beer_image)
		db.session.add(b)
		db.session.commit()

		if beer.get("name") is not None:
			logger.info("beer: " + beer.get("name") + " populated")

	db.session.commit()
Example No. 10
    def package_delete(self, data_package):
        """ Delete a package by package_title
            return True if the operation success
                   False otherwise
        """
        msg = ''

        package_title = data_package[u'title']
        package_identifier = data_package[u'identifier']
        resp = self.package_search(
            prop='identifier', value=package_identifier
        )

        if not resp:
            msg = 'Package delete: \'%s\' NOT FOUND.' % package_title
            logger.error(msg)
            return False, msg

        package_id = resp[0][u'id']

        try:
            resp = self.__conn.action.package_delete(id=package_id)
        except ckanapi.NotFound:
            msg = 'Package delete: \'%s\' NOT FOUND.' % package_title
            logger.info(msg)
            return False, msg
        except Exception, error:
            msg = 'Package delete: ERROR to execute the command for %s.: %s' % (
                package_title, error
            )
            logger.error(msg)
            return False, msg
Example No. 11
def upload_file(local_path, remote_name, bucket=STORAGE_BUCKET):
    """
    Upload a file to Google Storage
    :param local_path: The local path to the file to upload
    :param remote_name: The name of the file in the google cloud storage
    :param bucket: The bucket on google cloud storage you want to upload the file to
    :return: True if uploaded, False otherwise
    """
    try:
        service = discovery.build('storage', 'v1', credentials=get_storage_credentials())
        logger.info("Uploading %s to google cloud" % local_path)
        req = service.objects().insert(
            bucket=bucket,
            name=remote_name,
            # predefinedAcl="publicRead",         Uncomment this line if you want your files to be accessible to anyone
            media_body=local_path)
        req.execute()

        uploaded = check_if_file_exists(remote_name)

        if uploaded is True:
            logger.info("Upload complete!")
            return True
        else:
            return False

    except Exception as e:
        logger.debug("Unable to upload file %s to google cloud: %s" % (local_path, e))
        return False
Example No. 12
def slice_segments(source_files, segments, output_dir=VIDEO_OUTPUT_DIR):
    tmp_files = []
    for i, seg in enumerate(segments):
        tmp_files.append(slice_segment(i, seg, source_files, output_dir))
        if (i + 1) % 20 == 0:
            logger.info('Produced segment %d / %d', i + 1, len(segments))
    return tmp_files
Example No. 13
 def build_ffmpeg(self, version, enable=[], disable=[]):
     """build ffmpeg libraries"""
     saved = os.getcwd()
     os.chdir(os.path.join(self.build_dir_, "ffmpeg-%s" % version))
     command(
         "sed -i -e 's|SDL_CONFIG=\"${cross_prefix}sdl-config\"|"
         "SDL_CONFIG=\"%s/bin/sdl-config\"|' ./configure" % self.install_dir_)
     shutil.copy2("configure", "configure.orig")
     os.environ['PKG_CONFIG_PATH'] = "%s/lib/pkgconfig" % self.install_dir_
     cmd = ("./configure --prefix=%s --extra-cflags=\"-I%s/include\" "
            "--extra-ldflags=\"-L%s/lib\" --enable-libfaac --enable-libmp3lame "
            "--enable-libx264 --enable-libzvbi --enable-libass --enable-gpl "
            "--enable-pthreads --enable-nonfree" % (
                self.install_dir_, self.install_dir_, self.install_dir_))
     if enable:
         for opt in enable:
             cmd += ' --enable-' + opt
     if disable:
         for opt in disable:
             cmd += ' --disable-' + opt
     logger.info(cmd)
     command(cmd)
     shutil.move("configure.orig", "configure")
     command(["make", "-j%d" % self.cpu_count_])
     command(["make", "install"])
     os.chdir(saved)
Example No. 14
def _cut_segment(i, seg, source_files, output_dir):
    tmp_file = os.path.join(output_dir, 'segment_%s_%f_%f.mp4' % (
        seg.epnum, seg.start, seg.duration))
    if os.path.exists(tmp_file):
        logger.info('Cached segment: %d - %s', i, tmp_file)
        return tmp_file

    if seg.epnum not in source_files:
        raise ValueError('Process fail at ' + str(i) + ': no such epnum ' +
                         seg.epnum)
    source_file = source_files[seg.epnum]
    args = [
        config['avconv'],
        '-ss', str(seg.start),
        '-i', source_file,
        '-t', str(seg.duration),
        '-vf', 'scale=' + config['resolution'],
        '-vcodec', config['vcodec'],
        '-b:v', config['bitrate'],
        '-r', config['fps'],
        '-an', tmp_file,
    ]
    p = shell.execute(*args)
    if p.returncode != 0:
        raise shell.ShellError(args, 'process fail at %d : %s' % (i, p.stderr))
    logger.info('Generated segment: %d - %s', i, tmp_file)
    return tmp_file
Example No. 15
def generate_key(func_name, args, dict_args_original, skip_args):
    args_concat = [v for key, v in sorted(dict_args_original.iteritems()) if key not in skip_args]

    # Serialize each argument: use the function name for callables, otherwise
    # str(v) (hashed with md5 when longer than 200 chars); raw object references
    # (reprs containing "0x") are filtered out in an admittedly ugly way.
    args_serialized = \
        '_'.join([
            v.__name__
            if hasattr(v, '__call__')
            else
            (str(v) if len(str(v)) < 200 else hashlib.md5(str(v)).hexdigest())
            for v in args_concat if hasattr(v, '__call__') or hasattr(v, "__init__") or str(v).find("0x") == -1])

    logger.info("Serialized args to " + args_serialized)

    key = func_name + "_" + args_serialized

    full_key = func_name + "(" + "".join(
        [str(k) + "=" + (str(v) if len(str(v)) < 200 else hashlib.md5(str(v)).hexdigest())
         for k, v in sorted(dict_args_original.iteritems()) if k not in skip_args]) + ")"

    if len(key) > 400:
        key = key[0:400]

    return key, full_key
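A hedged usage sketch (the cache dict and the call_cached helper below are illustrative, not part of the source) showing how such keys might back a simple memoisation layer:

# Illustrative only: generate_key() above is assumed importable; the cache and
# helper below do not appear in the original source.
_cache = {}

def call_cached(func, kwargs, skip_args=()):
    key, full_key = generate_key(func.__name__, None, kwargs, skip_args)
    if key not in _cache:
        _cache[key] = func(**kwargs)
    return _cache[key]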
Example No. 16
def insert_last_month_intrest():
    INSERT_STR="INSERT INTO sc_sta_mlm (user_id,intrest,month) " \
              "SELECT * FROM view_get_last_month_intrest"
    logger.info("============开始更新每月客户利润贡献===========")
    INSERT_UPDATE_TRAN(INSERT_STR)
    logger.info("============每月客户利润贡献更新完毕===========")
    return None
Example No. 17
def generate_clinvar_lib(data_folder):
    sys.path.insert(0,data_folder)
    orig_path = os.getcwd()
    try:
        os.chdir(data_folder)
        logging.info("Generate XM parser")
        ret = os.system('''generateDS.py -f -o "clinvar_tmp.py" -s "clinvarsubs.py" clinvar_public.xsd''')
        if ret != 0:
            logging.error("Unable to generate parser, return code: %s" % ret)
            raise
        try:
            py = open("clinvar_tmp.py").read()
            # convert py2 to py3 (though they claim it supports both versions)
            py = py.replace("from StringIO import StringIO","from io import StringIO")
            fout = open("clinvar.py","w")
            fout.write(py)
            fout.close()
            os.unlink("clinvar_tmp.py")
            # can we import it ?
            import clinvar
            logging.info("Found generated clinvar module: %s" % clinvar)
        except Exception as e:
            logging.error("Cannot convert to py3: %s" % e)
            raise
    finally:
        os.chdir(orig_path)
Example No. 18
 def OnInsert(self, table, columns, values):
   logger.info('OnInsert: columns=%s, values=%s' % (columns, values))
   if columns[0] == 'id' and values[0] == self.id:
     self.average_sale = self.db_handle.GetAverageSale(self.id)
     logger.info('Updating average sale to %s' % self.average_sale)
     if not self.is_alive() and self.average_sale != -1:
       self.run()
Example No. 19
def insert_rep(loan_apply_id,tran_no,last_repayment_day,crnt_pr,arfn_pr,crnt_int,arfn_int):
    try:
        f_arfn_pr=float(arfn_pr)    # principal repaid
        f_arfn_int=float(arfn_int)  # interest repaid
        f_crnt_pr=float(crnt_pr)    # principal due
        f_crnt_int=float(crnt_int)  # interest due

        if f_arfn_int<f_crnt_int or f_arfn_pr<f_crnt_pr:
            if DAO_overdue.get_is_overdue(loan_apply_id,tran_no):
                status=2
            elif f_arfn_pr==0 and f_arfn_int==0:
                status=0
            else:
                status=1
        else:
            status=3

        total_repayment=f_arfn_pr+f_arfn_int
        logger.info("插入还款编号-"+str(loan_apply_id)+",期数-"+str(tran_no)+"")
        REP_INSERT_STR="INSERT INTO sc_repayment  \
                       (loan_apply_id,repayment_installments,re_principal,re_interest, \
                       clear_date,total_repayment,status)  \
                       VALUES  \
                       (%s,%s,%s,%s,%s,%s,%s)"%(loan_apply_id,tran_no,arfn_pr,arfn_int,last_repayment_day,total_repayment,status)
        INSERT_UPDATE_TRAN(REP_INSERT_STR)
    except:
        logger.exception('exception')

    return None
Example No. 20
def update_rep(loan_apply_id,tran_no,last_repayment_day,crnt_pr,arfn_pr,crnt_int,arfn_int,id):
    f_arfn_pr=float(arfn_pr)    # principal repaid
    f_arfn_int=float(arfn_int)  # interest repaid
    f_crnt_pr=float(crnt_pr)    # principal due
    f_crnt_int=float(crnt_int)  # interest due

    if f_arfn_int<f_crnt_int or f_arfn_pr<f_crnt_pr:
        if DAO_overdue.get_is_overdue(loan_apply_id,tran_no):
            status=2
        elif f_arfn_pr==0 and f_arfn_int==0:
            status=0
        else:
            status=1
    else:
        status=3

    total_repayment=f_arfn_pr+f_arfn_int

    logger.info("更新贷款编号-"+str(loan_apply_id)+",期数-"+str(tran_no)+"")

    REP_UPDATE_STR="UPDATE sc_repayment SET " \
                   "loan_apply_id=%s,repayment_installments=%s,re_principal=%f,re_interest=%f," \
                   "clear_date=%d,total_repayment=%f,status=%d " \
                   "WHERE id=%s"%(loan_apply_id,tran_no,arfn_pr,arfn_int,last_repayment_day,total_repayment,status,id)
    INSERT_UPDATE_TRAN(REP_UPDATE_STR)

    return None
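Both of these functions build SQL with % string formatting, which breaks on non-numeric values and is unsafe if any argument is attacker-controlled. A hedged alternative, sketched here with the standard sqlite3 module because the signature of INSERT_UPDATE_TRAN is not shown, would pass the values as query parameters:

import sqlite3

def update_rep_parameterized(conn, loan_apply_id, tran_no, last_repayment_day,
                             arfn_pr, arfn_int, total_repayment, status, row_id):
    """Same UPDATE as above, but using placeholders instead of string formatting (sketch only)."""
    sql = ("UPDATE sc_repayment SET "
           "loan_apply_id=?, repayment_installments=?, re_principal=?, re_interest=?, "
           "clear_date=?, total_repayment=?, status=? WHERE id=?")
    conn.execute(sql, (loan_apply_id, tran_no, arfn_pr, arfn_int,
                       last_repayment_day, total_repayment, status, row_id))
    conn.commit()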
Example No. 21
    def save(self, filename=None):
        pnml = etree.Element('pnml')
        net = etree.SubElement(pnml, 'net', id=self.id, type="http://www.pnml.org/version-2009/grammar/ptnet")
        net_name = etree.SubElement(net, 'name')
        net_name_text = etree.SubElement(net_name, 'text')
        net_name_text.text = self.name

        page = etree.SubElement(net, 'page', id='page')

        for tr_id, tr in self.transitions.items():
            transition = etree.SubElement(page, 'transition', id=tr_id)
            transition_name = etree.SubElement(transition, 'name')
            transition_name_text = etree.SubElement(transition_name, 'text')
            transition_name_text.text = tr.label

        for pl_id, pl in self.places.items():
            place = etree.SubElement(page, 'place', id=pl_id)
            place_name = etree.SubElement(place, 'name')
            place_name_text = etree.SubElement(place_name, 'text')
            place_name_text.text = pl.label
            place_init_mark = etree.SubElement(place, 'initialMarking')
            place_init_mark_text = etree.SubElement(place_init_mark, 'text')
            place_init_mark_text.text = str(pl.marking)

        for arc in self.arcs:
            arc_node = etree.SubElement(page, 'arc', id=arc.id,
                    source=arc.source.id, target=arc.destination.id)
            if arc.value > 1:
                arc_value = etree.SubElement(arc_node, 'value')
                arc_value_text = etree.SubElement(arc_value, 'text')
                arc_value_text.text = str(arc.value)
        tree = etree.ElementTree(element=pnml)
        tree.write(filename or self.filename, encoding="utf-8",
                xml_declaration=True, method="xml", pretty_print=True)
        logger.info('Generated the PNML %s', filename or self.filename)
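A hedged companion sketch (not part of the source) showing how the markings written by save() could be read back with lxml:

from lxml import etree

def load_place_markings(path):
    """Return {place id: initial marking} from a PNML file written by save()."""
    tree = etree.parse(path)
    markings = {}
    for place in tree.findall('.//place'):
        text = place.findtext('initialMarking/text')
        markings[place.get('id')] = int(text) if text else 0
    return markings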
Example No. 22
    def log_building_start(self):
        logfile = None
        if self.merge_logging:
            # setup logging
            logfile = 'databuild_{}_{}.log'.format('genedoc' + '_' + self._build_config['name'],
                                                   time.strftime('%Y%m%d'))
            logfile = os.path.join(self.log_folder, logfile)
            setup_logfile(logfile)

        src_build = getattr(self, 'src_build', None)
        if src_build:
            #src_build.update({'_id': self._build_config['_id']}, {"$unset": {"build": ""}})
            d = {'status': 'building',
                 'started_at': datetime.now(),
                 'logfile': logfile,
                 'target_backend': self.target.name}
            if self.target.name == 'mongodb':
                d['target'] = self.target.target_collection.name
            elif self.target.name == 'es':
                d['target'] = self.target.target_esidxer.ES_INDEX_NAME
            logging.info(pformat(d))
            src_build.update({'_id': self._build_config['_id']}, {"$push": {'build': d}})
            _cfg = src_build.find_one({'_id': self._build_config['_id']})
            if len(_cfg['build']) > self.max_build_status:
                #remove the first build status record
                src_build.update({'_id': self._build_config['_id']}, {"$pop": {'build': -1}})
Example No. 23
 def compute_hiperspaces(self):
      # The heuristic used when searching for connections between
      # different clusters may fail, so we redirect the output to
      # stay silent in those cases.
     if not len(self.points) > 0:
         logger.error('No points to compute hull!')
         raise Exception('No points to compute hull!')
     stderr_fd = sys.stderr.fileno()
     with open('/tmp/qhull-output.log', 'w') as f, stderr_redirected(f):
         points = list(self.points)
         logger.info('Searching for hull in dimension %s based on %s points',
                 len(points[0]),len(points))
         output = qconvex('n',points)
         if len(output) == 1:
             logger.debug('Could not get Hull. Joggle input?')
     try:
         dim, facets_nbr, facets = self.__parse_hs_output(output)
     except IncorrectOutput:
         logger.warning('Could not get hull')
         raise CannotGetHull()
     logger.info('Found hull in dimension %s of %s facets',
             dim,len(facets))
     self.dim = dim
     self.facets = facets
     if self.verbose:
         print "Computed MCH with ",facets_nbr," halfspaces"
          print 'Here they are:\n'
          for facet in self.facets: print facet
     return self.dim
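Both versions of compute_hiperspaces rely on a stderr_redirected helper that is not shown; a minimal sketch, assuming it swaps the process-level stderr file descriptor for the given file object, could look like this:

import os
import sys
from contextlib import contextmanager

@contextmanager
def stderr_redirected(to):
    """Temporarily point file descriptor 2 (stderr) at the open file `to`."""
    stderr_fd = sys.stderr.fileno()
    saved_fd = os.dup(stderr_fd)
    try:
        sys.stderr.flush()
        os.dup2(to.fileno(), stderr_fd)
        yield
    finally:
        sys.stderr.flush()
        os.dup2(saved_fd, stderr_fd)
        os.close(saved_fd)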
Example No. 24
 def run(self):
     UDPServer(self.ifname, self.ip, self.test).start()
     TCPClient(self.host, self.ip, self.send_err,
               self.file_path, self.test).start()
     logger.info('Emulator started on %s' % self.ip)
     if self.test:
         time.sleep(60)
         stop()
Example No. 25
  def run(self):
    logger.info('Running.')
    while True:
      if not self.Scrape():
        logger.info('Stopping.')
        break

      util.Sleep(self.scrape_every)
Example No. 26
 def get_word_vector(self, word):
     try:
         vector = self.m[word]
         logger.info('Got vector for word: %s', word)
     except KeyError:
         logger.info('No vector for word: %s', word)
         vector = get_random_vector(distribution=vector_distribution)
     return vector
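get_random_vector and vector_distribution come from elsewhere in that module; a minimal numpy-based sketch of such a fallback (the dimension and distribution names are assumptions) might be:

import numpy as np

def get_random_vector(dim=300, distribution="uniform"):
    """Return a random fallback vector for out-of-vocabulary words (assumed behaviour)."""
    if distribution == "normal":
        return np.random.normal(0.0, 1.0, dim)
    # default: uniform in [-0.5, 0.5]
    return np.random.uniform(-0.5, 0.5, dim)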
Example No. 27
 def __init__(self, user_agent=None):
     self.__address = ckan_config['ckan_address']
     self.__apikey = ckan_config['ckan_apikey']
     self.__user_agent = user_agent
     self.__conn = ckanapi.RemoteCKAN(self.__address,
         self.__apikey,
         self.__user_agent)
     logger.info('Connected to %s' % self.__address)
Example No. 28
 def process_messages(self):
     """ Process all the messages from the queue and stop after
     """
     logger.info('START to process messages in \'%s\'', self.queue_name)
     self.rabbit.open_connection()
     print 'work in progress'
     self.rabbit.close_connection()
     logger.info('DONE processing messages in \'%s\'', self.queue_name)
Example No. 29
def main(*args, **kwargs):
    usage = """
        Usage: ./pnml_comparator_script.py <.ini config filename>

        Config file options:
     %s\n
NOTE: Do you have the needed environment variables?
    - XES : Path to .xes file with traces (for running PacH)
    - PNML : Path to .pnml file with Petri net model (for simplifying PROM models)
    - NXES : Path to .xes file with negative traces
    - PETRI : Path where simplified .pnml files should be moved to after script ends
    - STATS : Path where statistic files should be moved to after script ends
  IMPORTANT: paths must NOT end in '/' (it is added automatically)
    """%(config_options)
    if not check_argv(sys.argv, minimum=1, maximum=4):
        print usage
        ret = -1
    else:
        ret = 0
        try:
            config_file = sys.argv[1]
            if not config_file.endswith('.ini'):
                print config_file, ' does not end in .ini. It should...'
                raise Exception('Filename has wrong extension')
            if not isfile(config_file):
                raise Exception("No such file")
            if '--debug' in sys.argv:
                pdb.set_trace()
            for filename, arguments in parse_config(config_file):
                comparator = ComparatorPnml(filename, **arguments)
                complexity = comparator.compare()
                logger.info('%s complexity -> %s',filename,complexity)
                comparator.generate_pnml()
                comparator.generate_outputs()
                comparator.check_hull()
            pnml_folder,out_folder = parse_config_output(config_file)
            pwd = os.getcwd()
            for basename in os.listdir(pwd):
                if basename.endswith('.pnml'):
                    pnml_file = os.path.join(pwd, basename)
                    if os.path.isfile(pnml_file):
                        shutil.copy2(pnml_file, pnml_folder)
                        os.remove(pnml_file)
                elif basename.endswith('.out'):
                    out_file = os.path.join(pwd, basename)
                    if os.path.isfile(out_file):
                        shutil.copy2(out_file, out_folder)
                        os.remove(out_file)
        except Exception, err:
            ret = 1
            if hasattr(err, 'message'):
                print 'Error: ', err.message
            else:
                print 'Error: ', err
            logger.error('Error: %s' % err, exc_info=True)
            raise err
        return ret
Example No. 30
 def _load_ensembl2entrez_li(self):
     ensembl2entrez_li = loadobj(("ensembl_gene__2entrezgene_list.pyobj", self.src), mode='gridfs')
     #filter out those deprecated entrez gene ids
     logging.info(len(ensembl2entrez_li))
     ensembl2entrez_li = [(ensembl_id, self._entrez_geneid_d[int(entrez_id)]) for (ensembl_id, entrez_id) in ensembl2entrez_li
                          if int(entrez_id) in self._entrez_geneid_d]
     logging.info(len(ensembl2entrez_li))
     ensembl2entrez = list2dict(ensembl2entrez_li, 0)
     self._idmapping_d_cache['ensembl_gene'] = ensembl2entrez
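list2dict is a helper imported elsewhere; a minimal sketch consistent with how it is used above (collapsing (ensembl_id, entrez_id) pairs into a dict keyed by the element at the given index) could be, with the duplicate-handling being an assumption:

def list2dict(pairs, key_index):
    """Group 2-tuples into a dict keyed by the element at key_index.
    Repeated keys collect their values into a list (assumed behaviour)."""
    out = {}
    for item in pairs:
        key, value = item[key_index], item[1 - key_index]
        if key not in out:
            out[key] = value
        elif isinstance(out[key], list):
            out[key].append(value)
        else:
            out[key] = [out[key], value]
    return out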
Example No. 31
def download_magic():
    """
    Download raw .jpgs of magic the gathering cards
    """
    parser = argparse.ArgumentParser(prog='MTG Download')
    parser.add_argument('--aws', action='store_true')
    parser.add_argument('--windows', action='store_true')
    parser.add_argument('--skipdata', action='store_true')
    parser.add_argument('--skipresults', action='store_true')
    parser.add_argument('--dryrun', action='store_true')
    args = parser.parse_args()

    if args.aws:
        # General commands
        sync_base = 'aws s3 sync '
        dryrun_arg = ' --dryrun'
        results_sync = '{} {}'.format(Config.CLOUD_RESULTS, Config.RESULTS_DIR)
        data_sync = '{} {}'.format(Config.CLOUD_DATA, Config.DATA_DIR)
        include_flags = " --exclude '*' --include 'card_classifier/*'"
        if args.windows:
            include_flags = re.sub("'", "", include_flags)

        if not args.skipdata:
            logger.info('Downloading Data from AWS')
            cc_sync = sync_base + data_sync + include_flags
            cc_sync += dryrun_arg if args.dryrun else ''
            logger.info(cc_sync)
            os.system(cc_sync)

            logger.info('Unzip image archives.')
            for archive in tqdm(
                ['cropped.zip', 'curated.zip', 'mtg_images.zip']):
                full_archive = os.path.join(Config.DATA_DIR, 'card_classifier',
                                            archive)
                if os.path.exists(full_archive):
                    shutil.unpack_archive(full_archive,
                                          re.sub('.zip', '', full_archive),
                                          'zip')
                    os.remove(full_archive)

        if not args.skipresults:
            logger.info('Downloading Results from AWS')
            cc_sync = sync_base + results_sync + include_flags
            cc_sync += dryrun_arg if args.dryrun else ''
            logger.info(cc_sync)
            os.system(cc_sync)

    else:
        logger.info('Downloading Metadata')
        metadata = get_mtg_metadata()
        logger.info('Wrangling Metadata')
        metadata = wrangle_mtg_metadata(metadata)
        logger.info('Downloading Image files')
        df_failed = _download_magic(metadata)
        logger.info('Failed to download {} images.'.format(df_failed.shape[0]))
        logger.info('Saving Metadata')
        metadata.to_csv(os.path.join(Config.DATA_DIR, 'mtg_images',
                                     'metadata.csv'),
                        index=False)
        logger.info('Saving Failed Images info.')
        df_failed.to_csv(os.path.join(Config.DATA_DIR, 'mtg_images',
                                      'failed_images.csv'),
                         index=False)
Example No. 32
 async def on_ready(self):
     logger.info("User is ready")
Example No. 33
    def diagnose(self):
        """
        Print diagnostics for the fit model
        """
        if self.summary is None:
            raise ValueError('Fit a model first.')

        logger.info('Printing Results.')
        # Get true values
        y = self.fit_transform(self.etl())['y']
        preds = self.summary[self.summary['labels'].str.contains('y_hat')]['mean'].values

        # Random Intercepts
        df_random_effects = self.summary[self.summary['labels'].str.startswith('a[')]. \
            sort_values('mean', ascending=False).reset_index(drop=True)
        df_random_effects['labels'] = df_random_effects['labels'].map(self.random_effect_inv)

        # Coefficients
        df_coefs = self.summary[self.summary['labels'].str.contains('^b[0-9]', regex=True)]. \
            assign(labels=self.features). \
            sort_values('mean', ascending=False). \
            reset_index(drop=True)

        # Globals
        df_globals = self.summary[self.summary['labels'].isin(['mu_a', 'sigma_a', 'sigma_y'])].reset_index(drop=True)
        if self.response_distributions[self.response] == 'bernoulli_logit':
            df_globals = df_globals[df_globals['labels'] != 'sigma_y'].reset_index(drop=True)

        with PdfPages(os.path.join(self.results_dir, 'diagnostics_{}.pdf'.format(self.version))) as pdf:
            # Bar graph of random effects for top 10, bottom 10, big10 teams
            plt.figure(figsize=(8, 8))
            df_top10 = df_random_effects.sort_values('mean', ascending=False).head(10).reset_index(drop=True)
            plt.bar(df_top10['labels'], df_top10['mean'])
            plt.errorbar(x=df_top10.index, y=df_top10['mean'], yerr=df_top10['sd'], fmt='none', ecolor='black')
            plt.grid(True)
            plt.xticks(rotation=90)
            plt.title('Top Ten Teams')
            plt.tight_layout()
            pdf.savefig()
            plt.close()

            # Bottom 10
            plt.figure(figsize=(8, 8))
            df_bot10 = df_random_effects.sort_values('mean', ascending=False).tail(10).reset_index(drop=True)
            plt.bar(df_bot10['labels'], df_bot10['mean'])
            plt.errorbar(x=df_bot10.index, y=df_bot10['mean'], yerr=df_bot10['sd'], fmt='none', ecolor='black')
            plt.grid(True)
            plt.title('Bottom Ten Teams')
            plt.xticks(rotation=90)
            plt.tight_layout()
            pdf.savefig()
            plt.close()

            if self.random_effect in ['team', 'opponent']:
                # nfc north
                plt.figure(figsize=(8, 8))
                df_nfcn = df_random_effects[df_random_effects['labels'].isin(['CHI', 'GNB', 'DET', 'MIN'])].\
                    sort_values('mean', ascending=False).reset_index(drop=True)
                plt.bar(df_nfcn['labels'], df_nfcn['mean'])
                plt.errorbar(x=df_nfcn.index, y=df_nfcn['mean'], yerr=df_nfcn['sd'], fmt='none', ecolor='black')
                plt.grid(True)
                plt.hlines(xmax=max(df_nfcn.index) + 1, xmin=min(df_nfcn.index) - 1, y=0, linestyles='dashed')
                plt.title('NFC North Teams')
                plt.xticks(rotation=90)
                plt.tight_layout()
                pdf.savefig()
                plt.close()

            # Coefficients
            plt.figure(figsize=(8, 8))
            plt.bar(df_coefs['labels'], df_coefs['mean'])
            plt.errorbar(x=df_coefs.index, y=df_coefs['mean'], yerr=df_coefs['sd'], fmt='none', ecolor='black')
            plt.grid(True)
            plt.hlines(xmax=max(df_coefs.index) + 1, xmin=min(df_coefs.index) - 1, y=0, linestyles='dashed')
            plt.title('Coefficients')
            plt.xticks(rotation=90)
            plt.tight_layout()
            pdf.savefig()
            plt.close()

            # Globals
            plt.figure(figsize=(8, 8))
            plt.bar(df_globals['labels'], df_globals['mean'])
            plt.errorbar(x=df_globals.index, y=df_globals['mean'], yerr=df_globals['sd'], fmt='none', ecolor='black')
            plt.grid(True)
            plt.hlines(xmax=max(df_globals.index) + 1, xmin=min(df_globals.index) - 1, y=0, linestyles='dashed')
            plt.title('Globals')
            plt.xticks(rotation=90)
            plt.tight_layout()
            pdf.savefig()
            plt.close()

            if self.response_distributions[self.response] == 'bernoulli_logit':
                logger.info('Extra diagnostics for {}'.format(self.response))
                # For Binaries, plot a ROC curve, histogram of predictions by class
                fpr, tpr, th = roc_curve(y, preds)
                score = auc(fpr, tpr)
                plt.figure(figsize=(8, 8))
                plt.plot(fpr, tpr, label='AUC: {a:0.3f}'.format(a=score))
                plt.plot([0, 1], [0, 1], color='black', linestyle='dashed')
                plt.grid(True)
                plt.xlabel('False Positive Rate')
                plt.ylabel('True Positive Rate')
                plt.legend()
                plt.tight_layout()
                pdf.savefig()
                plt.close()

                # Precision / Recall
                plt.figure(figsize=(8, 8))
                plt.plot(th, fpr, label='False Positive Rate')
                plt.plot(th, tpr, label='True Positive Rate')
                plt.title('Precision / Recall')
                plt.xlabel('Cutoff')
                plt.ylabel('Rate')
                plt.grid(True)
                plt.legend()
                plt.tight_layout()
                pdf.savefig()
                plt.close()

                # Histograms
                bins = np.linspace(min(preds) * 0.99, max(preds) * 1.01, 20)
                plt.figure(figsize=(8, 8))
                plt.hist(preds[y == 1], alpha=0.5, bins=bins, color='darkorange', density=True, label='Positive')
                plt.hist(preds[y == 0], alpha=0.5, bins=bins, density=True, label='Negative')
                plt.grid(True)
                plt.legend()
                plt.xlabel('Probability')
                plt.ylabel('Density')
                plt.tight_layout()
                pdf.savefig()
                plt.close()

            elif self.response_distributions[self.response] == 'linear':
                logger.info('Extra Diagnostics for {}'.format(self.response))
                # For continuous, plot a distribution of residuals with r-squared and MSE
                residuals = y - preds
                mse = np.sum(residuals ** 2)
                plt.figure(figsize=(8, 8))
                plt.hist(residuals, label='MSE: {m:0.3f}'.format(m=mse), bins=20)
                plt.legend()
                plt.xlabel('Residuals')
                plt.ylabel('Counts')
                plt.grid(True)
                plt.tight_layout()
                pdf.savefig()
                plt.close()
Example No. 34
import argparse
import shlex
import subprocess

from config import get_accounts, logger, LOG_LEVEL
from vjudge.main import VJudge

parser = argparse.ArgumentParser()
parser.add_argument('-b',
                    required=False,
                    dest='address',
                    default='localhost:5000',
                    help='address to bind')
args = parser.parse_args()

p = subprocess.Popen(
    shlex.split(
        f"gunicorn -w 2 -k gevent --logger-class config.GLogger --log-level {LOG_LEVEL} "
        f"-b '{args.address}' manage:app"))

try:
    normal_accounts, contest_accounts = get_accounts()
    vjudge = VJudge(normal_accounts=normal_accounts,
                    contest_accounts=contest_accounts)
    vjudge.start()
except KeyboardInterrupt:
    logger.info('VJudge exiting')
finally:
    p.terminate()
    p.wait()
Example No. 35
    t = t.cuda()
    for _ in range(1):
        c = t.clone()
        dist.all_reduce(c, dist.ReduceOp.SUM)
        t.set_(c)
    logger.info("{} value: {}".format(rank, t))


def init_processes(rank, size, fn, backend='nccl'):
    """ Initialize the distributed environment. """
    os.environ['MASTER_ADDR'] = '127.0.0.1'
    os.environ['MASTER_PORT'] = '29500'
    dist.init_process_group(backend, rank=rank, world_size=size)
    fn(rank, size)

def start(size):
    processes = []
    for rank in range(size):
        p = Process(target=init_processes, args=(rank, size, run))
        p.start()
        processes.append(p)

    for p in processes:
        p.join()

if __name__ == "__main__":
    set_global_variables()
    args = get_args()
    logger.info("args: {}".format(args))
    start(args.size)
Example No. 36
    def process_phone_sms_verify_pc(self):
        """
        # 手机短信验证码验证
        :param kwargs:
        :return: 成功返回 True, 失败返回 False
        提示  为了调试方便 调整为FALSE
        """
        try:
            logger.info("手机短信验证处理中")
            try:
                tel_code = self.driver.find_element_by_css_selector(
                    self.get_key_words(7, "css", 1))
                if tel_code:
                    sub_button = self.driver.find_element_by_css_selector(
                        'button[id="checkpointSecondaryButton"]')
                    self.click(sub_button)
            except:
                pass

            try:
                tel_button = self.driver.find_elements_by_css_selector(
                    'i[class^="img sp_"]')
                if not tel_button:
                    return False, -1
            except:
                pass
            self.click(tel_button[3])
            tel_stutas = self.driver.find_elements_by_css_selector(
                'a[role="menuitemcheckbox"]')
            self.click(tel_stutas[45])

            self.sleep()
            send_tel = self.driver.find_element_by_css_selector(
                'input[type="tel"]')
            self.send_keys(send_tel, "16500000000")

            self.sleep()
            submit_button = self.driver.find_element_by_css_selector(
                'button[id="checkpointSubmitButton"]')
            self.click(submit_button)

            # Submission failed
            try:
                submit_error = WebDriverWait(self.driver, 6).until(
                    EC.presence_of_element_located(
                        (By.CSS_SELECTOR,
                         'input[data-xui-error-position="above"]')))
                if submit_error:
                    logger.error("请填写正确的手机号码")
                    return False, 7
            except:
                pass
            # SMS verification code
            self.sleep()
            tel_code = self.driver.find_element_by_css_selector(
                'input[name="p_c"]')
            self.send_keys(tel_code, "414141")

            self.sleep()
            submit_button = self.driver.find_element_by_css_selector(
                'button[id="checkpointSubmitButton"]')
            self.click(submit_button)

        except Exception as e:
            logger.exception("处理手机短信验证处理异常, e={}".format(e))
            return False, 7
        logger.info("处理手机短信验证处理完成")
        return False, 7
Example No. 37
            try:
                side1_detector_action, side1_fighter_action = agent1.get_action(
                    side1_obs_dict, step_cnt)

                # TODO: change our side's actions
                # for i in range(len(side1_fighter_action)):
                #     if args.agent1 == 'fix_rule':
                #         # rule
                #         side1_fighter_action[i]['r_fre_point'] = i + 1
                #     else:
                #         # model
                #         # side1_fighter_action[i][1] = i+1
                #         side1_fighter_action[i][1] = random.randint(1, 10)
                #         # side1_fighter_action[i][2] = 11

                logger.info('side1 actions:')
                for i, act in enumerate(side1_fighter_action):
                    logger.info('fight_{}:{}'.format(i + 1, act))
            except:
                env.set_surrender(0)
                # reward
                o_detector_reward, o_fighter_reward, o_game_reward, e_detector_reward, e_fighter_reward, e_game_reward = env.get_reward(
                )
                agent1_crash_list.append(round_cnt)
                print('Side 1 crashed!')
                side1_obs_raw, side2_obs_raw = env.get_obs_raw()
                side1_detector_obs_raw_list = side1_obs_raw[
                    'detector_obs_list']
                side1_fighter_obs_raw_list = side1_obs_raw['fighter_obs_list']
                side1_joint_obs_raw_dict = side1_obs_raw['joint_obs_dict']
                side2_detector_obs_raw_list = side2_obs_raw[
Example No. 38
    def monitor_vm(self, vm, all_vms):
        """
		Main function of the monitor
		"""
        try:
            vm_pct_free_memory = float(vm.free_memory) / float(
                vm.total_memory) * 100.0

            if vm.id not in self.vm_data:
                self.vm_data[vm.id] = VMMonitorData(vm.id)

            if self.vm_data[vm.id].mem_diff is None:
                self.vm_data[vm.id].mem_diff = vm.real_memory - vm.total_memory

            vmid_msg = "VMID " + str(vm.id) + ": "
            vm.host = self.get_host_info(vm.host.id)

            logger.info(vmid_msg + "Real Memory: " + str(vm.real_memory))
            logger.info(vmid_msg + "Total Memory: " + str(vm.total_memory))
            logger.info(vmid_msg + "Free Memory: %d (%.2f%%)" %
                        (vm.free_memory, vm_pct_free_memory))

            mem_over_ratio = Config.MEM_OVER
            if vm.mem_over_ratio:
                mem_over_ratio = vm.mem_over_ratio

            if vm_pct_free_memory < (mem_over_ratio - Config.MEM_MARGIN
                                     ) or vm_pct_free_memory > (
                                         mem_over_ratio + Config.MEM_MARGIN):
                now = time.time()

                logger.debug(
                    vmid_msg +
                    "VM %s has %.2f%% of free memory, change the memory size" %
                    (vm.id, vm_pct_free_memory))
                if self.vm_data[vm.id].last_set_mem is not None:
                    logger.debug(vmid_msg +
                                 "Last memory change was %s secs ago." %
                                 (now - self.vm_data[vm.id].last_set_mem))
                else:
                    self.vm_data[vm.id].original_mem = vm.allocated_memory
                    logger.debug(
                        vmid_msg +
                        "The memory of this VM has been never modified. Store the initial memory  : "
                        + str(self.vm_data[vm.id].original_mem))
                    self.vm_data[vm.id].last_set_mem = now

                if (now - self.vm_data[vm.id].last_set_mem) < Config.COOLDOWN:
                    logger.debug(
                        vmid_msg +
                        "It is in cooldown period. No changing the memory.")
                else:
                    used_mem = vm.total_memory - vm.free_memory
                    min_free_memory = Config.MIN_FREE_MEMORY
                    # check if the VM has defined a specific MIN_FREE_MEMORY value
                    if vm.min_free_mem:
                        min_free_memory = vm.min_free_mem
                    # if there is no free memory, use an exponential backoff approach
                    if vm.free_memory <= min_free_memory:
                        logger.debug(vmid_msg + "No free memory in the VM!")
                        if self.vm_data[vm.id].no_free_memory_count > 1:
                            # if this is the third time with no free memory use the original size
                            logger.debug(
                                vmid_msg +
                                "Increase the mem to the original size.")
                            new_mem = self.vm_data[vm.id].original_mem
                            self.vm_data[vm.id].no_free_memory_count = 0
                        else:
                            logger.debug(
                                vmid_msg +
                                "Increase the mem with 50% of the original.")
                            new_mem = int(used_mem +
                                          (self.vm_data[vm.id].original_mem -
                                           used_mem) * 0.5)
                            self.vm_data[vm.id].no_free_memory_count += 1
                    else:
                        divider = 1.0 - (mem_over_ratio / 100.0)
                        logger.debug(vmid_msg +
                                     "The used memory %d is divided by %.2f" %
                                     (int(used_mem), divider))
                        new_mem = int(used_mem / divider)

                    # Check for minimum memory
                    if new_mem < Config.MEM_MIN:
                        new_mem = Config.MEM_MIN

                    # add diff to new_mem value and to total_memory to make it real_memory (vm.real_memory has delays between updates)
                    new_mem += self.vm_data[vm.id].mem_diff
                    vm.total_memory += self.vm_data[vm.id].mem_diff

                    # We never set more memory than the initial amount
                    if new_mem > self.vm_data[vm.id].original_mem:
                        new_mem = self.vm_data[vm.id].original_mem

                    if abs(int(vm.total_memory) -
                           new_mem) < Config.MEM_DIFF_TO_CHANGE:
                        logger.debug(
                            vmid_msg +
                            "Not changing the memory. Too small difference.")
                    else:
                        logger.debug(vmid_msg +
                                     "Changing the memory from %d to %d" %
                                     (vm.total_memory, new_mem))
                        if new_mem > vm.total_memory:
                            # If we increase the memory we must check if the host has enough free space
                            if not self.host_has_memory_free(
                                    vm.host, new_mem - vm.total_memory):
                                # The host has not enough free memory. Let's try to migrate a VM.
                                logger.debug(vmid_msg + "The host " +
                                             vm.host.name +
                                             " has not enough free memory!")
                                if Config.MIGRATION:
                                    logger.debug(vmid_msg +
                                                 "Let's try to migrate a VM.")
                                    if vm.host.id in self.last_migration and (
                                            now -
                                            self.last_migration[vm.host.id]
                                    ) < Config.MIGRATION_COOLDOWN:
                                        logger.debug(
                                            "The host %s is in migration cooldown period, let's wait.."
                                            % vm.host.name)
                                    else:
                                        if self.migrate_vm(
                                                vm.id, vm.host, all_vms):
                                            logger.debug(
                                                "A VM has been migrated from host %d. Store the timestamp."
                                                % vm.host.id)
                                            self.last_migration[
                                                vm.host.id] = now
                                else:
                                    logger.debug(vmid_msg +
                                                 "Migration is disabled.")
                                    if Config.FORCE_INCREASE_MEMORY:
                                        logger.debug(
                                            vmid_msg +
                                            "But Force increase memory is activated. Changing memory."
                                        )
                                        self.change_memory(
                                            vm.id, vm.host, new_mem)
                                        self.vm_data[vm.id].last_set_mem = now
                                    else:
                                        logger.debug(vmid_msg +
                                                     "Not increase memory.")
                            else:
                                logger.debug(vmid_msg + "The host " +
                                             vm.host.name +
                                             " has enough free memory.")
                                self.change_memory(vm.id, vm.host, new_mem)
                                self.vm_data[vm.id].last_set_mem = now
                        else:
                            self.change_memory(vm.id, vm.host, new_mem)
                            self.vm_data[vm.id].last_set_mem = now
        except:
            logger.exception("Error in monitor loop!")
Example No. 39
def run_main():
    logger.info("... Starting Raspberry Pi Power Monitor")
    logger.info("Press Ctrl-c to quit...")
    # The following empty dictionaries will hold the respective calculated values at the end of each polling cycle, which are then averaged prior to storing the value to the DB.
    solar_power_values = dict(power=[], pf=[], current=[])
    home_load_values = dict(power=[], pf=[], current=[])
    net_power_values = dict(power=[], current=[])
    ct0_dict = dict(power=[], pf=[], current=[])
    ct1_dict = dict(power=[], pf=[], current=[])
    ct2_dict = dict(power=[], pf=[], current=[])
    ct3_dict = dict(power=[], pf=[], current=[])
    ct4_dict = dict(power=[], pf=[], current=[])
    ct5_dict = dict(power=[], pf=[], current=[])
    rms_voltages = []
    i = 0  # Counter for aggregate function

    while True:
        try:
            board_voltage = get_board_voltage()
            samples = collect_data(2000)
            poll_time = samples['time']
            ct0_samples = samples['ct0']
            ct1_samples = samples['ct1']
            ct2_samples = samples['ct2']
            ct3_samples = samples['ct3']
            ct4_samples = samples['ct4']
            ct5_samples = samples['ct5']
            v_samples = samples['voltage']
            rebuilt_waves = rebuild_waves(samples, ct0_phasecal, ct1_phasecal,
                                          ct2_phasecal, ct3_phasecal,
                                          ct4_phasecal, ct5_phasecal)
            results = calculate_power(rebuilt_waves, board_voltage)

            # # RMS calculation for phase correction only - this is not needed after everything is tuned. The following code is used to compare the RMS power to the calculated real power.
            # # Ideally, you want the RMS power to equal the real power when you are measuring a purely resistive load.
            # rms_power_0 = round(results['ct0']['current'] * results['ct0']['voltage'], 2)  # AKA apparent power
            # rms_power_1 = round(results['ct1']['current'] * results['ct1']['voltage'], 2)  # AKA apparent power
            # rms_power_2 = round(results['ct2']['current'] * results['ct2']['voltage'], 2)  # AKA apparent power
            # rms_power_3 = round(results['ct3']['current'] * results['ct3']['voltage'], 2)  # AKA apparent power
            # rms_power_4 = round(results['ct4']['current'] * results['ct4']['voltage'], 2)  # AKA apparent power
            # rms_power_5 = round(results['ct5']['current'] * results['ct5']['voltage'], 2)  # AKA apparent power

            # Prepare values for database storage
            grid_0_power = results['ct0']['power']  # CT0 Real Power
            grid_1_power = results['ct1']['power']  # CT1 Real Power
            grid_2_power = results['ct2']['power']  # CT2 Real Power
            grid_3_power = results['ct3']['power']  # CT3 Real Power
            grid_4_power = results['ct4']['power']  # CT4 Real Power
            grid_5_power = results['ct5']['power']  # CT5 Real Power

            grid_0_current = results['ct0']['current']  # CT0 Current
            grid_1_current = results['ct1']['current']  # CT1 Current
            grid_2_current = results['ct2']['current']  # CT2 Current
            grid_3_current = results['ct3']['current']  # CT3 Current
            grid_4_current = results['ct4']['current']  # CT4 Current
            grid_5_current = results['ct5']['current']  # CT5 Current

            # If you are monitoring solar/generator inputs to your panel, specify which CT number(s) you are using, and uncomment the commented lines.
            solar_power = 0
            solar_current = 0
            solar_pf = 0
            # solar_power = results['ct3']['power']
            # solar_current = results['ct3']['current']
            # solar_pf = results['ct3']['pf']
            voltage = results['voltage']

            # Set solar power and current to zero if the solar power is under 20W.
            if solar_power < 20:
                solar_power = 0
                solar_current = 0
                solar_pf = 0

            # Determine if the system is net producing or net consuming right now by looking at the two panel mains.
            # Since the current measured is always positive, we need to add a negative sign to the amperage value if we're exporting power.
            if grid_0_power < 0:
                grid_0_current = grid_0_current * -1
            if grid_1_power < 0:
                grid_1_current = grid_1_current * -1
            if solar_power > 0:
                solar_current = solar_current * -1

            # Unless your specific panel setup matches mine exactly, the following four lines will likely need to be re-written:
            home_consumption_power = grid_0_power + grid_1_power + grid_2_power + grid_3_power + grid_4_power + grid_5_power + solar_power
            net_power = home_consumption_power - solar_power
            home_consumption_current = grid_0_current + grid_1_current + grid_2_current + grid_3_current + grid_4_current + grid_5_current - solar_current
            net_current = grid_0_current + grid_1_current + grid_2_current + grid_3_current + grid_4_current + grid_5_current + solar_current

            if net_power < 0:
                current_status = "Producing"
            else:
                current_status = "Consuming"

            # Average 2 readings before sending to db
            if i < 2:
                solar_power_values['power'].append(solar_power)
                solar_power_values['current'].append(solar_current)
                solar_power_values['pf'].append(solar_pf)

                home_load_values['power'].append(home_consumption_power)
                home_load_values['current'].append(home_consumption_current)
                net_power_values['power'].append(net_power)
                net_power_values['current'].append(net_current)

                ct0_dict['power'].append(results['ct0']['power'])
                ct0_dict['current'].append(results['ct0']['current'])
                ct0_dict['pf'].append(results['ct0']['pf'])
                ct1_dict['power'].append(results['ct1']['power'])
                ct1_dict['current'].append(results['ct1']['current'])
                ct1_dict['pf'].append(results['ct1']['pf'])
                ct2_dict['power'].append(results['ct2']['power'])
                ct2_dict['current'].append(results['ct2']['current'])
                ct2_dict['pf'].append(results['ct2']['pf'])
                ct3_dict['power'].append(results['ct3']['power'])
                ct3_dict['current'].append(results['ct3']['current'])
                ct3_dict['pf'].append(results['ct3']['pf'])
                ct4_dict['power'].append(results['ct4']['power'])
                ct4_dict['current'].append(results['ct4']['current'])
                ct4_dict['pf'].append(results['ct4']['pf'])
                ct5_dict['power'].append(results['ct5']['power'])
                ct5_dict['current'].append(results['ct5']['current'])
                ct5_dict['pf'].append(results['ct5']['pf'])
                rms_voltages.append(voltage)
                i += 1

            else:  # Calculate the average, send the result to InfluxDB, and reset the dictionaries for the next 2 sets of data.
                infl.write_to_influx(
                    solar_power_values,
                    home_load_values,
                    net_power_values,
                    ct0_dict,
                    ct1_dict,
                    ct2_dict,
                    ct3_dict,
                    ct4_dict,
                    ct5_dict,
                    poll_time,
                    i,
                    rms_voltages,
                )
                solar_power_values = dict(power=[], pf=[], current=[])
                home_load_values = dict(power=[], pf=[], current=[])
                net_power_values = dict(power=[], current=[])
                ct0_dict = dict(power=[], pf=[], current=[])
                ct1_dict = dict(power=[], pf=[], current=[])
                ct2_dict = dict(power=[], pf=[], current=[])
                ct3_dict = dict(power=[], pf=[], current=[])
                ct4_dict = dict(power=[], pf=[], current=[])
                ct5_dict = dict(power=[], pf=[], current=[])
                rms_voltages = []
                i = 0

                if logger.handlers[0].level == 10:
                    t = PrettyTable(
                        ['', 'CT0', 'CT1', 'CT2', 'CT3', 'CT4', 'CT5'])
                    t.add_row([
                        'Watts',
                        round(results['ct0']['power'], 3),
                        round(results['ct1']['power'], 3),
                        round(results['ct2']['power'], 3),
                        round(results['ct3']['power'], 3),
                        round(results['ct4']['power'], 3),
                        round(results['ct5']['power'], 3)
                    ])
                    t.add_row([
                        'Current',
                        round(results['ct0']['current'], 3),
                        round(results['ct1']['current'], 3),
                        round(results['ct2']['current'], 3),
                        round(results['ct3']['current'], 3),
                        round(results['ct4']['current'], 3),
                        round(results['ct5']['current'], 3)
                    ])
                    t.add_row([
                        'P.F.',
                        round(results['ct0']['pf'], 3),
                        round(results['ct1']['pf'], 3),
                        round(results['ct2']['pf'], 3),
                        round(results['ct3']['pf'], 3),
                        round(results['ct4']['pf'], 3),
                        round(results['ct5']['pf'], 3)
                    ])
                    t.add_row([
                        'Voltage',
                        round(results['voltage'], 3), '', '', '', '', ''
                    ])
                    s = t.get_string()
                    logger.debug('\n' + s)

            #sleep(0.1)

        except KeyboardInterrupt:
            infl.close_db()
            sys.exit()
Exemplo n.º 40
0
 def __init__(self, bot):
     logger.info('==================== Start application... ====================')
     self._bot = bot
     self._bot_current_elapsed_time = time.time()
     self.pairs_cache = {}       # key - str that contains events ids, value - date when found
     os.environ['MOZ_HEADLESS'] = '1'
Exemplo n.º 41
0
    def verify_changes(self, changes):
        _timestamp = changes['timestamp']
        target = GeneDocMongoDBBackend(self._target_col)
        if changes['add']:
            logging.info('Verifying "add"...')
            # _cnt = self._target_col.find({'_id': {'$in': changes['add']}}).count()
            _cnt = target.count_from_ids(changes['add'])
            if _cnt == len(changes['add']):
                logging.info('...{}=={}...OK'.format(_cnt,
                                                     len(changes['add'])))
            else:
                logging.info('...{}!={}...ERROR!!!'.format(
                    _cnt, len(changes['add'])))
        if changes['delete']:
            logging.info('Verifying "delete"...')
            # _cnt = self._target_col.find({'_id': {'$in': changes['delete']}}).count()
            _cnt = target.count_from_ids(changes['delete'])
            if _cnt == 0:
                logging.info('...{}==0...OK'.format(_cnt))
            else:
                logging.info('...{}!=0...ERROR!!!'.format(_cnt))

        logging.info("Verifying all docs have timestamp...")
        _cnt = self._target_col.find({'_timestamp': {'$exists': True}}).count()
        _cnt_all = self._target_col.count()
        if _cnt == _cnt_all:
            logging.info('{}=={}...OK'.format(_cnt, _cnt_all))
        else:
            logging.info('ERROR!!!\n\t Should be "{}", but got "{}"'.format(
                _cnt_all, _cnt))

        logging.info("Verifying all new docs have updated timestamp...")
        cur = self._target_col.find({'_timestamp': {
            '$gte': _timestamp
        }},
                                    projection={})
        _li1 = sorted(changes['add'] + [x['_id'] for x in changes['update']])
        _li2 = sorted([x['_id'] for x in cur])
        if _li1 == _li2:
            logging.info("{}=={}...OK".format(len(_li1), len(_li2)))
        else:
            logging.info('ERROR!!!\n\t Should be "{}", but got "{}"'.format(
                len(_li1), len(_li2)))
Exemplo n.º 42
0
    feat = pd.read_csv('/home/ubuntu/projects/kddcup2019track1/input/data_set_phase1/var_dist_min.csv')
    trn = trn.merge(feat, how='left', on='sid')
    tst = tst.merge(feat, how='left', on='sid')

    trn.drop(['sid', 'click_mode'], axis=1, inplace=True)
    tst.drop(['sid', 'click_mode'], axis=1, inplace=True)

    return trn, y, tst, sub


if __name__ == "__main__":
    df = merge_raw_data()

    if not os.path.exists(config.plan_file):
        logger.info('extracting plans from JSON objects.')
        plans = extract_plans(df)
        plans.to_csv(config.plan_file, index=False)

    if not os.path.exists(config.pid_feature_file):
        logger.info('generating pid features.')
        feat = generate_pid_features(df)
        feat.to_csv(config.pid_feature_file, index=False)

    if not os.path.exists(config.od_feature_file):
        logger.info('generating od features.')
        feat = generate_od_features(df)
        feat.to_csv(config.od_feature_file, index=False)

    if not os.path.exists(config.od_cluster_feature_file):
        logger.info('generating od cluster features.')
Exemplo n.º 43
0
    def Scrape(self):
        logger.info('Scraping %s' % self.request.get_full_url())

        feed = feedparser.parse(urllib2.urlopen(self.request))
        for entry in feed['entries']:
            pass
Exemplo n.º 44
0
 def get_bert_embeddings(self, processed_tweets, batch_size=128):
     logger.info("creating bert encodings")
     encoded = np.array(
         self.bert.encode(processed_tweets, batch_size=batch_size))
     logger.info("creating bert encodings complete")
     return encoded
Exemplo n.º 45
0
    def init(self):
        if self.mode == "TRAIN":
            filters = {"lang": "en"}
        else:
            filters = {}
        db = MongoHelper(MONGODB_HOST_PORT, db_name=MONGO_DB_NAME)
        logger.info("loading data")
        data = db.select(MONGODB_TWEETS_COLLECTION,
                         filters,
                         limit=self.data_len)
        logger.info(f"loaded data {len(data)} items")
        processed_tweets = [
            d.get('processed') for d in data
            if d.get("processed") is not None and d.get("processed") != ""
        ]
        if self.shuffle:
            random.shuffle(processed_tweets)
        self.tweets = processed_tweets
        self.n_batches = int(np.ceil(len(processed_tweets) / self.batch_size))

        concatenate_text = ' '.join(processed_tweets)
        all_words = list(concatenate_text.split())
        word_count = {}
        for w in all_words:
            count = word_count.get(w, 0)
            count += 1
            word_count[w] = count
        self.vocab = [
            w for w, i in word_count.items() if i >= MIN_WORD_FREQUENCY
        ] + [OOV_SYMBOL]
        logger.info(f"Vocab size {len(self.vocab)}")
        #self.vocab = list(set(all_words))

        label_encoder = LabelEncoder()
        logger.info("creating label encoding")
        label_encoder.fit(self.vocab)
        self.label_encoder = label_encoder

        logger.info("creating label encoding complete")
        transformed_labels = label_encoder.transform(self.vocab)
        for vocab, label in zip(self.vocab, transformed_labels):
            self.vocab_dict[vocab] = label
        #self.index_dd = np.array(list(map(lambda y: np.array(list(map(lambda x: self.vocab_dict[x], y.split()))), processed_tweets)))
        self.idx2token = {v: k for (k, v) in self.vocab_dict.items()}
        logger.info("encoding all data")
        self.bert = SentenceTransformer(self.bert_model_name)
        self.batch_generator = self.get_batch()
        logger.info("Initializing complete")
Exemplo n.º 46
0
 def getAll(self):
     r = self.wintest.run_cmd('dnscmd /zoneprint {zone}'.format(zone=self.zone))
     res = str(r.std_out, encoding="utf-8")
     logger.info(res)
Exemplo n.º 47
0
def train(train_loader, model, optimizer, epoch, logger, writer):
    model.train()  # train mode (dropout and batchnorm is used)

    losses = AverageMeter()
    times = AverageMeter()

    start = time.time()

    # Batches
    for i, (data) in enumerate(train_loader):
        # Move to GPU, if available
        padded_input, padded_target, input_lengths = data
        padded_input = padded_input.to(device)
        padded_target = padded_target.to(device)
        input_lengths = input_lengths.to(device)

        # Forward prop.
        pred, gold = model(padded_input, input_lengths, padded_target)
        loss, n_correct = cal_performance(pred,
                                          gold,
                                          smoothing=args.label_smoothing)
        try:
            assert (not math.isnan(loss.item()))
        except AssertionError:
            print('n_correct: ' + str(n_correct))
            print('data: ' + str(data))
            continue

        # Back prop.
        optimizer.zero_grad()
        loss.backward()

        # Clip gradients
        clip_gradient(optimizer.optimizer, grad_clip)

        # Update weights
        optimizer.step()

        # Keep track of metrics
        elapsed = time.time() - start
        start = time.time()

        losses.update(loss.item())
        times.update(elapsed)

        # Print status
        if i % print_freq == 0:
            logger.info('Epoch: [{0}][{1}/{2}]\t'
                        'Batch time {time.val:.5f} ({time.avg:.5f})\t'
                        'Loss {loss.val:.5f} ({loss.avg:.5f})'.format(
                            epoch,
                            i,
                            len(train_loader),
                            time=times,
                            loss=losses))
            writer.add_scalar('step_num/train_loss', losses.avg,
                              optimizer.step_num)
            writer.add_scalar('step_num/learning_rate', optimizer.lr,
                              optimizer.step_num)

    return losses.avg
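clip_gradient above is the project's own helper, not a torch built-in. A common implementation, assumed here, clamps every parameter gradient to [-grad_clip, grad_clip], which PyTorch can also do directly:

import torch

def clip_gradient(optimizer, grad_clip):
    # Clamp every parameter gradient handled by the optimizer to [-grad_clip, grad_clip].
    for group in optimizer.param_groups:
        for param in group['params']:
            if param.grad is not None:
                param.grad.data.clamp_(-grad_clip, grad_clip)

# The same effect with the built-in utility, given the model's parameters directly:
# torch.nn.utils.clip_grad_value_(model.parameters(), grad_clip)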
Exemplo n.º 48
0
    def get_action(self, obs_dict, step_cnt):
        """
        get actions
        :param obs_dict:
        :param step_cnt:
        :return:
        """
        detector_action = []
        fighter_action = []
        for y in range(self.fighter_num):
            tmp_course = obs_dict['fighter'][y]['course']  # (1, )
            tmp_pos = obs_dict['fighter'][y]['pos']  # (2, )
            tmp_r_visible_pos = obs_dict['fighter'][y][
                'r_visible_pos']  # (10, 2)
            tmp_l_missile = obs_dict['fighter'][y]['l_missile']  # rule use
            tmp_s_missile = obs_dict['fighter'][y]['s_missile']  # rule use
            tmp_j_visible_fp = obs_dict['fighter'][y][
                'j_visible_fp']  # rule use
            tmp_j_visible_dir = obs_dict['fighter'][y][
                'j_visible_dir']  # (10, 1)
            tmp_g_striking_pos = obs_dict['fighter'][y][
                'g_striking_pos']  # (10, 2)
            tmp_r_visible_dis = obs_dict['fighter'][y][
                'r_visible_dis']  # (10, 1)
            tmp_striking_id = obs_dict['fighter'][y]['striking_id']
            # model obs change: normalize each component
            if step_cnt > STEP_BEFORE_TRAIN:
                course = tmp_course / 359.
                pos = tmp_pos / self.size_x
                l_missile = tmp_l_missile / 2.
                s_missile = tmp_s_missile / 4.
                r_visible_pos = tmp_r_visible_pos.reshape(
                    1, -1)[0] / self.size_x  # (20,)
                j_visible_dir = tmp_j_visible_dir.reshape(1,
                                                          -1)[0] / 359  # (10,)
                # g_striking_pos = tmp_g_striking_pos.reshape(1, -1)[0] / self.size_x  # (20, )  # todo
                striking_id = tmp_striking_id.reshape(1, -1)[0] / 1
                obs = np.concatenate(
                    (course, pos, l_missile, s_missile, r_visible_pos,
                     j_visible_dir, striking_id),
                    axis=0)
                logger.debug('obs: {}'.format(obs))

            true_action = np.array([0, 1, 0, 0], dtype=np.int32)
            if obs_dict['fighter'][y]['alive']:
                # rule policy
                true_action = fighter_rule(tmp_course, tmp_pos, tmp_l_missile,
                                           tmp_s_missile, tmp_r_visible_pos,
                                           tmp_r_visible_dis,
                                           tmp_j_visible_dir, tmp_j_visible_fp,
                                           tmp_striking_id, tmp_g_striking_pos,
                                           step_cnt)
                logger.debug('true action rule out: {}'.format(true_action))
                # model policy
                if step_cnt > STEP_BEFORE_TRAIN and not any(
                    [any(r_visible_pos >= 0),
                     any(j_visible_dir >= 0)]):
                    tmp_action = self.maddpg.select_action(y, obs)
                    logger.debug('tmp action: {}'.format(tmp_action))
                    # Append the action: convert the model output into a course deflection
                    tmp_action_i = np.argmax(tmp_action)
                    logger.debug('tmp action i: {}'.format(tmp_action_i))
                    true_action[0] = tmp_action_i
                    logger.debug('course: {}'.format(true_action[0]))

            logger.info('true action: {}'.format(true_action))
            fighter_action.append(true_action)
        fighter_action = np.array(fighter_action)

        return detector_action, fighter_action
Exemplo n.º 49
0
def main(params):
    t1 = time.time()
    # environment
    env = gym.make("Pendulum-v0")
    # env = NormalizedActions(env)
    env.seed(1)
    # ou_noise = OUNoise(env.action_space)
    ou_noise = GaussianNoise(env.action_space)
    n_states = env.observation_space.shape[0]  # (3, )
    n_actions = env.action_space.shape[0]  # 1 action; native range -2 < x < 2 (-1 < x < 1 with NormalizedActions)
    logger.info("obs num: %d" % n_states)
    logger.info("act num: %d" % n_actions)
    logger.info("act high: %d" % env.action_space.high)
    logger.info("act low: %d" % env.action_space.low)

    RL = DDPG(dim_obs=n_states,
              dim_act=n_actions,
              actor_lr=params.actor_lr,
              critic_lr=params.critic_lr,
              gamma=params.gamma,
              capacity=params.memory_capacity,
              batch_size=params.batch_size,
              tau=params.tau,
              hidden_size=params.hidden_size)

    # execution
    total_rewards = []
    moving_average_rewards = []
    moving_average_reward = 0.0
    a_loss_list = []
    c_loss_list = []
    total_cnt = 0
    for i_episode in range(1, params.max_eps + 1):
        total_reward = 0.0  # total reward across all agents for this episode
        # eps_r = 0.
        obs = env.reset()
        ou_noise.reset()
        for i_step in range(params.max_steps):
            # env.render()
            act = RL.select_action(obs)
            # logger.info("select act: {}".format(act))
            action = np.squeeze(act)
            action = ou_noise.get_action(action, i_step)
            # logger.info("select action: {}".format(act))
            # action = env.action_space.sample()
            next_obs, r, done, _ = env.step(action)
            total_reward += r
            # if done: r=0
            RL.memory.push(obs, action, next_obs, r, done)
            obs = next_obs

            if total_cnt > params.batch_size:
                a_loss, c_loss = RL.learn()
                a_loss_list.append(a_loss)
                c_loss_list.append(c_loss)

            if done:
                break
            total_cnt += 1

        moving_average_reward = total_reward if i_episode == 1 else moving_average_reward * 0.9 + total_reward * 0.1
        moving_average_rewards.append(moving_average_reward)
        total_rewards.append(total_reward)
        logger.info('episode:{}, reward:{}, step:{}'.format(
            i_episode, total_reward, i_step + 1))
    if not IS_TEST:
        label = '' or 'gaussian'
        # file_w(total_rewards, 'reward@{}@.txt'.format(label))
        file_w(moving_average_rewards,
               'moving_average_reward@{}@.txt'.format(label))
        # file_w(a_loss_list, 'a_loss@{}@.txt'.format(label))
        # file_w(c_loss_list, 'c_loss@{}@.txt'.format(label))

    env.close()
    t2 = time.time()
    logger.info('********** training finished ********** time: %f' % (t2 - t1))
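The moving_average_reward update above is an exponential moving average that keeps 90% of the history and blends in 10% of the newest episode reward. A quick worked check with made-up reward values:

rewards = [-1500.0, -1200.0, -900.0]
moving_average = None
for episode, reward in enumerate(rewards, start=1):
    # First episode seeds the average; later episodes blend 90% history with 10% new reward.
    moving_average = reward if episode == 1 else moving_average * 0.9 + reward * 0.1
    print(episode, round(moving_average, 1))
# 1 -1500.0
# 2 -1470.0
# 3 -1413.0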
Exemplo n.º 50
0
 def reset_training_data(self):
     """ reshuffle and reset the data"""
     logger.info("resetting training data")
     if self.shuffle:
         random.shuffle(self.tweets)
     self.batch_generator = self.get_batch()
Exemplo n.º 51
0
def train_net(args):
    torch.manual_seed(7)
    np.random.seed(7)
    checkpoint = args.checkpoint
    start_epoch = 0
    best_acc = float('-inf')
    writer = SummaryWriter()
    epochs_since_improvement = 0

    # Initialize / load checkpoint
    if checkpoint is None:
        if args.network == 'r18':
            model = resnet18(args)
        elif args.network == 'r34':
            model = resnet34(args)
        elif args.network == 'r50':
            model = resnet50(args)
        elif args.network == 'r101':
            model = resnet101(args)
        elif args.network == 'r152':
            model = resnet152(args)
        else:
            raise TypeError('network {} is not supported.'.format(
                args.network))

        if args.pretrained:
            model.load_state_dict(torch.load('insight-face-v3.pt'))

        model = nn.DataParallel(model)
        metric_fc = ArcMarginModel(args)
        metric_fc = nn.DataParallel(metric_fc)

        if args.optimizer == 'sgd':
            optimizer = torch.optim.SGD([{
                'params': model.parameters()
            }, {
                'params': metric_fc.parameters()
            }],
                                        lr=args.lr,
                                        momentum=args.mom,
                                        nesterov=True,
                                        weight_decay=args.weight_decay)
        else:
            optimizer = torch.optim.Adam([{
                'params': model.parameters()
            }, {
                'params': metric_fc.parameters()
            }],
                                         lr=args.lr,
                                         weight_decay=args.weight_decay)

    else:
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        epochs_since_improvement = checkpoint['epochs_since_improvement']
        model = checkpoint['model']
        metric_fc = checkpoint['metric_fc']
        optimizer = checkpoint['optimizer']

    # Move to GPU, if available
    model = model.to(device)
    metric_fc = metric_fc.to(device)

    # Loss function
    if args.focal_loss:
        criterion = FocalLoss(gamma=args.gamma)
    else:
        criterion = nn.CrossEntropyLoss()

    # Custom dataloaders
    # train_dataset = ArcFaceDataset('train')
    # train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True,
    #                                            num_workers=num_workers)
    train_dataset = ArcFaceDatasetBatched('train', img_batch_size)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size //
                                               img_batch_size,
                                               shuffle=True,
                                               num_workers=num_workers,
                                               collate_fn=batched_collate_fn)

    scheduler = MultiStepLR(optimizer, milestones=[8, 16, 24, 32], gamma=0.1)

    # Epochs
    for epoch in range(start_epoch, args.end_epoch):
        lr = optimizer.param_groups[0]['lr']
        logger.info('\nCurrent effective learning rate: {}\n'.format(lr))
        # print('Step num: {}\n'.format(optimizer.step_num))
        writer.add_scalar('model/learning_rate', lr, epoch)

        # One epoch's training
        train_loss, train_top1_accs = train(train_loader=train_loader,
                                            model=model,
                                            metric_fc=metric_fc,
                                            criterion=criterion,
                                            optimizer=optimizer,
                                            epoch=epoch)

        writer.add_scalar('model/train_loss', train_loss, epoch)
        writer.add_scalar('model/train_accuracy', train_top1_accs, epoch)

        scheduler.step(epoch)

        if args.eval_ds == "LFW":
            from lfw_eval import lfw_test

            # One epoch's validation
            accuracy, threshold = lfw_test(model)

        elif args.eval_ds == "Megaface":
            from megaface_eval import megaface_test

            accuracy = megaface_test(model)

        else:
            accuracy = -1

        writer.add_scalar('model/evaluation_accuracy', accuracy, epoch)

        # Check if there was an improvement
        is_best = accuracy > best_acc
        best_acc = max(accuracy, best_acc)
        if not is_best:
            epochs_since_improvement += 1
            logger.info("\nEpochs since last improvement: %d\n" %
                        (epochs_since_improvement, ))
        else:
            epochs_since_improvement = 0

        # Save checkpoint
        save_checkpoint(epoch, epochs_since_improvement, model, metric_fc,
                        optimizer, best_acc, is_best, scheduler)
Exemplo n.º 52
0
def parse_twitter(parser):
    """
    Main Twitter posts parsing function.
    :param parser: Parser
    :param keywords: list
    :return: None
    """
    start = time.time()
    print(start)
    keywords = parser.keywords.keywords
    browser = parser.browser
    available_time = 10 * 60 * 60 / len(keywords)  # 10 hours total, split evenly across keywords
    logger.info("Maximum available time per word: %s seconds" % available_time)

    yesterday = datetime.now() - timedelta(days=1)
    yesterday = yesterday.strftime("  %d").replace(" 0", "")
    logger.info("Starting parsing process. yesterday: %str_check", yesterday)
    send("Starting parsing process.")
    proxies_iterations = 0

    for ind, keyword in enumerate(keywords):
        print(keyword)
        i = 0
        loaded = False
        while not loaded:
            try:
                url = 'https://twitter.com/search?q=' + keyword + '%20lang%3Auk&f=live'
                browser.get(url)
                WebDriverWait(browser, 7).until(
                    expected_conditions.presence_of_element_located(
                        (By.CSS_SELECTOR,
                         'section[aria-labelledby="accessible-list-0"]')))
                loaded = True
            except:
                logger.error("Error with proxy! Trying %s time" % i)
                i += 1
                proxies_iterations += 1
                if i % 20 == 0:
                    logger.info("Refreshing proxy list")
                    parser.browser = parser.browser_setup(
                        iterator=proxies_iterations, update_proxies=True)
                elif i % 2 == 0:
                    parser.browser = parser.browser_setup(proxies_iterations)

        if not loaded:
            continue

        main_start = time.time()
        print(main_start)

        all_tweets = set()
        finished = False
        last_len = 0
        stop_parsing_count = 0
        iterations = 0
        while not finished and time.time() - main_start < available_time * (
                ind + 1):
            finished = parse_tweets(browser, all_tweets, parser)
            if finished == 'error':
                logger.info("IP Address Error. Changing it...")
                send("IP Address Error. Changing it...")
                proxies_iterations += 1
                parser.browser = parser.browser_setup(proxies_iterations)
                finished = True
                continue
            browser.execute_script(
                "window.scrollTo(0,  document.body.scrollHeight)")
            print(iterations, stop_parsing_count, len(all_tweets),
                  (time.time() - start) / 60, all_tweets)
            if len(all_tweets) == last_len:
                stop_parsing_count += 1
            else:
                stop_parsing_count = 0
            if stop_parsing_count == 100:
                logger.info("IP Address exhausted. Changing it...")
                send("IP Address exhausted. Changing it...")
                send("Parsed %s tweets for word %s" % (last_len, keyword))
                proxies_iterations += 1
                parser.browser = parser.browser_setup(proxies_iterations)
                finished = True
                continue
            iterations += 1
            last_len = len(all_tweets)

        send("Parsed %s tweets for word %s. Taken %s minutes" %
             (len(all_tweets), keyword, (time.time() - main_start) / 60))
Exemplo n.º 53
0
def curate_nfl():
    save_dir = os.path.join(Config.DATA_DIR, 'sports_bettors', 'curated',
                            'nfl')
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    # Remove spaces, dashes from feature labels
    def _clean_label(l):
        return re.sub(r'\.', '', re.sub(' ', '', re.sub('-', '', l)))

    curation = []
    logger.info('Importing NFL Data to Pandas')
    for team in tqdm([
            fn for fn in os.listdir(
                os.path.join(Config.DATA_DIR, 'sports_bettors', 'raw', 'nfl'))
            if '_raw' in fn
    ]):
        with open(
                os.path.join(Config.DATA_DIR, 'sports_bettors', 'raw', 'nfl',
                             team)) as fp:
            team_data = json.load(fp)

        # Iterate through dates for team
        for date, game_data in team_data.items():
            # Save date information of game
            curated = {
                'year': pd.Timestamp(date).year,
                'month': pd.Timestamp(date).month,
                'day': pd.Timestamp(date).day,
                'away_team': game_data['teams'][0],
                'home_team': game_data['teams'][1]
            }

            # Wrangle features for home team
            home_features = [
                'home_' + _clean_label(feature)
                for feature in game_data['features']
            ]
            home_vals = [val[0] for val in game_data['values']]
            curated.update(
                {label: val
                 for label, val in zip(home_features, home_vals)})

            # Wrangle features for away team
            away_features = [
                'away_' + _clean_label(feature)
                for feature in game_data['features']
            ]
            away_vals = [val[1] for val in game_data['values']]
            curated.update(
                {label: val
                 for label, val in zip(away_features, away_vals)})

            # Wrangle home points
            score_labels = ['pts_Q1', 'pts_Q2', 'pts_Q3', 'pts_Q4', 'points']
            home_score_labels = ['home_' + label for label in score_labels]
            home_scores = [
                val for val in game_data['quarter_values'][0][2:6] +
                [game_data['quarter_values'][0][-1]]
            ]
            curated.update({
                label: val
                for label, val in zip(home_score_labels, home_scores)
            })

            # Wrangle away points
            away_score_labels = ['away_' + label for label in score_labels]
            away_scores = [
                val for val in game_data['quarter_values'][1][2:6] +
                [game_data['quarter_values'][1][-1]]
            ]
            curated.update({
                label: val
                for label, val in zip(away_score_labels, away_scores)
            })

            # Gather
            curation.append(curated)
    df_curation = pd.DataFrame.from_records(curation).drop_duplicates()
    logger.info('Saving Curation with shape: {}'.format(df_curation.shape))
    df_curation.to_csv(os.path.join(save_dir, 'df_stats.csv'), index=False)

    # Clean up fields
    def _dash_curate(stat: str, idx: int):
        stat = re.sub('--', '-', stat)
        try:
            return int(float(
                stat.split('-')[idx])) if len(stat) > 2 else np.nan
        except Exception as err:
            logger.info('{}: {}'.format(stat, err))
            return np.nan

    for home_away in ['home', 'away']:
        # Clean up passing stats
        df_curation[home_away + '_passCompletions'] = df_curation[
            home_away +
            '_CmpAttYdTDINT'].apply(lambda stat: _dash_curate(stat, 0))
        df_curation[home_away + '_passAttempts'] = df_curation[
            home_away +
            '_CmpAttYdTDINT'].apply(lambda stat: _dash_curate(stat, 1))
        df_curation[home_away + '_passYards'] = df_curation[
            home_away +
            '_CmpAttYdTDINT'].apply(lambda stat: _dash_curate(stat, 2))
        df_curation[home_away + '_passTDs'] = df_curation[
            home_away +
            '_CmpAttYdTDINT'].apply(lambda stat: _dash_curate(stat, 3))
        df_curation[home_away + '_interceptions'] = df_curation[
            home_away +
            '_CmpAttYdTDINT'].apply(lambda stat: _dash_curate(stat, 4))
        df_curation = df_curation.drop(home_away + '_CmpAttYdTDINT', axis=1)

        # Clean up rushing stats
        df_curation[home_away + '_rushAttempts'] = df_curation[
            home_away +
            '_RushYdsTDs'].apply(lambda stat: _dash_curate(stat, 0))
        df_curation[home_away + '_rushYards'] = df_curation[
            home_away +
            '_RushYdsTDs'].apply(lambda stat: _dash_curate(stat, 1))
        df_curation[home_away +
                    '_rushTDs'] = df_curation[home_away + '_RushYdsTDs'].apply(
                        lambda stat: _dash_curate(stat, 2))
        df_curation = df_curation.drop(home_away + '_RushYdsTDs', axis=1)

        # Fourth Down
        df_curation[home_away + '_FourthDownConv'] = df_curation[
            home_away + '_FourthDownConv'].fillna('0-0')
        df_curation[home_away + '_fourthDownConversions'] = df_curation[
            home_away +
            '_FourthDownConv'].apply(lambda stat: _dash_curate(stat, 0))
        df_curation[home_away + '_fourthDownAttempts'] = df_curation[
            home_away +
            '_FourthDownConv'].apply(lambda stat: _dash_curate(stat, 1))
        df_curation = df_curation.drop(home_away + '_FourthDownConv', axis=1)

        # Fumbles Lost
        df_curation[home_away + '_fumbles'] = df_curation[
            home_away +
            '_FumblesLost'].apply(lambda stat: _dash_curate(stat, 0))
        df_curation[home_away + '_fumblesLost'] = df_curation[
            home_away +
            '_FumblesLost'].apply(lambda stat: _dash_curate(stat, 1))
        df_curation = df_curation.drop(home_away + '_FumblesLost', axis=1)

        # Clean up penalties
        df_curation[home_away + '_penalties'] = df_curation[
            home_away +
            '_PenaltiesYards'].apply(lambda stat: _dash_curate(stat, 0))
        df_curation[home_away + '_penaltyYards'] = df_curation[
            home_away +
            '_PenaltiesYards'].apply(lambda stat: _dash_curate(stat, 1))
        df_curation = df_curation.drop(home_away + '_PenaltiesYards', axis=1)

        # Clean up sacks
        df_curation[home_away +
                    '_sacks'] = df_curation[home_away + '_SackedYards'].apply(
                        lambda stat: _dash_curate(stat, 0))
        df_curation[home_away + '_sackedYards'] = df_curation[
            home_away +
            '_SackedYards'].apply(lambda stat: _dash_curate(stat, 1))
        df_curation = df_curation.drop(home_away + '_SackedYards', axis=1)

        # Third Down
        df_curation[home_away + '_ThirdDownConv'] = df_curation[
            home_away + '_ThirdDownConv'].fillna('0-0')
        df_curation[home_away + '_thirdDownConversions'] = df_curation[
            home_away +
            '_ThirdDownConv'].apply(lambda stat: _dash_curate(stat, 0))
        df_curation[home_away + '_thirdDownAttempts'] = df_curation[
            home_away +
            '_ThirdDownConv'].apply(lambda stat: _dash_curate(stat, 1))
        df_curation = df_curation.drop(home_away + '_ThirdDownConv', axis=1)

        # Possession Time
        df_curation[home_away + '_TimeofPossession'] = df_curation[
            home_away + '_TimeofPossession'].fillna('00:00')
        df_curation[home_away + '_possessionTime'] = df_curation[
            home_away + '_TimeofPossession'].apply(
                lambda stat: float(stat.split(':')[0]) + float(
                    stat.split(':')[1]) / 60 if len(stat) > 4 else np.nan)
        df_curation = df_curation.drop(home_away + '_TimeofPossession', axis=1)

        # Convert back to NA
        df_curation[home_away + '_possessionTime'] = df_curation.apply(
            lambda row: row[home_away + '_possessionTime']
            if row['year'] > 1983 else np.nan,
            axis=1)
        df_curation[home_away + '_fourthDownConversions'] = df_curation.apply(
            lambda row: row[home_away + '_fourthDownConversions']
            if row['year'] > 1991 else np.nan,
            axis=1)
        df_curation[home_away + '_fourthDownAttempts'] = df_curation.apply(
            lambda row: row[home_away + '_fourthDownAttempts']
            if row['year'] > 1991 else np.nan,
            axis=1)
        df_curation[home_away + '_thirdDownConversions'] = df_curation.apply(
            lambda row: row[home_away + '_thirdDownConversions']
            if row['year'] > 1991 else np.nan,
            axis=1)
        df_curation[home_away + '_thirdDownAttempts'] = df_curation.apply(
            lambda row: row[home_away + '_thirdDownAttempts']
            if row['year'] > 1991 else np.nan,
            axis=1)

    # Wrangle from home / away to team / opponent
    df_modeling = []
    all_teams = set(
        list(df_curation['home_team']) + list(df_curation['away_team']))
    for team in tqdm(all_teams):
        # Games where team is home
        df_home = df_curation[df_curation['home_team'] == team].copy()
        df_home['team'] = team
        df_home['is_home'] = 1
        df_home['opponent'] = df_home['away_team']
        df_home = df_home.drop(['home_team', 'away_team'], axis=1)
        df_home.columns = [
            re.sub('away_', 'opp_', re.sub('home_', '', col))
            for col in df_home.columns
        ]
        df_modeling.append(df_home)

        # Games where team is away
        df_away = df_curation[df_curation['away_team'] == team].copy()
        df_away['team'] = team
        df_away['is_home'] = 0
        df_away['opponent'] = df_away['home_team']
        df_away = df_away.drop(['home_team', 'away_team'], axis=1)
        df_away.columns = [
            re.sub('home_', 'opp_', re.sub('away_', '', col))
            for col in df_away.columns
        ]
        df_modeling.append(df_away)
    df_modeling = pd.concat(df_modeling,
                            sort=True).drop_duplicates().reset_index(drop=True)

    # Matchup
    def _define_matchup(main_team, opponent):
        return '_vs_'.join(sorted([main_team, opponent]))

    df_modeling['matchup'] = df_modeling.apply(
        lambda row: _define_matchup(row['team'], row['opponent']), axis=1)

    logger.info('Save Curated data for {} games.'.format(df_modeling.shape[0]))
    df_modeling.to_csv(os.path.join(Config.DATA_DIR, 'sports_bettors',
                                    'curated', 'nfl', 'df_curated.csv'),
                       index=False)
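_dash_curate above pulls one field out of the dash-delimited stat strings (e.g. the Cmp-Att-Yd-TD-INT passing line). A standalone usage sketch with a hypothetical stat value (the helper is a trimmed copy of the one above, with logging dropped for brevity):

import re
import numpy as np

def _dash_curate(stat: str, idx: int):
    # Collapse double dashes, then pull out the idx-th dash-separated field; unparseable stats become NaN.
    stat = re.sub('--', '-', stat)
    try:
        return int(float(stat.split('-')[idx])) if len(stat) > 2 else np.nan
    except Exception:
        return np.nan

passing_line = '22-31-287-2-1'  # hypothetical Cmp-Att-Yd-TD-INT value
print([_dash_curate(passing_line, i) for i in range(5)])  # [22, 31, 287, 2, 1]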
Exemplo n.º 54
0
        if not connection_established:
            raise Exception(
                "Could not connect to InfluxDB. Check that the container is running!"
            )

        run_main()

    else:
        # Program launched in one of the non-main modes. Increase logging level.
        logger.setLevel(logging.DEBUG)
        logger.handlers[0].setLevel(logging.DEBUG)
        if 'help' in MODE.lower() or '-h' in MODE.lower():

            logger.info(
                "\nSee the project Wiki for more detailed usage instructions: https://github.com/David00/rpi-power-monitor/wiki"
            )
            logger.info("""\nUsage:
                Start the program:                                  python3 power-monitor.py

                Collect raw data and build an interactive plot:     python3 power-monitor.py debug "chart title here" 

                Use the previously collected data to tune phase
                correction:                                         python3 power-monitor.py phase "chart title here"
                """)

        if MODE.lower() == 'debug':
            # This mode is intended to take a look at the raw CT sensor data.  It will take 2000 samples from each CT sensor, plot them to a single chart, write the chart to an HTML file located in /var/www/html/, and then terminate.
            # It also stores the samples to a file located in ./data/samples/last-debug.pkl so that the sample data can be read when this program is started in 'phase' mode.
            samples = collect_data(2000)
            ct0_samples = samples['ct0']
Exemplo n.º 55
0
def gen_plan_feas(data):
    n = data.shape[0]
    mode_list_feas = np.zeros((n, 12))
    max_dist, min_dist, mean_dist, std_dist = np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
    max_price, min_price, mean_price, std_price = np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
    max_eta, min_eta, mean_eta, std_eta = np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
    min_dist_mode, max_dist_mode, min_price_mode, max_price_mode, min_eta_mode, max_eta_mode, first_mode = np.zeros(
        (n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
    mode_texts = []
    for i, plan in tqdm(enumerate(data['plans'].values)):
        try:
            cur_plan_list = json.loads(plan)
        except:
            cur_plan_list = []
        if len(cur_plan_list) == 0:
            mode_list_feas[i, 0] = 1
            first_mode[i] = 0

            max_dist[i] = -1
            min_dist[i] = -1
            mean_dist[i] = -1
            std_dist[i] = -1

            max_price[i] = -1
            min_price[i] = -1
            mean_price[i] = -1
            std_price[i] = -1

            max_eta[i] = -1
            min_eta[i] = -1
            mean_eta[i] = -1
            std_eta[i] = -1

            min_dist_mode[i] = -1
            max_dist_mode[i] = -1
            min_price_mode[i] = -1
            max_price_mode[i] = -1
            min_eta_mode[i] = -1
            max_eta_mode[i] = -1

            mode_texts.append('word_null')
        else:
            distance_list = []
            price_list = []
            eta_list = []
            mode_list = []
            for tmp_dit in cur_plan_list:
                distance_list.append(int(tmp_dit['distance']))
                if tmp_dit['price'] == '':
                    price_list.append(0)
                else:
                    price_list.append(int(tmp_dit['price']))
                eta_list.append(int(tmp_dit['eta']))
                mode_list.append(int(tmp_dit['transport_mode']))
            mode_texts.append(
                ' '.join(['word_{}'.format(mode) for mode in mode_list]))
            distance_list = np.array(distance_list)
            price_list = np.array(price_list)
            eta_list = np.array(eta_list)
            mode_list = np.array(mode_list, dtype='int')
            mode_list_feas[i, mode_list] = 1
            distance_sort_idx = np.argsort(distance_list)
            price_sort_idx = np.argsort(price_list)
            eta_sort_idx = np.argsort(eta_list)

            max_dist[i] = distance_list[distance_sort_idx[-1]]
            min_dist[i] = distance_list[distance_sort_idx[0]]
            mean_dist[i] = np.mean(distance_list)
            std_dist[i] = np.std(distance_list)

            max_price[i] = price_list[price_sort_idx[-1]]
            min_price[i] = price_list[price_sort_idx[0]]
            mean_price[i] = np.mean(price_list)
            std_price[i] = np.std(price_list)

            max_eta[i] = eta_list[eta_sort_idx[-1]]
            min_eta[i] = eta_list[eta_sort_idx[0]]
            mean_eta[i] = np.mean(eta_list)
            std_eta[i] = np.std(eta_list)

            first_mode[i] = mode_list[0]
            max_dist_mode[i] = mode_list[distance_sort_idx[-1]]
            min_dist_mode[i] = mode_list[distance_sort_idx[0]]

            max_price_mode[i] = mode_list[price_sort_idx[-1]]
            min_price_mode[i] = mode_list[price_sort_idx[0]]

            max_eta_mode[i] = mode_list[eta_sort_idx[-1]]
            min_eta_mode[i] = mode_list[eta_sort_idx[0]]

    feature_data = pd.DataFrame(mode_list_feas)
    feature_data.columns = ['mode_feas_{}'.format(i) for i in range(12)]
    feature_data['max_dist'] = max_dist
    feature_data['min_dist'] = min_dist
    feature_data['mean_dist'] = mean_dist
    feature_data['std_dist'] = std_dist

    feature_data['max_price'] = max_price
    feature_data['min_price'] = min_price
    feature_data['mean_price'] = mean_price
    feature_data['std_price'] = std_price

    feature_data['max_eta'] = max_eta
    feature_data['min_eta'] = min_eta
    feature_data['mean_eta'] = mean_eta
    feature_data['std_eta'] = std_eta

    feature_data['max_dist_mode'] = max_dist_mode
    feature_data['min_dist_mode'] = min_dist_mode
    feature_data['max_price_mode'] = max_price_mode
    feature_data['min_price_mode'] = min_price_mode
    feature_data['max_eta_mode'] = max_eta_mode
    feature_data['min_eta_mode'] = min_eta_mode
    feature_data['first_mode'] = first_mode
    logger.info('mode tfidf...')
    tfidf_enc = TfidfVectorizer(ngram_range=(1, 2))
    tfidf_vec = tfidf_enc.fit_transform(mode_texts)
    svd_enc = TruncatedSVD(n_components=10, n_iter=20, random_state=2019)
    mode_svd = svd_enc.fit_transform(tfidf_vec)
    mode_svd = pd.DataFrame(mode_svd)
    mode_svd.columns = ['svd_mode_{}'.format(i) for i in range(10)]

    data = pd.concat([data, feature_data, mode_svd], axis=1)
    data = data.drop(['plans'], axis=1)
    return data
Exemplo n.º 56
0
def run_main():
    logger.info("Press Ctrl-c to quit...")
    # The following empty dictionaries will hold the respective calculated values at the end of each polling cycle, which are then averaged prior to storing the value to the DB.
    solar_power_values = dict(power=[], pf=[], current=[])
    home_load_values = dict(power=[], pf=[], current=[])
    net_power_values = dict(power=[], current=[])
    ct0_dict = dict(power=[], pf=[], current=[])
    ct1_dict = dict(power=[], pf=[], current=[])
    ct2_dict = dict(power=[], pf=[], current=[])
    ct3_dict = dict(power=[], pf=[], current=[])
    ct4_dict = dict(power=[], pf=[], current=[])
    ct5_dict = dict(power=[], pf=[], current=[])
    rms_voltage_values = []
    i = 0  # Counter for aggregate function

    while True:
        try:
            board_voltage = get_board_voltage()
            samples = collect_data(2000)
            poll_time = samples['time']
            ct0_samples = samples['ct0']
            ct1_samples = samples['ct1']
            ct2_samples = samples['ct2']
            ct3_samples = samples['ct3']
            ct4_samples = samples['ct4']
            ct5_samples = samples['ct5']
            v_samples = samples['voltage']
            rebuilt_waves = rebuild_waves(samples, ct0_phasecal, ct1_phasecal,
                                          ct2_phasecal, ct3_phasecal,
                                          ct4_phasecal, ct5_phasecal)
            results = calculate_power(rebuilt_waves, board_voltage)

            # # RMS calculation for phase correction only - this is not needed after everything is tuned. The following code is used to compare the RMS power to the calculated real power.
            # # Ideally, you want the RMS power to equal the real power when you are measuring a purely resistive load.
            # rms_power_0 = round(results['ct0']['current'] * results['ct0']['voltage'], 2)  # AKA apparent power
            # rms_power_1 = round(results['ct1']['current'] * results['ct1']['voltage'], 2)  # AKA apparent power
            # rms_power_2 = round(results['ct2']['current'] * results['ct2']['voltage'], 2)  # AKA apparent power
            # rms_power_3 = round(results['ct3']['current'] * results['ct3']['voltage'], 2)  # AKA apparent power
            # rms_power_4 = round(results['ct4']['current'] * results['ct4']['voltage'], 2)  # AKA apparent power
            # rms_power_5 = round(results['ct5']['current'] * results['ct5']['voltage'], 2)  # AKA apparent power
            # phase_corrected_power_0 = results['ct0']['power']
            # phase_corrected_power_1 = results['ct1']['power']
            # phase_corrected_power_2 = results['ct2']['power']
            # phase_corrected_power_3 = results['ct3']['power']
            # phase_corrected_power_4 = results['ct4']['power']
            # phase_corrected_power_5 = results['ct5']['power']

            # # diff is the difference between the real_power (phase corrected) compared to the simple rms power calculation.
            # # This is used to calibrate for the "unknown" phase error in each CT.  The phasecal value for each CT input should be adjusted so that diff comes as close to zero as possible.
            # diff_0 = phase_corrected_power_0 - rms_power_0
            # diff_1 = phase_corrected_power_1 - rms_power_1
            # diff_2 = phase_corrected_power_2 - rms_power_2
            # diff_3 = phase_corrected_power_3 - rms_power_3
            # diff_4 = phase_corrected_power_4 - rms_power_4
            # diff_5 = phase_corrected_power_5 - rms_power_5

            # Phase Corrected Results
            # logger.debug("\n")
            # logger.debug(f"CT0 Real Power: {round(results['ct0']['power'], 2):>10} W | Amps: {round(results['ct0']['current'], 2):<7} | RMS Power: {round(results['ct0']['current'] * results['ct0']['voltage'], 2):<6} W | PF: {round(results['ct0']['pf'], 5)}")
            # logger.debug(f"CT1 Real Power: {round(results['ct1']['power'], 2):>10} W | Amps: {round(results['ct1']['current'], 2):<7} | RMS Power: {round(results['ct1']['current'] * results['ct1']['voltage'], 2):<6} W | PF: {round(results['ct1']['pf'], 5)}")
            # logger.debug(f"CT2 Real Power: {round(results['ct2']['power'], 2):>10} W | Amps: {round(results['ct2']['current'], 2):<7} | RMS Power: {round(results['ct2']['current'] * results['ct2']['voltage'], 2):<6} W | PF: {round(results['ct2']['pf'], 5)}")
            # logger.debug(f"CT3 Real Power: {round(results['ct3']['power'], 2):>10} W | Amps: {round(results['ct3']['current'], 2):<7} | RMS Power: {round(results['ct3']['current'] * results['ct3']['voltage'], 2):<6} W | PF: {round(results['ct3']['pf'], 5)}")
            # logger.debug(f"CT4 Real Power: {round(results['ct4']['power'], 2):>10} W | Amps: {round(results['ct4']['current'], 2):<7} | RMS Power: {round(results['ct4']['current'] * results['ct4']['voltage'], 2):<6} W | PF: {round(results['ct4']['pf'], 5)}")
            # logger.debug(f"CT5 Real Power: {round(results['ct5']['power'], 2):>10} W | Amps: {round(results['ct5']['current'], 2):<7} | RMS Power: {round(results['ct5']['current'] * results['ct5']['voltage'], 2):<6} W | PF: {round(results['ct5']['pf'], 5)}")
            # logger.debug(f"Line Voltage: {round(results['voltage'], 2)} V")

            # Prepare values for database storage
            grid_0_power = results['ct0']['power']  # 200A Main (left)
            grid_1_power = results['ct1']['power']  # 200A Main (right)
            grid_2_power = results['ct2']['power']  # 100A Main (top)
            grid_4_power = results['ct4']['power']  # 100A Main (bottom)
            grid_5_power = results['ct5']['power']  # Unused

            grid_0_current = results['ct0']['current']
            grid_1_current = results['ct1']['current']
            grid_2_current = results['ct2']['current']
            grid_4_current = results['ct4']['current']
            grid_5_current = results['ct5']['current']

            solar_power = results['ct3']['power']
            solar_current = results['ct3']['current']
            solar_pf = results['ct3']['pf']

            # Set solar power and current to zero if the solar power is under 20W.
            if solar_power < 20:
                solar_power = 0
                solar_current = 0
                solar_pf = 0

            # Determine if the system is net producing or net consuming right now by looking at the two panel mains.
            # Since the current measured is always positive, we need to add a negative sign to the amperage value if we're exporting power.
            if grid_0_power < 0:
                grid_0_current = grid_0_current * -1
            if grid_1_power < 0:
                grid_1_current = grid_1_current * -1
            if solar_power > 0:
                solar_current = solar_current * -1

            # Unless your specific panel setup matches mine exactly, the following four lines will likely need to be re-written:
            home_consumption_power = grid_2_power + grid_4_power + grid_0_power + grid_1_power + solar_power
            net_power = home_consumption_power - solar_power
            home_consumption_current = grid_2_current + grid_4_current + grid_0_current + grid_1_current - solar_current
            net_current = grid_0_current + grid_1_current + grid_2_current + grid_4_current + solar_current

            if net_power < 0:
                current_status = "Producing"
            else:
                current_status = "Consuming"

            # Average 2 readings before sending to db
            if i < 2:
                solar_power_values['power'].append(solar_power)
                solar_power_values['current'].append(solar_current)
                solar_power_values['pf'].append(solar_pf)

                home_load_values['power'].append(home_consumption_power)
                home_load_values['current'].append(home_consumption_current)
                net_power_values['power'].append(net_power)
                net_power_values['current'].append(net_current)

                ct0_dict['power'].append(results['ct0']['power'])
                ct0_dict['current'].append(results['ct0']['current'])
                ct0_dict['pf'].append(results['ct0']['pf'])
                ct1_dict['power'].append(results['ct1']['power'])
                ct1_dict['current'].append(results['ct1']['current'])
                ct1_dict['pf'].append(results['ct1']['pf'])
                ct2_dict['power'].append(results['ct2']['power'])
                ct2_dict['current'].append(results['ct2']['current'])
                ct2_dict['pf'].append(results['ct2']['pf'])
                ct3_dict['power'].append(results['ct3']['power'])
                ct3_dict['current'].append(results['ct3']['current'])
                ct3_dict['pf'].append(results['ct3']['pf'])
                ct4_dict['power'].append(results['ct4']['power'])
                ct4_dict['current'].append(results['ct4']['current'])
                ct4_dict['pf'].append(results['ct4']['pf'])
                ct5_dict['power'].append(results['ct5']['power'])
                ct5_dict['current'].append(results['ct5']['current'])
                ct5_dict['pf'].append(results['ct5']['pf'])
                i += 1

            else:  # Calculate the average, send the result to InfluxDB, and reset the dictionaries for the next 2 sets of data.
                infl.write_to_influx(solar_power_values, home_load_values,
                                     net_power_values, ct0_dict, ct1_dict,
                                     ct2_dict, ct3_dict, ct4_dict, ct5_dict,
                                     poll_time, i)
                solar_power_values = dict(power=[], pf=[], current=[])
                home_load_values = dict(power=[], pf=[], current=[])
                net_power_values = dict(power=[], current=[])
                ct0_dict = dict(power=[], pf=[], current=[])
                ct1_dict = dict(power=[], pf=[], current=[])
                ct2_dict = dict(power=[], pf=[], current=[])
                ct3_dict = dict(power=[], pf=[], current=[])
                ct4_dict = dict(power=[], pf=[], current=[])
                ct5_dict = dict(power=[], pf=[], current=[])
                i = 0

                if logger.handlers[0].level == 10:
                    t = PrettyTable(
                        ['', 'CT0', 'CT1', 'CT2', 'CT3', 'CT4', 'CT5'])
                    t.add_row([
                        'Watts',
                        round(results['ct0']['power'], 3),
                        round(results['ct1']['power'], 3),
                        round(results['ct2']['power'], 3),
                        round(results['ct3']['power'], 3),
                        round(results['ct4']['power'], 3),
                        round(results['ct5']['power'], 3)
                    ])
                    t.add_row([
                        'Current',
                        round(results['ct0']['current'], 3),
                        round(results['ct1']['current'], 3),
                        round(results['ct2']['current'], 3),
                        round(results['ct3']['current'], 3),
                        round(results['ct4']['current'], 3),
                        round(results['ct5']['current'], 3)
                    ])
                    t.add_row([
                        'P.F.',
                        round(results['ct0']['pf'], 3),
                        round(results['ct1']['pf'], 3),
                        round(results['ct2']['pf'], 3),
                        round(results['ct3']['pf'], 3),
                        round(results['ct4']['pf'], 3),
                        round(results['ct5']['pf'], 3)
                    ])
                    t.add_row([
                        'Voltage',
                        round(results['voltage'], 3), '', '', '', '', ''
                    ])
                    s = t.get_string()
                    logger.debug('\n' + s)

            #sleep(0.1)

        except KeyboardInterrupt:
            infl.close_db()
            sys.exit()
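The commented-out RMS block above is the phase-calibration aid: with a purely resistive load the phase-corrected real power should match the apparent (RMS) power, so the per-channel diff shows which way to nudge each ct*_phasecal value. A minimal sketch of that comparison for a single channel, using the same field names as the results dict above:

def phase_calibration_diff(channel_result):
    # Apparent power from RMS current and voltage vs. the phase-corrected real power.
    apparent_power = round(channel_result['current'] * channel_result['voltage'], 2)
    real_power = channel_result['power']
    return round(real_power - apparent_power, 2)

# With a resistive load (space heater, kettle), adjust that channel's phasecal until this is ~0.
sample = {'power': 1498.6, 'current': 12.6, 'voltage': 119.4, 'pf': 0.996}  # made-up reading
print(phase_calibration_diff(sample))  # -5.84 -> phasecal still slightly off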
Exemplo n.º 57
0
def update_from_temp_collections(config, no_confirm=False, use_parallel=False):
    t0 = time.time()
    sc = GeneDocSyncer(config)
    new_src_li = sc.get_new_source_list()
    if not new_src_li:
        logging.info("No new source collections need to update. Abort now.")
        return

    logging.info("Found {} new source collections need to update:".format(
        len(new_src_li)))
    logging.info("\n".join(['\t' + x for x in new_src_li]))

    if no_confirm or ask('Continue?') == 'Y':
        logfile = 'databuild_sync_{}_{}.log'.format(config,
                                                    time.strftime('%Y%m%d'))
        logfile = os.path.join(LOG_FOLDER, logfile)
        setup_logfile(logfile)

        for src in new_src_li:
            t0 = time.time()
            logging.info("Current source collection: %s" % src)
            ts = _get_timestamp(src, as_str=True)
            logging.info("Calculating changes... ")
            changes = sc.get_changes(src, use_parallel=use_parallel)
            logging.info("Done")
            get_changes_stats(changes)
            if no_confirm or ask("Continue to save changes...") == 'Y':
                if config == 'genedoc_mygene':
                    dumpfile = 'changes_{}.pyobj'.format(ts)
                else:
                    dumpfile = 'changes_{}_allspecies.pyobj'.format(ts)
                dump(changes, dumpfile)
                dumpfile_key = 'genedoc_changes/' + dumpfile
                logging.info('Saving to S3: "{}"... '.format(dumpfile_key))
                send_s3_file(dumpfile, dumpfile_key)
                logging.info('Done.')
                #os.remove(dumpfile)

            if no_confirm or ask("Continue to apply changes...") == 'Y':
                sc.apply_changes(changes)
                sc.verify_changes(changes)
            logging.info('=' * 20)
            logging.info("Finished. %s" % timesofar(t0))
Exemplo n.º 58
0
from gevent import monkey
monkey.patch_all()

import os
from config import logger
from config import App
from config import Db
from config import Client
from config import Jsonrpc
from utils.sim_btc_utils import sim_btc_utils
from utils.hc_utils import hc_utils
from utils.usdt_utils import usdt_utils
from utils.btm_utils import btm_utils
import time
logger.info('Start app...')
app = App
db = Db
client = Client
jsonrpc = Jsonrpc
sim_btc_utils_all = ["btc", "ltc", "ub", "bch", "doge"]
sim_btc_plugin = {}
for value in sim_btc_utils_all:
    upper = value.upper()
    sim_btc_config = {}
    # `in` replaces the Python 2-only dict.has_key() check.
    if (upper + "_HOST" in app.config and upper + "_PORT" in app.config
            and upper + "_FEE" in app.config):
        sim_btc_config["host"] = app.config[upper + "_HOST"]
        sim_btc_config["port"] = app.config[upper + "_PORT"]
        sim_btc_config["collect_host"] = app.config[upper + "_COLLECT_HOST"]
        sim_btc_config["collect_port"] = app.config[upper + "_COLLECT_PORT"]
Exemplo n.º 59
0
def SendPhoneEmail(phone, link, price, average_price):
  msg = MIMEText('<a href="%(link)s">%(link)s</a>' % {'link': link}, 'html')
  msg['Subject'] = '%s ($%s, average $%s)' % (phone.ToString(), price,
                                              average_price)
  SendEmail(msg)
  logger.info('Email for %s sent.' % phone.ToString())
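
SendPhoneEmail delegates delivery to a SendEmail helper that is not shown in this example. A minimal sketch of such a helper using smtplib, with the SMTP host and the sender/recipient addresses as placeholder assumptions:

def SendEmail(msg):
  # Hypothetical helper; host, sender, and recipient are assumptions.
  import smtplib
  msg['From'] = 'alerts@example.com'
  msg['To'] = 'me@example.com'
  with smtplib.SMTP('localhost') as server:
    server.send_message(msg)
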
Exemplo n.º 60
0
# The original example is truncated (the def and socket-creation lines are
# missing); the function name below is assumed for this reconstruction.
def get_local_ip():
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # assumes `import socket`
    try:
        # A UDP connect sends no packets; it only selects the outgoing interface.
        s.connect(('10.255.255.255', 1))
        IP = s.getsockname()[0]
    except OSError:
        IP = None
    finally:
        s.close()
    return IP


if __name__ == '__main__':

    # Backup config.py file
    try:
        copyfile('config.py', 'config.py.backup')
    except FileNotFoundError:
        logger.info("Could not create a backup of config.py file.")

    if len(sys.argv) > 1:
        MODE = sys.argv[1]
        if MODE in ('debug', 'phase'):
            try:
                title = sys.argv[2]
            except IndexError:
                title = None
        # Create the data/samples directory:
        try:
            os.makedirs('data/samples/')
        except FileExistsError:
            pass
    else:
        MODE = None