Example #1
 def aborted(self):
     # remove partial file
     try:
         gnomevfs.unlink(self.output_filename)
     except:
         log("cannot delete: '%s'" % beautify_uri(self.output_filename))
     return
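
For contrast, the same "best-effort delete of the partial output" idea in modern standard-library Python (a sketch; the original relies on the old gnomevfs bindings and a bare except):

import contextlib
import os

def remove_partial(path):
    # Mirror the snippet's "try to delete, carry on if we can't" behaviour,
    # but only swallow the expected failures instead of every exception.
    with contextlib.suppress(FileNotFoundError, PermissionError):
        os.remove(path)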
Example #2
def _clear_watched_items( clear_type ):
    utils.log( "_clear_watched_items( %s )" % ( clear_type ), xbmc.LOGNOTICE )
    # initialize base_path
    base_paths = []
    # clear trivia or trailers
    if ( clear_type == "ClearWatchedTrailers" ):
        # handle AMT db special
        sys.path.append( os.path.join( BASE_RESOURCE_PATH, "lib", "scrapers") )
        from amt_database import scraper as scraper
        Scraper = scraper.Main()
        # update trailers
        Scraper.clear_watched()
        # set base watched file path
        base_paths += [ os.path.join( BASE_CURRENT_SOURCE_PATH, "amt_current_watched.txt" ) ]
        base_paths += [ os.path.join( BASE_CURRENT_SOURCE_PATH, "local_watched.txt" ) ]
    else:
        # set base watched file path
        base_paths = [ os.path.join( BASE_CURRENT_SOURCE_PATH, "trivia_watched.txt" ) ]
    try:
        # set proper message
        message = ( 32531, 32541, )[ clear_type == "ClearWatchedTrailers" ]
        # remove watched status file(s)
        for base_path in base_paths:
            # remove file if it exists
            if ( xbmcvfs.exists( base_path ) ):
                xbmcvfs.delete( base_path )
    except:
        # set proper message
        message = ( 32532, 32542, )[ clear_type == "ClearWatchedTrailers" ]
    # inform user of result
    ok = xbmcgui.Dialog().ok( __language__( 32000 ), __language__( message ) )
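
The `( 32531, 32541, )[ ... ]` lines lean on the fact that a Python bool indexes a two-tuple as 0 or 1; a tiny self-contained illustration of the idiom:

success_ids = (32531, 32541)  # (trivia, trailers) string IDs
clear_type = "ClearWatchedTrailers"
message = success_ids[clear_type == "ClearWatchedTrailers"]  # True -> index 1
print(message)  # 32541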
Example #3
def initialize(context):
    # hook into the Control Panel
    global translation_service

    # allow for disabling PTS entirely by setting a environment variable.
    if bool(os.getenv('DISABLE_PTS')):
        log('Disabled by environment variable "DISABLE_PTS".', logging.WARNING)
        return

    cp = context._ProductContext__app.Control_Panel # argh
    if cp_id in cp.objectIds():
        cp_ts = getattr(cp, cp_id)
        # use the ts in the acquisition context of the control panel
        # translation_service = translation_service.__of__(cp)
        translation_service = PTSWrapper(cp_ts)
    else:
        cp_ts = make_translation_service(cp)

    # don't touch - this is the last version that didn't have the
    # attribute (0.4)
    instance_version = getattr(cp_ts, '_instance_version', (0, 4, 0, 0))
    if instance_version[3] > 99:
        log('development mode: translation service recreated',
            detail = '(found %s.%s.%s.%s)\n' % instance_version)
        cp._delObject(cp_id)
        cp_ts = make_translation_service(cp)

    if instance_version < PlacelessTranslationService._class_version:
        log('outdated translation service found, recreating',
            detail = '(found %s.%s.%s.%s)\n' % instance_version)
        cp._delObject(cp_id)
        purgeMoFileCache()
        cp_ts = make_translation_service(cp)

    # sweep products
    log('products: %r' % get_products(), logging.DEBUG)
    for prod in get_products():
        # prod is a tuple in the form:
        # (priority, dir_name, index, base_dir) for each Product directory
        cp_ts._load_i18n_dir(os.path.join(prod[3], prod[1], 'i18n'))
        cp_ts._load_locales_dir(os.path.join(prod[3], prod[1], 'locales'))

    # sweep the i18n directory for local catalogs
    instance_i18n = os.path.join(INSTANCE_HOME, 'i18n')
    if os.path.isdir(instance_i18n):
        cp_ts._load_i18n_dir(instance_i18n)

    instance_locales = os.path.join(INSTANCE_HOME, 'locales')
    if os.path.isdir(instance_locales):
        cp_ts._load_locales_dir(instance_locales)

    # didn't find any catalogs
    if not cp_ts.objectIds():
        log('no translations found!', logging.DEBUG)

    # set ZPT's translation service
    # NOTE: since this registry is a global var we can't register the
    #       persistent service itself (zodb connection) therefore a
    #       wrapper is created around it
    setGlobalTranslationService(PTSWrapper(cp_ts))
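
The two upgrade checks work because Python compares tuples element-wise; a quick sketch (the current class version here is made up for illustration):

instance_version = (0, 4, 0, 0)           # what an old install reports
class_version = (1, 4, 8, 0)              # hypothetical _class_version
print(instance_version < class_version)   # True -> outdated, recreate service
print(instance_version[3] > 99)           # False -> not a development build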
Example #4
    def on_task_finished(self, task):
        task.sound_file.progress = 1.0

        if task.error:
            debug("error in task, skipping rename:", task.output_filename)
            if vfs_exists(task.output_filename):
                vfs_unlink(task.output_filename)
            self.errors.append(task.error)
            self.error_count += 1
            return

        duration = task.get_duration()
        if duration:
            self.duration_processed += duration

        # rename temporary file
        newname = self.window.prefs.generate_filename(task.sound_file)
        log(beautify_uri(task.output_filename), "->", beautify_uri(newname))

        # safe mode. generate a filename until we find a free one
        p, e = os.path.splitext(newname)
        p = p.replace("%", "%%")
        p = p + " (%d)" + e
        i = 1
        while vfs_exists(newname):
            newname = p % i
            i += 1

        task.error = vfs_rename(task.output_filename, newname)
        if task.error:
            self.errors.append(task.error)
            self.error_count += 1
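
The "safe mode" loop above, distilled into a standalone helper against the local filesystem (a sketch, not the original vfs_* API):

import os

def free_name(path):
    # Append " (1)", " (2)", ... before the extension until the name is unused.
    root, ext = os.path.splitext(path)
    root = root.replace("%", "%%")     # protect literal % before %-formatting
    template = root + " (%d)" + ext
    i = 1
    while os.path.exists(path):
        path = template % i
        i += 1
    return path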
Example #5
def answer_from_div(div):
    a = Answer()
    a.author = div.xpath('.//a[@class="author-link"]')[0].text
    log('author,', a)
    content = div.xpath('.//div[@class="zm-editable-content clearfix"]/text()')
    a.content = '\n'.join(content)
    return a
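
A minimal way to exercise the parser, assuming the snippet's `Answer` class and `log` helper are in scope; the HTML is a made-up fixture:

from lxml import html

div = html.fromstring(
    '<div>'
    '<a class="author-link">alice</a>'
    '<div class="zm-editable-content clearfix">hello</div>'
    '</div>'
)
answer = answer_from_div(div)
print(answer.author)  # alice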
Example #6
def main():
    if not len(sys.argv) == 2:
        usage()

    if not config.enabled.lower() == "true":
        fatal('ebsmount is not enabled (%s)' % config.CONF_FILE)

    action = sys.argv[1]
    DEVNAME = os.getenv('DEVNAME', None)
    PHYSDEVPATH = os.getenv('PHYSDEVPATH', None)

    if not action in ('add', 'remove'):
        usage('action must be one of: add, remove')

    if not DEVNAME:
        usage('DEVNAME is required')

    if not PHYSDEVPATH:
        usage('PHYSDEVPATH is required')

    if not PHYSDEVPATH.startswith('/devices/xen/vbd-'):
        usage('PHYSDEVPATH is not of the expected structure')

    # log trigger
    log(DEVNAME, "received %s trigger" % action)

    mountdir = os.path.join(config.mountdir, os.path.basename(PHYSDEVPATH))
    func = getattr(ebsmount, 'ebsmount_' + action)
    func(DEVNAME, mountdir)
Example #7
    def getImagesMjpegInterlace(self, camera, url, control, prefix):
        """ Update camera position with interlaced mjpeg frames """

        try:
            stream = requests.get(url, stream=True, timeout=TIMEOUT).raw

        except requests.RequestException as e:
            utils.log(3, e)
            control[0].setImage(_error, useCache=False)
            return

        x = 0
        while not monitor.abortRequested() and self.isRunning:

            filename = os.path.join(_datapath, "%s_%s.%d.jpg") % (prefix, camera.number, x)
            filename_exists = utils.get_mjpeg_frame(stream, filename)

            if filename_exists:
                if x % 2 == 0:  # Interlacing for flicker reduction/elimination
                    control[0].setImage(filename, useCache=False)
                else:
                    control[1].setImage(filename, useCache=False)
                xbmcvfs.delete(os.path.join(_datapath, "%s_%s.%d.jpg") % (prefix, camera.number, x - 2))
                x += 1

            else:
                utils.log(3, "Camera %s - Error on MJPEG" % camera.number)
                control[0].setImage(_error, useCache=False)
                return
Example #8
 def delete_msgs_wrapper(self, service, msgIds=[], user_id=''):
     utils.log( 'Delete ' + str(len(msgIds)) + ' messages')
     for msgId in msgIds:
         self.delete_msg(service, user_id, msgId)
         print '*',
         
     utils.log('Done')
Example #9
def ebsmount_add(devname, mountdir):
    """ebs device attached"""

    matching_devices = []
    for device in udevdb.query():
        if device.name.startswith(basename(devname)):
            matching_devices.append(device)

    for device in matching_devices:
        devpath = join('/dev', device.name)
        mountpath = join(mountdir, device.env.get('ID_FS_UUID', devpath[-1])[:6])
        mountoptions = ",".join(config.mountoptions.split())

        filesystem = device.env.get('ID_FS_TYPE', None)
        if not filesystem:
            log(devname, "could not identify filesystem: %s" % devpath)
            continue

        if not filesystem in config.filesystems.split():
            log(devname, "filesystem (%s) not supported: %s" % (filesystem,devpath))
            continue

        if is_mounted(devpath):
            log(devname, "already mounted: %s" % devpath)
            continue

        mount(devpath, mountpath, mountoptions)
        if exists(config.postmountscript):
          log(devname, "Executing: %s -m %s -d %s" % (config.postmountscript, mountpath, devname))
          res = system("exec '%s' -m %s -d %s" % (config.postmountscript, mountpath, devname))
        else:
          log(devname, "Script does not exist at %s" % config.postmountscript)
Example #10
    def import_into_db(self, full_path):
        temp_path = os.path.join(xbmc.translatePath("special://profile"), 'temp_import_%s.csv' % (int(time.time())))
        utils.log('Copying import file from: |%s| to |%s|' % (full_path, temp_path), xbmc.LOGDEBUG)
        if not xbmcvfs.copy(full_path, temp_path):
            raise Exception('Import: Copy from |%s| to |%s| failed' % (full_path, temp_path))

        try:
            with open(temp_path, 'r') as f:
                    reader = csv.reader(f)
                    mode = ''
                    _ = f.readline()  # read header
                    for line in reader:
                        if CSV_MARKERS.FAVORITES in line[0] or CSV_MARKERS.SUBSCRIPTIONS in line[0] or CSV_MARKERS.BOOKMARKS in line[0] or CSV_MARKERS.EXT_SUBS in line[0]:
                            mode = line[0]
                            continue
                        elif mode == CSV_MARKERS.FAVORITES:
                            try:
                                self.save_favorite(line[0], line[1], line[2], line[3])
                            except: pass  # save_favorite throws exception on dupe
                        elif mode == CSV_MARKERS.SUBSCRIPTIONS:
                            # don't allow import of days with values other than 0-6
                            if line[5].translate(None, '0123456'): line[5] = '0123456'
                            self.add_subscription(line[0], line[1], line[2], line[3], line[4], line[5])
                        elif mode == CSV_MARKERS.BOOKMARKS:
                            self.set_bookmark(line[0], line[1])
                        elif mode == CSV_MARKERS.EXT_SUBS:
                            self.add_ext_sub(line[0], line[1], line[2], line[3])
                        else:
                            raise Exception('CSV line found while in no mode')
        finally:
            if not xbmcvfs.delete(temp_path):
                raise Exception('Import: Delete of %s failed.' % (temp_path))
Example #11
def delete_temp_files():
    log('Deleting temporary files...')

    os.remove(settings.BROKEN_CSV_FILE_NAME)
    os.remove(settings.FIXED_CSV_FILE_NAME)

    log('Done')
Example #12
def main():
    parser = argparse.ArgumentParser(description="Retrieve petitions from We The People")
    parser.add_argument(
        "-m",
        "--max",
        metavar="INTEGER",
        dest="max",
        type=int,
        default=None,
        help="maximum number of petitions to retrieve",
    )
    parser.add_argument(
        "-s",
        "--start",
        metavar="INTEGER",
        dest="start",
        type=int,
        default=1,
        help="starting page, 20 per page, default is 1",
    )
    args = parser.parse_args()

    if args.max is not None and args.max < 1:
        parser.error("How can I scrape less than one petition? You make no sense! --max must be one or greater.")

    if args.start < 1:
        parser.error("--start must be one or greater.")

    log("Found %i petitions" % (petitions(args.start, args.max)))

    # write log
    scrapelog["end"] = datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
    write(json.dumps(scrapelog, indent=2), "log-wh-" + scrapelog["begin"] + ".json", log_dir())
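
A condensed, runnable sketch of the same argparse setup, fed an argument list directly instead of reading sys.argv:

import argparse

parser = argparse.ArgumentParser(description="Retrieve petitions from We The People")
parser.add_argument("-m", "--max", dest="max", type=int, default=None)
parser.add_argument("-s", "--start", dest="start", type=int, default=1)

args = parser.parse_args(["--max", "40", "--start", "2"])
print(args.max, args.start)  # 40 2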
Example #13
    def export_from_db(self, full_path):
        temp_path = os.path.join(xbmc.translatePath("special://profile"), 'temp_export_%s.csv' % (int(time.time())))
        with open(temp_path, 'w') as f:
            writer = csv.writer(f)
            f.write('***VERSION: %s***\n' % self.__get_db_version())
            if self.__table_exists('favorites'):
                f.write(CSV_MARKERS.FAVORITES + '\n')
                for fav in self.get_favorites():
                    writer.writerow(fav)
            if self.__table_exists('subscriptions'):
                f.write(CSV_MARKERS.SUBSCRIPTIONS + '\n')
                for sub in self.get_subscriptions():
                    writer.writerow(sub)
            if self.__table_exists('new_bkmark'):
                f.write(CSV_MARKERS.BOOKMARKS + '\n')
                for bookmark in self.get_bookmarks():
                    writer.writerow(bookmark)
            if self.__table_exists('external_subs'):
                f.write(CSV_MARKERS.EXT_SUBS + '\n')
                for sub in self.get_external_subs():
                    writer.writerow(sub)

        utils.log('Copying export file from: |%s| to |%s|' % (temp_path, full_path), xbmc.LOGDEBUG)
        if not xbmcvfs.copy(temp_path, full_path):
            raise Exception('Export: Copy from |%s| to |%s| failed' % (temp_path, full_path))

        if not xbmcvfs.delete(temp_path):
            raise Exception('Export: Delete of %s failed.' % (temp_path))
Example #14
def parse(queue):
    utils.log('start...')
    request = queue.get()
    success, response = page_hybridflightresults_aspx(request)
    print success, response
    utils.log('done')
    queue.put({ 'success': success, 'flags':['no_range'], 'response': response })
Example #15
def main():
    if not len(sys.argv) == 2:
        usage()

    if not config.enabled.lower() == "true":
        fatal('ebsmount is not enabled (%s)' % config.CONF_FILE)

    action = sys.argv[1]
    devname = os.getenv('DEVNAME', None)
    devpath = os.getenv('PHYSDEVPATH', os.getenv('DEVPATH', None))

    if not action in ('add', 'remove'):
        usage('action must be one of: add, remove')

    if not devname:
        usage('DEVNAME is required')

    if not devpath:
        usage('PHYSDEVPATH or DEVPATH is required')

    if not _expected_devpath(devpath, config.devpaths.split()):
        usage('PHYSDEVPATH/DEVPATH is not of the expected structure')

    # log trigger
    log(devname, "received %s trigger" % action)

    func = getattr(ebsmount, 'ebsmount_' + action)
    func(devname, config.mountdir)
Example #16
def build_xsl_reports( 
          locate_root_dir
        , tag
        , expected_results_file
        , failures_markup_file
        , comment_file
        , results_dir
        , result_file_prefix
        , dont_collect_logs = 0
        , reports = report_types
        , warnings = []
        , user = None
        , upload = False
        ):

    ( run_date ) = time.strftime( '%Y-%m-%dT%H:%M:%SZ', time.gmtime() )

    root_paths.append( locate_root_dir )
    root_paths.append( results_dir )
    
    bin_boost_dir = os.path.join( locate_root_dir, 'bin', 'boost' )
    
    output_dir = os.path.join( results_dir, result_file_prefix )
    utils.makedirs( output_dir )
    
    if expected_results_file != '':
        expected_results_file = os.path.abspath( expected_results_file )
    else:
        expected_results_file = os.path.abspath( map_path( 'empty_expected_results.xml' ) )


    extended_test_results = os.path.join( output_dir, 'extended_test_results.xml' )
        
    execute_tasks(
          tag
        , user
        , run_date
        , comment_file
        , results_dir
        , output_dir
        , reports
        , warnings
        , extended_test_results
        , dont_collect_logs
        , expected_results_file
        , failures_markup_file
        )

    if upload:
        upload_dir = 'regression-logs/'
        utils.log( 'Uploading  results into "%s" [connecting as %s]...' % ( upload_dir, user ) )
        
        archive_name = '%s.tar.gz' % result_file_prefix
        utils.tar( 
              os.path.join( results_dir, result_file_prefix )
            , archive_name
            )
        
        utils.sourceforge.upload( os.path.join( results_dir, archive_name ), upload_dir, user )
        utils.sourceforge.untar( os.path.join( upload_dir, archive_name ), user, background = True )
Example #17
 def _get_totals(self):
     utils.log('calculating totals')
     df = self.df[self.independent_vars].sum().to_frame('total')
     df = df.set_value('tds_0','total',None)
     df = df.set_value('tds_1','total',None)
     df = df.reset_index().rename(columns={'index':'independent_variable'})
     return df
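
`DataFrame.set_value` was removed in later pandas releases; the same totals frame can be built with `.at` (a sketch assuming a comparable column layout):

import pandas as pd

df = pd.DataFrame({"tds_0": [1.0, 2.0], "age": [30.0, 40.0]})
totals = df.sum().to_frame("total")
totals.at["tds_0", "total"] = float("nan")  # .at replaces the removed set_value
totals = totals.reset_index().rename(columns={"index": "independent_variable"})
print(totals)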
Example #18
 def save(self):
     show_name = None
     if self.rageid is not None:
         show_name = self.rageid_show_name(self.rageid)
         if show_name is not None:
             show_name = unicode(show_name, 'utf-8')
     if show_name is None:
         manual_name = unikeyboard(self.nzbname, 'Enter show name')
         if manual_name is None:
             log("Tvshow: save: did not recieve a name for the TV-show")
             return
         #show_name = manual_name.decode("utf_8").encode("raw_unicode_escape")
         show_name = unicode(manual_name, 'utf-8').replace('\n','')
     strm_path_show = utils.join(self.strm_path, os.path.join(remove_disallowed_filename_chars(show_name),''))
     # Check if showname folder exist in path, if not create it.
     if not utils.exists(strm_path_show):
         try:
             utils.mkdir(strm_path_show)
         except:
             log("Tvshow: save: failed to create TV-show folder %s" % strm_path_show)
             return
     # Check if tvshow.nfo is there, if not, create it.
     tv_nfo = self.info
     tv_nfo.path(strm_path_show)
     # The Episode name has to be picked up by XBMC
     # regexps
     episode_name = self.check_episode_name(self.nzbname)
     if not self.save_nfo_type == "disabled":
         if self.save_nfo_type == "minimal":
             tv_nfo.mini()
         if not utils.exists(os.path.join(strm_path_show, 'tvshow.nfo')):
             tv_nfo.save_tvshow(show_name)
         # now, save the episodename.nfo
         tv_nfo.save_episode(episode_name)
     strm.StrmFile(strm_path_show, episode_name, self.nzb).save()
Example #19
def ftp_task( site, site_path , destination ):
    __log__ = 1
    utils.log( '' )
    utils.log( 'ftp_task: "ftp://%s/%s" -> %s' % ( site, site_path, destination ) )

    utils.log( '    logging on ftp site %s' % site )
    f = ftplib.FTP( site )
    f.login()
    utils.log( '    cwd to "%s"' % site_path )
    f.cwd( site_path )

    source_content = list_ftp( f )
    source_content = [ x for x in source_content if re.match( r'.+[.](?<!log[.])zip', x.name ) and x.name.lower() != 'boostbook.zip' ]
    destination_content = list_dir( destination )
    d = diff( source_content, destination_content )

    def synchronize():
        for source in d[0]:
            utils.log( 'Copying "%s"' % source )
            result = open( os.path.join( destination, source ), 'wb' )
            f.retrbinary( 'RETR %s' % source, result.write )
            result.close()
            mod_date = find_by_name( source_content, source ).date
            m = time.mktime( mod_date )
            os.utime( os.path.join( destination, source ), ( m, m ) )

        for obsolete in d[1]:
            utils.log( 'Deleting "%s"' % obsolete )
            os.unlink( os.path.join( destination, obsolete ) )

    utils.log( "    Synchronizing..." )
    __log__ = 2
    synchronize()
    
    f.quit()        
Example #20
def the_end_dialog(params, **kwargs):
    log("the_end_dialog: params: %s kwargs: %s" %(params, kwargs))
    dialog = xbmcgui.Dialog()
    if 'is_stopped' in kwargs:
        is_stopped = kwargs['is_stopped']
    else:
        is_stopped = False
    if 'progressing' in kwargs:
        progressing = kwargs['progressing']
    else:
        progressing = False
    if progressing:
        options = ['Delete', 'Just download']
        if is_stopped:
            heading = 'Downloading, what do you want to do?'
        else:
            heading = 'Still downloading, what do you want to do?'
    else:
        heading = 'Download finished, what do you want to do?'
        options = ['Delete', 'Repair']
    ret = dialog.select(heading, options)
    if ret == 0:
        delete(params)
    if ret == 1 and progressing:
        just_download(params)
    elif ret == 1 and not progressing:
        repair(params)
    return
Example #21
def save_strm(nzbname, url):
    log("save_strm: nzbname: %s url: %s" % (nzbname, url))
    strm2lib.save_strm(__settings__, nzbname, url)
    if SAVE_NZB and utils.exists(NZB_CACHE):
        nzb_path = utils.join(NZB_CACHE, '%s%s' % (nzbname, '.nzb'))
        log("save_strm: nzb_path: %s" % nzb_path)
        m_nzb.save(url, nzb_path)
Example #22
    def connect_to_sql(self, sql_connect, db_name="", force_reconnect=False, create_db=True):
        """
        Connect to SQL database or create the database and connect
        :param sql_connect: the variable to set
        :param db_name: the name of the database
        :param force_reconnect: force the database connection
        :param create_db: create the database
        :return the created SQL connection
        """
        print self
        if sql_connect is None or force_reconnect:
            try:
                sql_connect = MySQLdb.connect(host=config.SQL_HOST, user=config.SQL_USERNAME, passwd=config.SQL_PASSWORD, db=db_name)
                return sql_connect
            except Exception, e:
                # Create the database
                if e[0] and create_db and db_name != "":
                    if sql_connect is None:
                        sql_connect = MySQLdb.connect(host=config.SQL_HOST, user=config.SQL_USERNAME, passwd=config.SQL_PASSWORD)
                    utils.log("Creating database " + db_name)

                    cur = sql_connect.cursor()
                    cur.execute("CREATE DATABASE " + db_name)
                    sql_connect.commit()
                    sql_connect.select_db(db_name)
                    return sql_connect
                else:
                    utils.log("Could not connect to MySQL: %s" % e)
                    return None
Example #23
def ensure_mongo_indexes():
    """
    Runs commands on the mongo indexes to ensure that they are set
    :return: None
    """
    utils.log("Setting index for countries")
    cmd = config.MONGO_PATH + "mongo  --quiet " + config.MONGO_HOST + "/logs --eval \"db.clickstream.ensureIndex({country:1})\""
Example #24
 def _run(self):
     utils.log("[%s] pulling from source '%s'" % (self.name, self._source.name))
     
     self._source.startProducing()
     self.processQueue(self._source._output)
     self._source.join()
     self._output.put(StopIteration)
Example #25
def crossValidate(opts):
	"""
	Starts the tree cross validation.
	The subset tree has a depth defined by the division-depth option.
	Each dataset will be split into a number of subsets defined by the division-factor option.
	
	@type	opts: Options
	@param	opts: Options object
	"""
	totaltime = time.time()
	opts.training_file = scale(opts, opts.training_file)
	log("Scaling input data")
	while opts.division_depth >= 0:
		starttime = time.time()
		log("Range: C={0}; gamma={1}".format(opts.c_range,opts.g_range))
		res = crossValidationRound(opts)
		cstart,cstop,cstep = opts.c_range
		gstart,gstop,gstep = opts.g_range
		cmin,cmax,gmin,gmax = calcNewRange(res, opts)
		opts.division_depth = opts.division_depth -1
		opts.c_range = cmin,cmax,cstep
		opts.g_range = gmin,gmax,gstep
		endtime = time.time()
		log("Time for this round: {0}".format(time.strftime("%H:%M:%S", time.gmtime(endtime-starttime))))
		log("Remaining division steps: {0}".format(opts.division_depth+1))
		
	log("Total time: {}".format(time.strftime("%H:%M:%S", time.gmtime(time.time()-totaltime))))
			
Example #26
def get_latest_version():
    """
        Sort the list, and get the latest version
    """
    versions = get_versions()
    utils.log('Version check found versions: %s' % versions)
    return sorted(versions, reverse=True)[0]
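
Note that a plain reverse sort on version strings is lexicographic, so "1.10.0" would sort below "1.9.3"; if the versions are dotted strings, a numeric key avoids that (sketch):

versions = ["1.2.0", "1.10.0", "1.9.3"]
latest = sorted(versions, key=lambda v: tuple(int(p) for p in v.split(".")), reverse=True)[0]
print(latest)  # 1.10.0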
Example #27
def upload_log():
    """
        Upload our full XBMC log as a GitHub gist
    """
    try:
        log_content = get_xbmc_log()
    except Exception as e:
        utils.log("Failed to read log: %s" % e)
        return None

    utils.log("Uploading xbmc.log")
    try:
        response = urllib2.urlopen(make_request(config.GIST_API_URL), json.dumps({
            "files": {
                "xbmc.log": {
                    "content": log_content
                }
            }
        }))
    except urllib2.HTTPError as e:
        print e
        utils.log("Failed to save log: HTTPError %s" % e.code)
        return False
    except urllib2.URLError as e:
        print e
        utils.log("Failed to save log: URLError %s" % e.reason)
        return False
    try:
        return json.load(response)["html_url"]
    except:
        utils.log("Failed to parse API response: %s" % response.read())
Example #28
	def process(self, message):
		
		end = message.get('end')
		start = message.get('start')
		startDoy = message.get('startDoy')
		startYear = message.get('startYear')
		layer = message.get('layer')
		region = message.get('region')
		productName = message.get('productName').replace('.','_')

		outputFilename = "_".join([productName, layer['name'], startYear + startDoy , region]) + '.tif'
		outputFilepath = os.path.join(self.module_path, outputFilename)

		utils.removeFileIfExist(outputFilepath)

		utils.log(self.name, 'Generating', outputFilename, 'merging', str(len(layer['files'])), 'files')
		gdal_utils.mosaic(layer['files'], outputFilepath, layer['nodata'])

		tmpFiles = message.get('tmpFiles')
		tmpFiles += layer['files']
		message.set('tmpFiles', tmpFiles)

		del layer['files']
		layer['file'] = outputFilepath
		message.set('layer', layer)

		utils.log(self.name, 'Forward message (', outputFilename, ')')
		self.publish(message)
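
The output filename assembly, in isolation (all values are placeholders):

productName = "MOD13Q1"   # already has '.' replaced by '_'
layer_name = "NDVI"
outputFilename = "_".join([productName, layer_name, "2017" + "001", "africa"]) + ".tif"
print(outputFilename)     # MOD13Q1_NDVI_2017001_africa.tif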
Example #29
    def setup(self):
        if(APP_KEY == '' or APP_SECRET == ''):
            xbmcgui.Dialog().ok(utils.getString(30010),utils.getString(30058),utils.getString(30059))
            return
        
        user_token_key,user_token_secret = self.getToken()
        
        sess = session.DropboxSession(APP_KEY,APP_SECRET,"app_folder")

        if(user_token_key == '' and user_token_secret == ''):
            token = sess.obtain_request_token()
            url = sess.build_authorize_url(token)

            #print url in log
            utils.log("Authorize URL: " + url)
            xbmcgui.Dialog().ok(utils.getString(30010),utils.getString(30056),utils.getString(30057))  
            
            #if user authorized this will work
            user_token = sess.obtain_access_token(token)
            self.setToken(user_token.key,user_token.secret)
            
        else:
            sess.set_token(user_token_key,user_token_secret)
        
        self.client = client.DropboxClient(sess)

        try:
            utils.log(str(self.client.account_info()))
        except:
            #this didn't work, delete the token file
            self.deleteToken()
Example #30
def playlist_item(play_list, rar_file_list, folder, sab_nzo_id, sab_nzo_id_history):
    log("playlist_item: play_list: %s rar_file_list: %s folder: %s sab_nzo_id: %s sab_nzo_id_history: %s" %\
       (play_list, rar_file_list, folder, sab_nzo_id, sab_nzo_id_history))
    new_play_list = play_list[:]
    for arch_rar, movie_file in zip(play_list[0::2], play_list[1::2]):
        info = nfo.ReadNfoLabels(folder)
        xurl = "%s?mode=%s" % (sys.argv[0],MODE_LIST_PLAY)
        url = (xurl + "&nzoid=" + str(sab_nzo_id) + "&nzoidhistory=" + str(sab_nzo_id_history)) +\
              "&play_list=" + utils.quote_plus(';'.join(new_play_list)) + "&folder=" + utils.quote_plus(folder) +\
              "&file_list=" + utils.quote_plus(';'.join(rar_file_list))
        new_play_list.remove(arch_rar)
        new_play_list.remove(movie_file)
        item = xbmcgui.ListItem(movie_file, iconImage='DefaultVideo.png', thumbnailImage=info.thumbnail)
        item.setInfo(type="Video", infoLabels=info.info_labels)
        item.setProperty("Fanart_Image", info.fanart)
        item.setPath(url)
        isfolder = False
        # item.setProperty("IsPlayable", "true")
        cm = []
        if sab_nzo_id_history:
            cm_url_repair = sys.argv[0] + '?' + "mode=repair" + "&nzoidhistory=" + str(sab_nzo_id_history) + "&folder=" + utils.quote_plus(folder)
            cm.append(("Repair" , "XBMC.RunPlugin(%s)" % (cm_url_repair)))
        cm_url_delete = sys.argv[0] + '?' + "mode=delete" + "&nzoid=" + str(sab_nzo_id) + "&nzoidhistory=" + str(sab_nzo_id_history) + "&folder=" + utils.quote_plus(folder)
        cm.append(("Delete" , "XBMC.RunPlugin(%s)" % (cm_url_delete)))
        item.addContextMenuItems(cm, replaceItems=True)
        xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url, listitem=item, isFolder=isfolder)
    xbmcplugin.setContent(HANDLE, 'movies')
    xbmcplugin.endOfDirectory(HANDLE, succeeded=True, cacheToDisc=True)
    return
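
The `zip(play_list[0::2], play_list[1::2])` trick pairs consecutive items as (archive, movie); a tiny demonstration:

play_list = ["a.rar", "a.avi", "b.rar", "b.avi"]
for arch_rar, movie_file in zip(play_list[0::2], play_list[1::2]):
    print(arch_rar, "->", movie_file)
# a.rar -> a.avi
# b.rar -> b.avi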
Example #31
 def exception_dialog(self, error):
     if debug.get() == False:
         log(error, xbmc.LOGERROR)
     dialog.error(error)
Example #32
def comment_delete(request):
    comment_id = int(request.query['id'])
    Comment.delete(comment_id)
    log('删除的评论id', comment_id)
    d = dict(message="成功删除 comment")
    return json_response(d)
Example #33
import utils
import conexion
import cliente

log = utils.log("INIT")
log.info("inicio del programa")
lstCliente = []
lstTipoPago = []
lstEmpresa = []
lstProductos = []


def cargarObjetos():
    conn = conexion.conexionBDD(1)
    query = "select idCliente, nombreCliente as Nombre, nroIdentCliente as ID, direccionCliente as Direccion from cliente;"
    resconn = conn.consultarBDD(query)
    for row in resconn:
        nuevoCliente = cliente.cliente(row[0], row[1], row[2], row[3])  # don't shadow the cliente module
        lstCliente.append(nuevoCliente)

    for obj in lstCliente:
        print(obj.nombreCliente)
    input("continuar")


def mantenimientoCliente():
    dicMenuCliente = {
        "\t- Buscar Cliente Todos": 1,
        "\t- Buscar Cliente por DNI": 2,
        "\t- Modificar Cliente por DNI": 3,
        "\t- Crear Cliente": 4,
Example #34
    def parse_feed(self, key, value, i):
        if self.source == "AP" or self.source == "Reuters":
            try:
                news_url = self.url % key

                if self.source == "Reuters":
                    news_url += "?since=" + str(
                        int((time.mktime(datetime.utcnow().timetuple()))) *
                        10000000000)

                feed = requests.get(news_url).json(
                )  # we use AP's API to download their news, it's epic and it uses JSON
            except:
                return i
        elif self.source == "AFP_French" or self.source == "ANP":
            feed = feedparser.parse(self.url)
        elif self.source == "ANSA" and value == "italy":
            feed = feedparser.parse(self.sourceinfo["url2"] % (key, key))
        elif self.source == "ANSA":
            feed = feedparser.parse(self.url % (key, key))
        else:
            feed = feedparser.parse(self.url % key)

        j = 0

        if self.source == "AP":
            entries = feed["cards"]
        elif self.source == "Reuters":
            entries = feed["wireitems"]
        elif self.source == "AFP_French":
            entries = feed.entries + feedparser.parse(
                self.sourceinfo["url2"]).entries
        else:
            entries = feed.entries

        for entry in entries:
            try:
                if self.source == "AP":
                    try:
                        entry = entry["contents"][0]
                    except:
                        continue
                elif self.source == "Reuters":
                    try:
                        _ = entry["templates"][1]["story"]["hed"]
                        entry = entry["templates"][1]
                    except:
                        continue

                current_time = int(
                    (time.mktime(datetime.utcnow().timetuple()) - 946684800) /
                    60)

                if self.source == "AP":
                    update = time.strptime(entry["updated"],
                                           "%Y-%m-%d %H:%M:%S")
                elif self.source == "Reuters":
                    update = time.strptime(entry["story"]["updated_at"],
                                           "%Y-%m-%dT%H:%M:%SZ")
                else:
                    update = entry["updated_parsed"]

                updated_time = int((time.mktime(update) - 946684800) / 60)

                if self.source == "AFP_French" and current_time - updated_time < 0:
                    updated_time -= 180

                if (current_time - updated_time <
                        60):  # if it's a new article since the last hour
                    i += 1
                    j += 1

                    if (
                            i > 25
                    ):  # in case we have too many articles, we don't want the news file to get too big, there's a limit
                        break

                    if self.source == "AFP_French" or self.source == "ANP_Dutch":
                        if key not in entry["link"]:
                            continue
                    elif self.source == "AFP" and "SID" in entry["description"]:
                        self.source = "SID"
                    elif self.source == "NU.nl" and entry["author"] == "ANP":
                        self.source = "ANP"

                    if self.source == "AP":
                        title = entry["headline"]
                    elif self.source == "Reuters":
                        title = entry["story"]["hed"]
                    else:
                        title = entry["title"]

                    print(title)

                    if self.source == "AP":
                        entry_url = entry["gcsUrl"]
                    elif self.source == "Reuters":
                        entry_url = self.url[:30] + entry["template_action"][
                            "api_path"]
                    else:
                        entry_url = entry["link"]

                    downloaded_news = Parse(entry_url, self.source,
                                            updated_time, title,
                                            self.language).get_news()

                    if downloaded_news:
                        self.newsdata[value + str(j)] = downloaded_news
            except Exception as e:
                ex = "Failed to parse feed - line {}: {}".format(
                    sys.exc_info()[-1].tb_lineno, str(e))
                print(ex)
                log(ex, "INFO")
                continue

        return i
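
The magic constant 946684800 is the Unix timestamp of 2000-01-01 00:00 UTC, so the feed code measures article age in minutes since 2000. A UTC-safe sketch of the conversion (the snippet's mktime-of-utcnow mixes local time and UTC):

import calendar
import time

EPOCH_2000 = 946684800  # seconds from 1970-01-01 to 2000-01-01, both UTC
minutes_since_2000 = int((calendar.timegm(time.gmtime()) - EPOCH_2000) / 60)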
Example #35
def locations_download(
    language_code, data
):  # using Google Maps API is so much better than the crap Nintendo used for say, AP news.
    locations = {}
    gmaps = googlemaps.Client(key=config["google_maps_api_key"])

    languages = {  # corresponds to the Wii's language codes
        0: "ja",
        1: "en",
        2: "de",
        3: "fr",
        4: "es",
        5: "it",
        6: "nl",
    }

    for keys, values in list(data.items()):
        location = values[7]

        if location and location != "":
            if location not in locations:
                locations[location] = [None, None, []]

            locations[location][2].append(keys)

    for name in list(locations.keys()):
        if name == "":
            continue

        uni_name = (
            name if languages[language_code] == "ja" else unidecode(name)
        )  # if using unidecode with Japanese, it'll translate all the characters to English

        print(uni_name)

        coordinates = None

        if name not in cities:
            try:
                read = gmaps.geocode(uni_name,
                                     language=languages[language_code])
                loc_name = read[0]["address_components"][0]["long_name"]

                if languages[language_code] == "ja":
                    loc_name = enc(loc_name)
                else:
                    loc_name = enc(unidecode(loc_name))
                """Not doing anything with these."""

                country = u8(0)
                region = u8(0)
                location = u16(0)
                zoom_factor = u32_littleendian(
                    6
                )  # Nintendo used the value of 3 for states and countries but we probably don't have any articles that are just states or countries

                coordinates = (
                    s16(
                        int(read[0]["geometry"]["location"]["lat"] /
                            (360 / 65536))) + s16(
                                int(read[0]["geometry"]["location"]["lng"] /
                                    (360 / 65536))) + country + region +
                    location + zoom_factor
                )  # latitude and longitude is divided by the value of 360 (degrees of a full circle) divided by the max int for a 16-bit int
            except Exception as e:
                ex = "There was a error downloading the location data - line {}: {}".format(
                    sys.exc_info()[-1].tb_lineno, str(e))
                print(ex)
                log(ex, "INFO")

        else:
            coordinates = binascii.unhexlify(cities[name][0] +
                                             "0000000006000000")
            loc_name = enc(cities[name][1])

        if locations[name][0] is None and coordinates is not None:
            locations[name][0] = coordinates
        else:
            del locations[name]
            continue

        if locations[name][1] is None:
            locations[name][1] = loc_name

    return locations
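
Latitude and longitude are scaled so that 360 degrees span the full signed 16-bit range. A quick check of the arithmetic, with a hypothetical stand-in for the snippet's `s16` helper:

import struct

def s16(value):
    # hypothetical stand-in for the snippet's s16 helper; byte order assumed
    return struct.pack(">h", value)

lat = 48.8566                     # degrees north
units = int(lat / (360 / 65536))  # one unit = 360/65536 degrees
print(units)                      # 8894
packed = s16(units)               # two bytes, ready for concatenation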
Example #36
def log(msg):
    utils.log(utils.log_level_debug, TAG, msg)
Example #37
def start(bot, update):
    bot.send_message(chat_id=update.message.chat_id, text="Test if it works")
    try:
        log(update.message.text.split(' ', 1)[1], 'info')
    except IndexError:
        log('No input given.', 'info')
Example #38
 def update( self ):
     try:
         utils.log( '  Unzipping "%s" ... into "%s"' % ( shorten( self.source_ ), os.path.dirname( self.file_path_ ) ) )
         self.unzip_func_( self.source_, os.path.dirname( self.file_path_ ) )
     except Exception, msg:
         utils.log( '  Skipping "%s" due to errors (%s)' % ( self.source_, msg ) )
Example #39
    def run( self ):
        utils.log( "%s: run" % shorten( self.file_path_ ) )
        __log__ = 2

        for dependency in self.dependencies_:
            if not os.path.exists( dependency ):
                utils.log( "%s doesn't exists, removing target" % shorten( dependency ) )
                self.clean()
                return

        if not os.path.exists( self.file_path_ ):
            utils.log( "target doesn't exists, building" )            
            self.update()
            return

        dst_timestamp = _modtime_timestamp( self.file_path_ )
        utils.log( "    target: %s [%s]" % ( shorten( self.file_path_ ),  dst_timestamp ) )
        needs_updating = 0
        utils.log( "    dependencies:" )
        for dependency in  self.dependencies_:
            dm = _modtime_timestamp( dependency )
            update_mark = ""
            if dm > dst_timestamp:
                needs_updating = 1
            utils.log( '        %s [%s] %s' % ( shorten( dependency ), dm, update_mark ) )
            
        if  needs_updating:
            utils.log( "target needs updating, rebuilding" )            
            self.update()
            return
        else:
            utils.log( "target is up-to-date" )            
Example #40
def build_reports( 
          locate_root_dir
        , tag
        , expected_results_file
        , failures_markup_file
        , comment_file
        , results_dir
        , result_file_prefix
        , dont_collect_logs = 0
        , reports = report_types
        , report_executable = None
        , warnings = []
        , user = None
        , upload = False
        , filter_runners = None
        ):

    ( run_date ) = time.strftime( '%Y-%m-%dT%H:%M:%SZ', time.gmtime() )

    root_paths.append( locate_root_dir )
    root_paths.append( results_dir )
    
    bin_boost_dir = os.path.join( locate_root_dir, 'bin', 'boost' )
    
    output_dir = os.path.join( results_dir, result_file_prefix )
    utils.makedirs( output_dir )
    
    if expected_results_file != '':
        expected_results_file = os.path.abspath( expected_results_file )
    else:
        expected_results_file = os.path.abspath( map_path( 'empty_expected_results.xml' ) )


    extended_test_results = os.path.join( output_dir, 'extended_test_results.xml' )
    
    if filter_runners == None:
        if default_filter_runners.has_key(tag):
            filter_runners = default_filter_runners[tag]
        
    execute_tasks(
          tag
        , user
        , run_date
        , comment_file
        , results_dir
        , output_dir
        , reports
        , warnings
        , extended_test_results
        , dont_collect_logs
        , expected_results_file
        , failures_markup_file
        , report_executable
        , filter_runners
        )

    if upload:
        upload_dir = 'regression-logs/'
        utils.log( 'Uploading  results into "%s" [connecting as %s]...' % ( upload_dir, user ) )
        
        archive_name = '%s.tar.gz' % result_file_prefix
        utils.tar( 
              os.path.join( results_dir, result_file_prefix )
            , archive_name
            )
        
        utils.sourceforge.upload( os.path.join( results_dir, archive_name ), upload_dir, user )
        utils.sourceforge.untar( os.path.join( upload_dir, archive_name ), user, background = True )
Example #41
def image(filename):
    log('images path', filename)
    return send_from_directory('images', filename)
Example #42
 def clean( self ):
     to_unlink = self.other_results_ + [ self.file_path_ ]
     for result in to_unlink:
         utils.log( '  Deleting obsolete "%s"' % shorten( result ) )
         if os.path.exists( result ):
             os.unlink( result )
Example #43
def cross_validate_model_fold(chunk_input: WorkerInput) -> ModelResult:
    log("Execution fold", level=2)
    timer = Timer()
    classifier = chunk_input['classifier']
    X_train = chunk_input['X_train']
    y_train = chunk_input['y_train']
    X_test = chunk_input['X_test']
    return_model = chunk_input['return_model']

    if get_log_level() == 1:
        print(".")

    feature_names = \
        chunk_input['feature_names'] if \
            ('feature_names' in chunk_input and chunk_input['feature_names'] is not None) \
            else list(X_train.columns)

    classifier.fit(X_train, y_train, **chunk_input['fit_kwargs'])

    y_predict = Series(classifier.predict(X_test), index=X_test.index)
    y_train_predict = Series(classifier.predict(X_train), index=X_train.index)

    try:
        y_predict_probabilities_raw = classifier.predict_proba(X_test)
        y_train_predict_probabilities_raw = classifier.predict_proba(X_train)
    except AttributeError:
        y_predict_probabilities = y_predict
        y_train_predict_probabilities = y_train_predict
    else:
        probability_columns = [
            f'y_predict_probabilities_{i}'
            for i in range(y_predict_probabilities_raw.shape[1])
        ]
        y_predict_probabilities = DataFrame(y_predict_probabilities_raw,
                                            index=X_test.index,
                                            columns=probability_columns)
        y_train_predict_probabilities = DataFrame(
            y_train_predict_probabilities_raw,
            index=X_train.index,
            columns=probability_columns)

    if y_predict.dtype == np.float:
        y_predict = y_predict \
            .map(lambda v: 0 if v < 0 else v) \
            .map(lambda v: 1 if v > 1 else v) \
            .map(lambda v: round(v))

    try:
        feature_importance = Series(
            classifier[-1].feature_importances_,
            index=feature_names,
        )
    except (TypeError, AttributeError):
        try:
            classifier[-1].coef_
        except AttributeError:
            feature_importance = None
            logging.debug("No feature importance in the result")
        else:
            feature_importance = None
            # feature_importance = Series(classifier[-1].coef_[0], index=feature_names)

    if not return_model:
        try:
            classifier[-1].get_booster().__del__()
        except AttributeError:
            pass

    return ModelResult(y_test_score=y_predict_probabilities,
                       y_test_predict=y_predict,
                       y_train_predict=y_train_predict,
                       y_train_score=y_train_predict_probabilities,
                       feature_importance=feature_importance,
                       model=classifier[-1] if return_model else None,
                       elapsed=timer.elapsed_cpu())
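
The try/except around `predict_proba` is the usual guard for estimators that don't expose probabilities; the same fallback in isolation with scikit-learn:

from sklearn.datasets import make_classification
from sklearn.svm import LinearSVC

X, y = make_classification(n_samples=50, random_state=0)
clf = LinearSVC().fit(X, y)
try:
    scores = clf.predict_proba(X)      # LinearSVC does not implement this
except AttributeError:
    scores = clf.decision_function(X)  # fall back, as the snippet falls back to labels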
Example #44
"""
-------------------------------------------------
   File Name:     main.py
   Description :  runs the main function
   Author :       Sam
   date:          2017/7/21
-------------------------------------------------
"""

__author__ = 'Sam'

import logging
import os
import sys
import utils

from scrapy import cmdline

if __name__ == '__main__':
    reload(sys)
    sys.setdefaultencoding('utf-8')

    if not os.path.exists('log'):
        os.makedirs('log')

    logging.basicConfig(filename='log/item.log',
                        format='%(levelname)s %(asctime)s: %(message)s',
                        level=logging.DEBUG)

    utils.log('*******************run spider start...*******************')
    cmdline.execute('scrapy crawl category_urls'.split())
    #cmdline.execute('scrapy crawl item_list'.split())
Example #45
if __name__ == '__main__':
    # Step 1. Configuration file
    config = parsing_configurations()

    # Create logger file
    training_loss_file = None
    evaluate_stat_file = None
    if config['LOG_FOLDER'] != '':
        if not os.path.exists(config['LOG_FOLDER']):
            os.makedirs(config['LOG_FOLDER'])
        training_loss_file = open(config['LOG_FOLDER'] + '/training_loss.txt',
                                  'w')
        evaluate_stat_file = open(config['LOG_FOLDER'] + '/evaluate_stat.txt',
                                  'w')
    log(config, training_loss_file)
    log(config, evaluate_stat_file)

    # Step 2. Create dataset loader
    train_dataloader, test_dataloader, val_dataloader = create_dataset_loader(
        config)

    # Step 3. Create cnn
    cnn = create_network(config)

    if config['CKPT_PATH'] != '':
        print('Loading checkpoint from %s' % config['CKPT_PATH'])
        cnn.load_state_dict(torch.load(config['CKPT_PATH']))

    # Step 4. Create optimizer
    optimizer = None
Example #46
        nExemplars=opt.shot,  # num training examples per novel category
        nTestNovel=opt.query *
        opt.way,  # num test examples for all the novel categories
        nTestBase=0,  # num test examples for all the base categories
        batch_size=1,
        num_workers=1,
        epoch_size=opt.episode,  # num of batches per epoch
    )

    # if opt.aws == 1:
    #     set_gpu(opt.gpu)

    test_loader = dloader_test.get_dataloader()

    log_file_path = os.path.join(os.path.dirname(opt.load), "test_log.txt")
    log(log_file_path, str(vars(opt)))

    # Define the models
    (embedding_net, cls_head) = get_model(opt)

    if opt.test_fusion_module == 1:
        fusion_mod = fuse_score(dc_factor=dc_factor,
                                weight_learn=opt.learn_fusion_weight).cuda()
    elif opt.test_fusion_module == 2:
        fusion_mod = fuse_prob(dc_factor=dc_factor,
                               weight_learn=opt.learn_fusion_weight).cuda()
    else:
        fusion_mod = fuse_score(dc_factor=dc_factor,
                                weight_learn=opt.learn_fusion_weight).cuda()

    # Load saved model checkpoints
Example #47
def save(data, path):
    s = json.dumps(data, indent=2, ensure_ascii=False)
    with open(path, 'w+', encoding='utf-8') as f:
        log('save', path, s, data)
        f.write(s)
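
`ensure_ascii=False` is what keeps non-ASCII text readable in the saved file; compare:

import json

print(json.dumps({"k": "中文"}))                      # {"k": "\u4e2d\u6587"}
print(json.dumps({"k": "中文"}, ensure_ascii=False))  # {"k": "中文"}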
Example #48
def compute_sequence(epoch, model, loss_fn, loader, optimizer=None, mode='eval', fh=None, backprop_batch_size=None, tolog=[],return_preds=False):
    global cuda
    #Pdb().set_trace() 
    if backprop_batch_size is None:
        backprop_batch_size = loader.batch_sampler.batch_size
    t1 = time.time()
    if mode == 'train':
        model.train()
    else:
        model.eval()

    last_print = 0
    count = 0
    num_words  = 0
    cum_loss = 0
    
    ypred_cum = []
    y_cum = []
    #idx_mask = []
    
    #variables to write output to a file in correct order
    if return_preds:
        #all_y= [None for i in  range(len(loader.dataset))]
        #all_x = [None for i in range(len(loader.dataset))]
        all_pred = [None for i in range(len(loader.dataset))]


    if mode == 'train':
        this_backprop_count = 0
        optimizer.zero_grad()
        backprop_loss = 0

    for x, y, idx in loader:
        # break
        count += len(idx)
        num_words += y.shape[0]*y.shape[1]
        # print(len(idx))
        #
        volatile = True
        if mode == 'train':
            this_backprop_count += len(idx)
            volatile = False

        x, y = Variable(x, volatile=volatile), Variable(
            y.long(), volatile=volatile)
        if cuda:
            x, y = x.cuda(), y.cuda()
        #
        loss,pred = loss_fn(x,y,model)
        #ypred = model(x)
         #loss = criterion(ypred.transpose(1,2), y)
        if mode == 'train':
            backprop_loss += loss
            if this_backprop_count >= backprop_batch_size:
                #utils.log("backproping now: {0}".format(this_backprop_count))
                backprop_loss.backward()
                # loss.backward()
                optimizer.step()
                this_backprop_count = 0
                backprop_loss = 0
                optimizer.zero_grad()
        #

        y = y.data.cpu().numpy()
        x = x.data.cpu().numpy()
        pred = pred.cpu().numpy()
        y_cum.extend(y.flatten())
        ypred_cum.extend(pred.flatten())
        cum_loss = cum_loss + loss.data[0]*y.shape[0]*y.shape[1]

        if return_preds:    
            for i in range(len(idx)):
                #all_y[idx[i]] = y[i,:]
                all_pred[idx[i]] = pred[i,:]
                #all_x[idx[i]] = x[i,:]


        

        if (count - last_print) >= 20000:
            last_print = count
            rec = [epoch, mode, 1.0 * cum_loss / num_words,
                    count, num_words, time.time() - t1] + tolog + stats_to_log(ypred_cum, y_cum)

            utils.log(','.join([str(round(x, 5)) if isinstance(
                x, float) else str(x) for x in rec]))

    
    rec = [epoch, mode, 1.0 * cum_loss / num_words,
                    count, num_words, time.time() - t1] + tolog + stats_to_log(ypred_cum, y_cum)

    utils.log(','.join([str(round(x, 5)) if isinstance(
        x, float) else str(x) for x in rec]), file=fh)

    if return_preds:
        return(rec,-1,all_pred)
    else:
        return (rec,-1)
Example #49
def load(path):
    with open(path, 'r', encoding='utf-8') as f:
        s = f.read()
        log('load', s)
        return json.loads(s)
Example #50
def handlerproxy(dispatcher):
    for c in Commands:
        handler_proxy = CommandHandler(c.value, getattr(commands, c.value))
        dispatcher.add_handler(handler_proxy)
        log('added: ' + c.value, 'info')
Example #51
from scipy.interpolate import InterpolatedUnivariateSpline as spline
from scipy.optimize import curve_fit, bisect, brentq, least_squares
from scipy.interpolate import splrep, splev
from numpy import log, sqrt, exp, inf, nan, pi
from numpy.ctypeslib import ndpointer
from scipy.optimize import minimize
from scipy.stats import norm
from scipy import stats
from time import sleep
import concurrent.futures
import numpy as np
import ctypes
import utils
import os

cout = utils.log(__file__, __name__, disp=False)
N, n = norm.cdf, norm.pdf
np.seterr(divide='ignore')
np.warnings.filterwarnings('ignore')
base = 253

#------------------------------------------------------------------------------------------------
curr_dir = os.path.dirname(os.path.realpath(__file__))
lib = ctypes.cdll.LoadLibrary(f"{curr_dir}/KimIntegral.dll")


class KimIntegral:
    def __init__(self, R, S, K, T, s, r, q, H0, H1, Kh, M=12):
        R = 1 if R == "C" or R == "Call" or R == 1 else 0
        lib.KI.argtypes = [
            ctypes.c_int, ctypes.c_int, ctypes.c_double, ctypes.c_double,
Example #52
 def all(cls):
     path = cls.db_path()
     models = load(path)
     log('models in all', models)
     ms = [cls.new(m) for m in models]
     return ms
Example #53
def checkModels():
    log("checkModels(): %s" % 1)
    stanza.download('en')
Пример #54
0
def params_test(policy,
                env,
                n_steps=args.n_steps_test,
                ppo_epochs=args.ppo_epoch_test,
                clip_params=args.clip_param_test,
                gammas=args.gamma_test,
                lambdas=args.gae_lambda_test,
                loss_coefs=args.value_loss_coef_test,
                entropy_coefs=args.entropy_coef_test,
                lrs=args.lr_test):

    i = 0
    for n_step in n_steps:  #[512]
        for ppo_epoch in ppo_epochs:  #[4]
            for clip_param in clip_params:  #[0.1, 0.2]
                for gamma in gammas:  #[0.99, 0.9997]
                    for lb in lambdas:  #[0.95]
                        for value_loss_coef in loss_coefs:  #[0.5]
                            for entropy_coef in entropy_coefs:  #[0.01, 0.001]
                                for lr in lrs:  #[2.5e-4, 7e-4]

                                    i += 1

                                    # if i in [1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12, 13, 15, 16]: #Falta el 6 y el 14 por hacer completo
                                    #     pass
                                    # else:

                                    # if i == 15:
                                    #     print('Continue Training')
                                    #     trained_model = "/home/home/Data/Carmen/py_workspace/ObstacleTower_v3/python_scripts/Obstacle_Tower_Carmen_Raposo/results/June-17-2020_01_46AM/model/__15__study_0000350000.zip"
                                    #     model = PPO2.load(trained_model, env=env, tensorboard_log="/home/home/Data/Carmen/py_workspace/ObstacleTower_v3/python_scripts/Obstacle_Tower_Carmen_Raposo/results/June-17-2020_01_46AM/tensorboard/15/")
                                    #     t = 375000
                                    #     GLOBAL_PATH = "/home/home/Data/Carmen/py_workspace/ObstacleTower_v3/python_scripts/Obstacle_Tower_Carmen_Raposo/results/June-17-2020_01_46AM/model/"
                                    #     filename = 'argsparams' + str(i) + '.txt'
                                    #     os.makedirs(args.results_dir, exist_ok=True)
                                    #
                                    #
                                    # else:
                                    print(
                                        'Start Training: \n n_step: %f \n ppo_epoch: %f \n clip_param: %f \n gamma: %f'
                                        '\n lambda: %f \n value_loss_coef: %f \n entropy_coef: %f \n learning_rate : %f'
                                        % (n_step, ppo_epoch, clip_param,
                                           gamma, lb, value_loss_coef,
                                           entropy_coef, lr))
                                    #Fixed seed
                                    seed = random.seed(0)
                                    #env.seed(5)

                                    # common PPO2 arguments; GAE's lambda is
                                    # only passed when it is being swept
                                    ppo_kwargs = dict(
                                        n_steps=n_step,
                                        verbose=1,
                                        tensorboard_log=args.tensorboard_logdir
                                        + str(i) + '/',
                                        cliprange=clip_param,
                                        learning_rate=lr,
                                        ent_coef=entropy_coef,
                                        vf_coef=value_loss_coef,
                                        max_grad_norm=args.max_grad_norm,
                                        gamma=gamma,
                                        noptepochs=ppo_epoch,
                                        seed=seed)
                                    if args.use_gae_test:
                                        ppo_kwargs['lam'] = lb
                                    model = PPO2(policy, env, **ppo_kwargs)

                                    # save the values of the configured parameters
                                    filename = 'argsparams' + str(i) + '.txt'
                                    os.makedirs(args.results_dir,
                                                exist_ok=True)
                                    with open(args.results_dir + filename,
                                              'w+') as myfile:
                                        myfile.write(
                                            'n_step: %f \n ppo_epoch: %f \n clip_param: %f \n gamma: %f \n lambda: %f '
                                            '\n value_loss_coef: %f \n entropy_coef: %f \n learning_rate : %f'
                                            % (n_step, ppo_epoch, clip_param,
                                               gamma, lb, value_loss_coef,
                                               entropy_coef, lr))
                                    t = 0

                                    while t < args.num_env_steps_test:
                                        # TRAIN MODEL
                                        try:
                                            if t == 0:
                                                model.learn(
                                                    total_timesteps=args.eval_interval)
                                            else:
                                                model.learn(
                                                    total_timesteps=args.eval_interval,
                                                    reset_num_timesteps=False)

                                            os.makedirs(GLOBAL_PATH,
                                                        exist_ok=True)
                                            print("Saving in '" + GLOBAL_PATH +
                                                  "'")
                                            model.save(GLOBAL_PATH + '__' +
                                                       str(i) + '__' +
                                                       args.training_name +
                                                       "_" +
                                                       str(int(t)).zfill(10))

                                            avg_reward, avg_floor = test(
                                                t,
                                                model,
                                                env=env,
                                                global_path=GLOBAL_PATH +
                                                '__' + str(i),
                                                i=i)  # Test
                                            log('T = ' + str(t) + ' / ' +
                                                str(args.num_env_steps_test) +
                                                ' | Avg. reward: ' +
                                                str(avg_reward) +
                                                ' | Avg. floor: ' +
                                                str(avg_floor))

                                            t += args.eval_interval
                                        except Exception as e:

                                            env.close()

                                            with open(GLOBAL_PATH + filename,
                                                      'a') as myfile:
                                                myfile.write(
                                                    '\n An exception %s has occurred at step %f'
                                                    % (e, t))

                                            del model

                                            from obstacle_tower_env import ObstacleTowerEnv
                                            env = ObstacleTowerEnv(
                                                '/home/home/Data/Carmen/py_workspace/ObstacleTower_v3/ObstacleTower-v3.1/obstacletower.x86_64',
                                                retro=args.retro,
                                                realtime_mode=args.test,
                                                timeout_wait=6000)

                                            break

                                    env.reset()
                                    break
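The eight nested loops in params_test enumerate a full Cartesian grid of hyperparameters. A sketch of the same sweep flattened with itertools.product, which keeps the run counter and the parameter names in one place (a refactoring suggestion, not part of the original):

import itertools

def iter_param_grid(n_steps, ppo_epochs, clip_params, gammas,
                    lambdas, loss_coefs, entropy_coefs, lrs):
    # yield (run_index, params) for every combination, in the same
    # order as the nested loops above
    names = ('n_step', 'ppo_epoch', 'clip_param', 'gamma',
             'lam', 'vf_coef', 'ent_coef', 'lr')
    grid = itertools.product(n_steps, ppo_epochs, clip_params, gammas,
                             lambdas, loss_coefs, entropy_coefs, lrs)
    for i, combo in enumerate(grid, start=1):
        yield i, dict(zip(names, combo))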
Example #55
0
 @classmethod
 def all_json(cls):
     ms = cls.all()
     # must be converted to dicts before serializing
     js = [t.json() for t in ms]
     log('all json is:', js)
     return js
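all_json() assumes every instance has a json() method that turns it into a plain dict. A one-method sketch of that assumption:

 def json(self):
     # copy instance attributes into a plain dict so it can be serialized
     return self.__dict__.copy()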
Example #56
0
                    'model': model,
                    'train_loss': train_loss,
                    'valid_loss': valid_loss
                }, opt.model_filename + '.model')
            torch.save(optimizer, opt.model_filename + '.optim')
            model.intype("gpu")

        log_string = (
            'iter: {:d}, train_loss: {:0.6f}, valid_loss: {:0.6f}, best_valid_loss: {:0.6f}, lr: {:0.5f}'
        ).format((i + 1) * opt.epoch_size, train_loss[-1], valid_loss[-1],
                 best_valid_loss, opt.lrt)
        print(log_string)
        utils.log(opt.model_filename + '.log', log_string)


if __name__ == '__main__':
    numpy.random.seed(opt.seed)
    torch.manual_seed(opt.seed)
    # build the model
    opt.n_in = opt.ncond * opt.nc
    opt.n_out = opt.npred * opt.nc
    model = models.BaselineModel3Layer(opt).cuda()
    optimizer = optim.Adam(model.parameters(), opt.lrt)
    if opt.loss == 'l1':
        criterion = nn.L1Loss().cuda()
    elif opt.loss == 'l2':
        criterion = nn.MSELoss().cuda()
    print('training...')
    utils.log(opt.model_filename + '.log', '[training]')
    train(500)
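The checkpointing above stores the model object and the optimizer whole with torch.save, so restoring them is a plain torch.load. A minimal sketch, assuming the same opt.model_filename naming (the model class must be importable when loading):

import torch

def restore_checkpoint(model_filename):
    # torch.save stored whole objects, so torch.load returns them directly
    checkpoint = torch.load(model_filename + '.model')
    optimizer = torch.load(model_filename + '.optim')
    return (checkpoint['model'], optimizer,
            checkpoint['train_loss'], checkpoint['valid_loss'])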
Example #57
0
 def fetchStarship(self, starship):
     # Return a specific starship matching the search term
     url = self.base_url + 'starships/?search=' + starship
     utils.log(url)
     r = requests.get(url)
     return r
Example #58
0
def parseInput(text):
    log("parseInput(): input: %s" % text)

    nlp = stanza.Pipeline(lang='en', processors='tokenize,mwt,pos')
    doc = nlp(text)
    log(*[
        f'word: {word.text}\tupos: {word.upos}\txpos: {word.xpos}\tfeats: {word.feats if word.feats else "_"}'
        for sent in doc.sentences for word in sent.words
    ],
        sep='\n')

    parsed = {"nouns": []}
    for sent in doc.sentences:
        for word in sent.words:
            if word.upos == POS_NOUN:
                parsed["nouns"].append(word)
    log("parseInput(): done parsing input, parsed: \n%s" % parsed)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Parse text and extract nouns.')
    parser.add_argument('-text',
                        action='store',
                        type=str,
                        help='The text to parse.')
    args = parser.parse_args()
    log("app.py: input from command line: %s" % repr(args))

    checkModels()
    parseInput(args.text)
Example #59
0
 def fetchSpeciesById(self, id):
     # Return the species information for a specific species id
     url = self.base_url + 'species/?search=' + str(id)
     utils.log(url)
     r = requests.get(url)
     return r
Example #60
0
 def fetchStarships(self):
     # Return all starships
     url = self.base_url + 'starships/'
     utils.log(url)
     r = requests.get(url)
     return r
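fetchStarship, fetchSpeciesById and fetchStarships (Examples #57, #59, #60) all follow the same pattern: build a URL from self.base_url, log it, and return the raw requests response. A minimal client sketch under that assumption; the base URL is a guess, since only self.base_url appears in the snippets:

import requests

class SwapiClient:
    def __init__(self, base_url='https://swapi.dev/api/'):
        # base_url is assumed; the snippets only reference self.base_url
        self.base_url = base_url

    def _get(self, resource, search=None):
        # compose the URL, log it, return the raw response, as above
        url = self.base_url + resource + '/'
        if search is not None:
            url = self.base_url + resource + '/?search=' + str(search)
        print(url)  # stand-in for utils.log
        return requests.get(url)

    def fetchStarships(self):
        return self._get('starships')

    def fetchStarship(self, starship):
        return self._get('starships', starship)

    def fetchSpeciesById(self, id):
        return self._get('species', id)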