Example #1
 def _download_video( self ):
     # initialize here so the cleanup code in the error handler can safely reference it
     tmppath = ""
     try:
         # spam log file
         LOG( ">>> _download_video(title: %s)" % ( repr( self.g_title ), ), heading=True )
         # get filepath and tmp_filepath
         tmppath, self.filepath = get_legal_filepath( self.g_title, self.params[ "download" ], self.settings[ "play_mode" ], self.settings[ "download_path" ], self.settings[ "use_title" ], self.settings[ "use_trailer" ] )
         # only download if the trailer doesn't exist
         if ( not os.path.isfile( self.filepath.encode( "utf-8" ) ) ):
             # only need to retrieve video if not in tmp path
             if ( not os.path.isfile( tmppath.encode( "utf-8" ) ) ):
                 # fetch the video
                 urllib.urlretrieve( self.params[ "download" ], tmppath.encode( "utf-8" ), self._report_hook )
             # create the conf file for xbox and copy to final location
             ok = self._finalize_download( tmppath )
             # if the copy failed raise an error
             if ( not ok ): raise Exception( "_finalize_download() failed" )
     except Exception, e:
         # oops, notify user what error occurred
         LOG( str( e ), xbmc.LOGERROR )
         # the file is not always released immediately, so retry removal a few times, sleeping in between
         urllib.urlcleanup()
         remove_tries = 3
         while remove_tries and os.path.isfile( tmppath ):
             try:
                 os.remove( tmppath.encode( "utf-8" ) )
             except:
                 remove_tries -= 1
                 xbmc.sleep( 1000 )
         pDialog.close()
         self.filepath = ""
Example #2
def convex_hull(points):
    """Returns the points on the convex hull of points in CCW order."""

    # Increasing guesses for the hull size.
    for guess in (2**(2**t) for t in range(len(points))):
        LOG("Guess", guess)
        hulls = []
        for i in range(0, len(points), guess):
            # LOG(".")
            # Split the points into chunks of (roughly) the guess.
            chunk = points[i:i + guess]
            # Find the corresponding convex hull of these chunks.
            hulls.append(graham_scan(chunk))

        # Find the extreme point and initialize the list of (hull, point) pairs with it.
        hullpt_pairs = [min_hull_pt_pair(hulls)]

        # Ensure we stop after no more than "guess" iterations.
        for __ in range(guess):
            LOG("*")
            pair = next_hull_pt_pair(hulls, hullpt_pairs[-1])
            if pair == hullpt_pairs[0]:
                # Return the points in sequence
                LOGN("o")
                return [hulls[h][i] for h, i in hullpt_pairs]
            hullpt_pairs.append(pair)
        LOGN("x")
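For orientation, a minimal driver for convex_hull, assuming graham_scan, min_hull_pt_pair and next_hull_pt_pair are available from the surrounding module; the sample points below are hypothetical.

# Hypothetical usage; the helper functions come from the surrounding module.
points = [(0, 0), (4, 0), (4, 4), (0, 4), (2, 2), (1, 3)]
hull = convex_hull(points)
# Interior points such as (2, 2) are dropped; the hull is returned in CCW order.
print(hull)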
Example #3
 def fetch_trailers( self, category=None ):
     # spam log file
     LOG( ">>> fetch_trailers(category: %s, rating: %s, quality: %s)" % ( repr( category ), ( "G", "PG", "PG-13", "R", "NC-17", "No Limit", )[ self.settings[ "rating" ] ], self.Fanart, ), heading=True )
     ok = False
     # initialize trailers list
     trailers = []
     # fetch source
     xmlSource = self._get_xml_source()
     # parse source and add our items
     if ( xmlSource ):
         ok = self._parse_xml_source( xmlSource, category )
     # spam log file
     LOG( "<<< fetch_trailers()", heading=True )
     # return result
     return ok
Example #4
 def _get_xml_source( self ):
     try:
         xmlSource = []
         # grab all xml sources
         for source in ( "current.xml", "current_480p.xml", "current_720p.xml", ):
             # set path and url
             base_path = os.path.join( self.BASE_CURRENT_SOURCE_PATH, source )
             base_url = self.BASE_CURRENT_URL % ( source, )
             # get the source file's date if it exists
             try: date = os.path.getmtime( base_path )
             except OSError: date = 0
             # we only refresh if it's been more than a day, 24hr * 60min * 60sec
             refresh = ( ( time.time() - ( 24 * 60 * 60 ) ) >= date )
             # only fetch source if it's been more than a day
             if ( refresh ):
                 # open url
                 usock = urllib.urlopen( base_url )
             else:
                 # open path
                 usock = open( base_path, "r" )
             # read source
             xmlSource += [ usock.read() ]
             # close socket
             usock.close()
             # save the xmlSource for future parsing
             if ( refresh ):
                 ok = self._save_xml_source( xmlSource[ -1 ], base_path )
         # return source
         return xmlSource
     except Exception, e:
         # oops, notify user what error occurred
         LOG( str( e ), xbmc.LOGERROR )
         # error so return empty string
         return []
Example #5
def merge_enclosed(graph, segments):
    """Merge nodes of the given graph that are on edges that do not intersects with the given segments."""
    i = 0
    while i < len(graph.keys()):
        node = graph.keys()[i]
        j = 0
        altered = False
        while j < len(graph[node]):
            neighbour = graph[node][j]
            assert (neighbour in graph)
            edge = (node, neighbour)

            if not any(
                    geometry.segment_intersection(edge, seg)
                    for seg in segments):
                graph = merge_nodes(graph, edge[0], edge[1],
                                    geometry.middle(*edge))
                altered = True
                LOG(".")
                break
            else:
                j += 1

        if altered:
            i = 0
        else:
            i += 1

    return graph
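A toy call may clarify the shapes merge_enclosed expects: graph is a symmetric adjacency dict mapping each 2D-point node to a list of neighbour nodes, and segments is a list of ((x0, y0), (x1, y1)) pairs. The inputs below are hypothetical, and merge_nodes and geometry are assumed to come from the surrounding module.

# Hypothetical toy graph: a fully connected triangle of 2D points.
graph = {
    (0.0, 0.0): [(1.0, 0.0), (1.0, 1.0)],
    (1.0, 0.0): [(0.0, 0.0), (1.0, 1.0)],
    (1.0, 1.0): [(1.0, 0.0), (0.0, 0.0)],
}
# One blocking segment; edges crossing it are never merged.
segments = [((0.5, -1.0), (0.5, 2.0))]
merged = merge_enclosed(graph, segments)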
Example #6
 def _parse_categories( self, xmlSource, category ):
     try:
         # encoding
         encoding = re.findall( "<\?xml version=\"[^\"]*\" encoding=\"([^\"]*)\"\?>", xmlSource[ 0 ] )[ 0 ]
         # gather all trailer records <movieinfo>
         trailers = re.findall( "<movieinfo id=\".+?\"><info>.+?<studio>(.*?)</studio>.+?<director>(.*?)</director>.+?(?:<cast>(.+?)</cast>)?<genre>(.+?)</genre>.+?</movieinfo>", xmlSource[ 0 + ( 2 * ( self.settings[ "trailer_quality" ] > 1 and self.settings[ "trailer_hd_only" ] ) ) ] )
         # use dictionary method to filter out duplicates; set our item list
         dupes = {}
         # enumerate thru the trailers list and create our category list
         for studio, directors, actors, genres in trailers:
             # genres category
             if ( category == "genres" ):
                 # parse genres 
                 genres = re.findall( "<name>(.+?)</name>", genres )
                 # filter out duplicates
                 for x in genres:
                     dupes[ x ] = ( x, "DefaultGenre.png", None, )
             elif ( category == "studios" ):
                 # filter out duplicates
                 dupes[ studio ] = ( studio, "DefaultStudios.png", None, )
             elif ( category == "directors" ):
                 # parse directors 
                 directors = directors.split( ", " )
                 # filter out duplicates
                 for x in directors:
                     dupes[ x ] = ( x, "DefaultDirector.png", None, )
             elif ( category == "actors" ):
                 # parse actors 
                 actors = re.findall( "<name>(.+?)</name>", actors )
                 # filter out duplicates
                 for x in actors:
                     dupes[ x ] = ( x, "DefaultActor.png", "special://profile/Thumbnails/Video/%s/%s" % ( xbmc.getCacheThumbName( "actor" + x )[ 0 ], xbmc.getCacheThumbName( "actor" + x ) ,), )
         # grab the categories
         categories = dupes.values()
         # sort our list
         categories.sort()
         # get our media item
         dirItem = DirectoryItem()
         # set total items
         dirItem.totalItems = len( categories )
         # set as folder since these are virtual folders leading to filtered lists
         dirItem.isFolder = True
         # add settings menu item
         dirItem.addContextMenuItem( "", "DUMMY TO CLEAR CONTEXT MENU" )
         # enumerate thru and add our items
         for title, icon, thumb in categories:
             # check for cached thumb (only actors)
             if ( thumb is None or not os.path.isfile( thumb ) ):
                 thumb = icon
             # create our listitem
             dirItem.listitem = xbmcgui.ListItem( title, iconImage=icon, thumbnailImage=thumb )
             # set the url
             dirItem.url = "%s?category=%s" % ( sys.argv[ 0 ], urllib.quote_plus( repr( "%s: %s" % ( category, unicode( title, "utf-8" ), ) ) ), )
             # add item
             self.MediaWindow.add( dirItem )
     except Exception, e:
         # oops, notify user what error occurred
         LOG( str( e ), xbmc.LOGERROR )
Example #7
def restart():
    LOG("Restart jobs")

    db[INCIDENTS_COLLECTION_NAME].drop()
    db[ADMINS_REACTION_COLLECTION_NAME].drop()
    db[SERVICES_COLLECTION_NAME].drop()
    db[ADMINS_COLLECTION_NAME].drop()
    populate_services(read_yaml(SERVICES_YAML_PATH))
    populate_admins(read_yaml(ADMINS_YAML_PATH))

    #TODO stop previous jobs
    hosts = [DKRON_ADDRESS]
    api = Dkron(hosts)

    jobs = [x['id'] for x in api.get_jobs()]
    LOG("Delete jobs")
    for job in jobs:
        api.delete_job(job)
        LOG(f'Deleted {job}')

    services = get_services()
    LOG("Start scheduling jobs")
    for service in services:
        api.apply_job({
            "schedule": f'@every { service["frequency"] }s',
            "name": str(service['_id']),
            "timezone": "Europe/Warsaw",
            "owner": "Alerting Platform",
            "executor": "shell",
            "executor_config": {
                "command": f'python3 /app/worker.py --url {service["url"]}'
            },
            "processors": {
                "log": {
                    "forward": "true"
                }
            },
            "tags": {
                "worker": "crawler:1"
            }
        })
        LOG(f'Scheduled {service["url"]}')
Example #8
def search( cities, max_iterations, nb_ants, decay, w_heuristic, w_pheromone, w_history, c_greedy, cost_func = graph_distance ):
    # like random.shuffle(cities) but on a copy
    best = { "permutation" : sorted( cities, key=lambda i: random.random()) }
    best["cost"] = cost( best["permutation"], cost_func, cities )

    # standard ACS initialization: tau0 = 1 / (n * cost of a random tour)
    init_pheromone = 1.0 / ( float(len(cities)) * best["cost"] )
    pheromones = initialize_pheromones_whole( cities, init_pheromone )

    for i in range(max_iterations):
        LOG( i )
        solutions = []
        for j in range(nb_ants):
            LOG( "." )
            candidate = {}
            candidate["permutation"] = walk( cities, pheromones, w_heuristic, w_history, c_greedy, cost_func )
            candidate["cost"] = cost( candidate["permutation"], cost_func, cities )
            if candidate["cost"] < best["cost"]:
                best = candidate
            update_local_whole( pheromones, candidate, cities, w_pheromone, init_pheromone )
        update_global_whole( pheromones, best, cities, decay )
        LOGN( best["cost"] )

    return best, pheromones
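The cost helper is not shown here; a sketch consistent with the call sites above (cost(permutation, cost_func, cities)) might sum the edge costs around the closed tour. This is an assumption about the elided code, not the author's implementation, and it supposes cost_func takes two cities.

def cost(permutation, cost_func, cities):
    # Sum edge costs around the closed tour; cities is accepted for
    # signature compatibility but is not needed by this sketch.
    total = 0.0
    n = len(permutation)
    for k in range(n):
        total += cost_func(permutation[k], permutation[(k + 1) % n])
    return total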
Example #9
 def _add_video( self, video ):
     try:
         # get our media item
         dirItem = DirectoryItem()
         # set the default icon
         icon = "DefaultVideo.png"
         # set an HD overlay when the trailer filename indicates an HD version
         overlay = ( xbmcgui.ICON_OVERLAY_NONE, xbmcgui.ICON_OVERLAY_HD, )[ "720p." in video[ "trailer" ] or "1080p." in video[ "trailer" ] ]
         # only need to add label and thumbnail, setInfo() and addSortMethod() take care of label2
         dirItem.listitem = xbmcgui.ListItem( video[ "title" ], iconImage=icon, thumbnailImage=video[ "poster" ] )
         # release date and year
         try:
             # format the date
             release_date = datetime.date( int( video[ "releasedate" ].split( "-" )[ 0 ] ), int( video[ "releasedate" ].split( "-" )[ 1 ] ), int( video[ "releasedate" ].split( "-" )[ 2 ] ) ).strftime( self.date_format )
             # we need just year also
             year = int( video[ "releasedate" ].split( "-" )[ 0 ] )
         except:
             release_date = ""
             year = 0
         # set the key information
         dirItem.listitem.setInfo( "video", { "Title": video[ "title" ], "Overlay": overlay, "Size": video[ "size" ], "Year": year, "Plot": video[ "plot" ], "PlotOutline": video[ "plot" ], "MPAA": video[ "mpaa" ], "Genre": video[ "genre" ], "Studio": video[ "studio" ], "Director": video[ "director" ], "Duration": video[ "duration" ], "Cast": video[ "cast" ], "Date": "%s-%s-%s" % ( video[ "postdate" ][ 8 : ], video[ "postdate" ][ 5 : 7 ], video[ "postdate" ][ : 4 ], ) } )
         # set release date property
         dirItem.listitem.setProperty( "releasedate", release_date )
         # get filepath and tmp_filepath
         tmp_path, filepath = get_legal_filepath( video[ "title" ], video[ "trailer" ].split( "?|" )[ 0 ], 2, self.settings[ "download_path" ], self.settings[ "use_title" ], self.settings[ "use_trailer" ] )
         # set theater showtimes menu item
         dirItem.addContextMenuItem( 30900, "XBMC.RunPlugin(%s?showtimes=%s)" % ( sys.argv[ 0 ], urllib.quote_plus( repr( video[ "title" ] ) ), ) )
         # check if trailer already exists if user specified
         if ( self.settings[ "play_existing" ] and os.path.isfile( filepath.encode( "utf-8" ) ) ):
             dirItem.url = filepath
             # just add play trailer if trailer exists and user preference to always play existing
             dirItem.addContextMenuItem( 30920, "XBMC.PlayMedia(%s,noresume)" % ( dirItem.url, ) )
         elif ( self.settings[ "play_mode" ] == 0 ):
             dirItem.url = video[ "trailer" ]
             # we want both play and download if user preference is to stream
             dirItem.addContextMenuItem( 30910, "XBMC.RunPlugin(%s?download=%s)" % ( sys.argv[ 0 ], urllib.quote_plus( video[ "trailer" ].split( "?|" )[ 0 ] ), ) )
             dirItem.addContextMenuItem( 30920, "XBMC.PlayMedia(%s,noresume)" % ( dirItem.url, ) )
         else:
             dirItem.url = "%s?download=%s" % ( sys.argv[ 0 ], urllib.quote_plus( video[ "trailer" ].split( "?|" )[ 0 ] ) )
             # only add download if user preference is not stream
             dirItem.addContextMenuItem( 30910, "XBMC.RunPlugin(%s?download=%s)" % ( sys.argv[ 0 ], urllib.quote_plus( video[ "trailer" ].split( "?|" )[ 0 ] ), ) )
         # add the movie information item
         dirItem.addContextMenuItem( 30930, "XBMC.Action(Info)" )
         # add settings menu item
         dirItem.addContextMenuItem( 1045, "XBMC.RunPlugin(%s?settings=open)" % ( sys.argv[ 0 ], ) )
         # add the item to the media list
         return self.MediaWindow.add( dirItem )
     except Exception, e:
         # oops, notify user what error occurred
         LOG( str( e ), xbmc.LOGERROR )
Example #10
 def _save_nfo_file( self, nfoSource, tmp_nfopath ):
     try:
         # open source path for writing
         file_object = open( tmp_nfopath.encode( "utf-8" ), "w" )
         # write xmlSource
         file_object.write( nfoSource.encode( "utf-8" ) )
         # close file object
         file_object.close()
         # return successful
         return True
     except Exception, e:
         # oops, notify user what error occurred
         LOG( str( e ), xbmc.LOGERROR )
         # return failed
         return False
Example #11
 def _save_xml_source( self, xmlSource, base_path ):
     try:
         # if the path to the source file does not exist create it
         if ( not os.path.isdir( os.path.dirname( base_path ) ) ):
             os.makedirs( os.path.dirname( base_path ) )
         # open source path for writing
         file_object = open( base_path, "w" )
         # write xmlSource
         file_object.write( xmlSource )
         # close file object
         file_object.close()
         # return successful
         return True
     except Exception, e:
         # oops, notify user what error occurred
         LOG( str( e ), xbmc.LOGERROR )
         # error so return False, we don't actually use this for anything
         return False
Example #12
 def _finalize_download( self, tmppath ):
     try:
         if ( tmppath != self.filepath ):
             # copy the trailer
             msg1 = xbmc.getLocalizedString( 30503 ) % ( os.path.basename( self.filepath ), )
             msg2 = xbmc.getLocalizedString( 30502 ) % ( os.path.dirname( self.filepath ), )
             pDialog.update( 0, msg1, msg2 )
             # necessary for dialog to update
             ##xbmc.sleep( 100 )
             # create thumb and nfo paths
             thumbpath = os.path.splitext( self.filepath )[ 0 ] + ".tbn"
             nfopath = os.path.splitext( self.filepath )[ 0 ] + ".nfo"
             # use httpapi for file copying
             xbmc.executehttpapi( "FileCopy(%s,%s)" % ( tmppath.encode( "utf-8" ), self.filepath.encode( "utf-8" ), ) )
             if ( self.settings[ "copy_thumb" ] ):
                 xbmc.executehttpapi( "FileCopy(%s,%s)" % ( self.g_thumbnail, thumbpath.encode( "utf-8" ), ) )
             # create nfo file
             if ( self.settings[ "create_nfo" ] ):
                 ok = self._create_nfo_file( nfopath )
             # create a conf file for better MPlayer playback, only when the trailer is saved on the xbox and is not progressive
             # TODO: is this still necessary?
             if ( os.environ.get( "OS", "xbox" ) == "xbox" ):
                 conffile = u"%s.conf" % ( self.filepath, )
                 if ( not self.filepath.startswith( "smb://" ) and not self.params[ "download" ].endswith( "p.mov" ) and not os.path.isfile( conffile.encode( "utf-8" ) ) ):
                     try:
                         f = open( conffile , "w" )
                         f.write( "nocache=1" )
                         f.close()
                     except:
                         pass #TODO: decide what to do here
             # remove temporary cache file
             os.remove( tmppath.encode( "utf-8" ) )
         # we succeeded
         return True
     except Exception, e:
         # oops, notify user what error occurred
         LOG( str( e ), xbmc.LOGERROR )
         return False
Example #13
    def train(self, max_epochs=20, model_dir=None, train_dir='train_set', data_scale='category_scale',
              val_dir='val_set', threshold=0.5, batch_size=4, write_summary=False, freq_summary=200):

        #load train data
        dataset = self._build_data(train_dir, self.num_classes, 'train')


        dataset = dataset.shuffle(100)
        dataset = dataset.batch(batch_size)
        dataset = dataset.prefetch(20)
        train_init = self.it.make_initializer(dataset)


        #load val data
        valset = self._build_data(val_dir, self.num_classes, 'val')
        valset = valset.batch(20)
        valset = valset.prefetch(10)
        val_init = self.it.make_initializer(valset)

        print("training starts.")
        self._bulid_save_path()

        saver = tf.train.Saver()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True

        with tf.Session(config=config) as sess:
            train_writer = tf.summary.FileWriter(self.summary_dir, sess.graph)
            # continue training
            if model_dir:
                print("continue training from " + model_dir)
                saver.restore(sess, model_dir)
            else:
                sess.run(tf.global_variables_initializer())
            # train
            for epoch in range(max_epochs):
                cnt = 0
                sess.run(train_init)
                st = time.time()
                print("epoch {} begins:".format(epoch))
                try:
                    while True:
                        if write_summary:
                            # fetch the logits in the same sess.run so the input
                            # iterator is not advanced by a second run
                            _, loss, acc, summary, step, lo = sess.run([self.train_op,
                                                                        self.loss,
                                                                        self.accuracy,
                                                                        self.merged,
                                                                        self.global_step,
                                                                        self.logits])
                            # print(lo)
                            # print('\n')


                            # evaluator.evaluate(pred_heatmap, label, img_path)
                            if step % freq_summary == 0:
                                # summary
                                train_writer.add_summary(summary, step)
                        else:
                            _, loss, acc, step = sess.run([self.train_op, self.loss, self.accuracy, self.global_step])
                        cnt += batch_size
                        if cnt % (batch_size*2) == 0:
                            string_print = "Epoch = %d Nums = %d Loss = %.4f Train_Acc = %.4f  Time = %.2f" % ( epoch, cnt, loss, acc, time.time() - st)
                            LOG(string_print)
                            st = time.time()

                except tf.errors.OutOfRangeError:
                    print('saving checkpoint......')
                    saver.save(sess, os.path.join(self.model_dir, str('checkpoint_' + str(epoch + 1))))
                    print('checkpoint saved.')
                    self.val_out(sess=sess, val_init=val_init)
Example #14
if ask_for.voronoi:
    with open(ask_for.voronoi) as fd:
        voronoi_graph = graph.load(fd)

else:
    LOGN("Compute the Voronoï diagram of the triangulation")
    # Changing the neighborhood to be on vertices instead of edges will not compute the true Voronoï dual graph,
    # but we want this graph to represent the relations on vertices of the tiles.
    voronoi_tri_graph = voronoi.dual(triangulated,
                                     neighborhood=voronoi.vertices_neighbours)
    # voronoi_tri_edges   = graph.edges_of(voronoi_tri_graph)
    # voronoi_tri_centers = graph.nodes_of(voronoi_tri_graph)

    LOGN("\tMerge nodes that are both located within a single diamond")
    LOG("\t\tMerge", len(voronoi_tri_graph), "nodes")
    with open("d%i_voronoi_dual.graph" % depth, "w") as fd:
        graph.write(voronoi_tri_graph, fd)
    voronoi_graph = voronoi.merge_enclosed(voronoi_tri_graph, penrose_segments)
    LOGN("as", len(voronoi_graph), "enclosed nodes")

    with open("d%i_voronoi.graph" % depth, "w") as fd:
        graph.write(voronoi_graph, fd)

voronoi_edges = graph.edges_of(voronoi_graph)
voronoi_centers = graph.nodes_of(voronoi_graph)

########################################################################
# PLOT
########################################################################
Example #15
    def train(self,
              max_epochs=20,
              model_dir=None,
              train_dir='train_set',
              val_dir='val_set',
              threshold=0.5,
              batch_size=4,
              write_summary=False,
              freq_summary=200):

        #load train data
        dataset = self._build_data(train_dir, 'train')
        dataset = dataset.shuffle(100)
        dataset = dataset.batch(batch_size)
        dataset = dataset.prefetch(20)
        train_init = self.it.make_initializer(dataset)

        #load val data
        valset = self._build_data(val_dir, 'val')
        valset = valset.batch(1)
        valset = valset.prefetch(10)
        val_init = self.it.make_initializer(valset)

        print("training starts.")
        self._bulid_save_path()

        # variables_to_restore = slim.get_model_variables()
        # for var in variables_to_restore:
        #     print(var.op.name)
        # variables_to_restore = {name_in_checkpoint(var): var for var in variables_to_restore}

        # restorer = tf.train.Saver(variables_to_restore)
        saver = tf.train.Saver()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True

        with tf.Session(config=config) as sess:
            train_writer = tf.summary.FileWriter(self.summary_dir, sess.graph)
            # continue training
            if model_dir:
                print("continue training from " + model_dir)
                saver.restore(sess, model_dir)
            else:
                sess.run(tf.global_variables_initializer())
                # restorer.restore(sess, 'model/resnet_v1_50.ckpt')
            # train
            for epoch in range(max_epochs):
                cnt = 0
                sess.run(train_init)
                st = time.time()
                print("epoch {} begins:".format(epoch))
                try:
                    while True:
                        if write_summary:
                            _, loss, summary, step = sess.run([
                                self.train_op, self.loss, self.merged,
                                self.global_step
                            ])

                            # evaluator.evaluate(pred_heatmap, label, img_path)
                            if step % freq_summary == 0:
                                # summary
                                train_writer.add_summary(summary, step)
                        else:
                            _, loss, step = sess.run(
                                [self.train_op, self.loss, self.global_step])
                        cnt += batch_size
                        if cnt % 20 == 0:
                            string_print = "Epoch = %d Count = %d Current_Loss = %.4f Time = %.2f" % (
                                epoch, cnt, loss, time.time() - st)
                            LOG(string_print)
                            st = time.time()

                except tf.errors.OutOfRangeError:
                    print('saving checkpoint......')
                    saver.save(
                        sess,
                        os.path.join(self.model_dir,
                                     str(self.model + '_' + str(epoch + 1))))
                    print('checkpoint saved.')
                    self.val_out(sess=sess,
                                 val_init=val_init,
                                 threshold=threshold,
                                 output_dir=self.val_dir,
                                 epoch=epoch)
Example #16
def delaunay_bowyer_watson(points,
                           supertri=None,
                           superdelta=0.1,
                           epsilon=sys.float_info.epsilon,
                           do_plot=None,
                           plot_filename="Bowyer-Watson_%i.png"):
    """Return the Delaunay triangulation of the given points

    epsilon: used for floating point comparisons, two points are considered equal if their distance is < epsilon.
    do_plot: if not None, plot intermediate steps on this matplotlib object and save them as images named: plot_filename % i
    """

    if do_plot and len(points) > 10:
        print "WARNING it is a bad idea to plot each steps of a triangulation of many points"

    # Sort points first on the x-axis, then on the y-axis.
    vertices = sorted(points)

    # LOGN( "super-triangle",supertri )
    if not supertri:
        supertri = supertriangle(vertices, superdelta)

    # It is the first triangle of the list.
    triangles = [supertri]

    completed = {supertri: False}

    # The predicate returns True if at least one of the vertices
    # is also found in the super-triangle.
    def match_supertriangle(tri):
        return tri[0] in supertri or \
               tri[1] in supertri or \
               tri[2] in supertri

    # Draws the base of each plot: points, current triangulation, super-triangle and bounding box.
    def plot_base(ax, vi=len(vertices), vertex=None):
        ax.set_aspect('equal')
        # regular points
        scatter_x = [p[0] for p in vertices[:vi]]
        scatter_y = [p[1] for p in vertices[:vi]]
        ax.scatter(scatter_x, scatter_y, s=30, marker='o', facecolor="black")
        # super-triangle vertices
        scatter_x = [p[0] for p in list(supertri)]
        scatter_y = [p[1] for p in list(supertri)]
        ax.scatter(scatter_x,
                   scatter_y,
                   s=30,
                   marker='o',
                   facecolor="lightgrey",
                   edgecolor="lightgrey")
        # current vertex
        if vertex:
            ax.scatter(vertex[0],
                       vertex[1],
                       s=30,
                       marker='o',
                       facecolor="red",
                       edgecolor="red")
        # current triangulation
        uberplot.plot_segments(ax,
                               edges_of(triangles),
                               edgecolor="blue",
                               alpha=0.5,
                               linestyle='solid')
        # bounding box
        (xmin, ymin), (xmax, ymax) = bounds(vertices)
        uberplot.plot_segments(ax,
                               tour([(xmin, ymin), (xmin, ymax), (xmax, ymax),
                                     (xmax, ymin)]),
                               edgecolor="magenta",
                               alpha=0.2,
                               linestyle='dotted')

    # Insert vertices one by one.
    LOG("Insert vertices: ")
    if do_plot:
        it = 0
    for vi, vertex in enumerate(vertices):
        # LOGN( "\tvertex",vertex )
        assert (len(vertex) == 2)

        if do_plot:
            ax = do_plot.add_subplot(111)
            plot_base(ax, vi, vertex)

        # All the triangles whose circumcircle encloses the point to be added are identified,
        # the outside edges of those triangles form an enclosing polygon.

        # Forget previous candidate polygon's edges.
        enclosing = []

        removed = []
        for triangle in triangles:
            # LOGN( "\t\ttriangle",triangle )
            assert (len(triangle) == 3)

            # Do not consider triangles already tested.
            # If completed has a key, test it, else return False.
            if completed.get(triangle, False):
                # LOGN( "\t\t\tAlready completed" )
                # if do_plot:
                # uberplot.plot_segments( ax, tour(list(triangle)), edgecolor = "magenta", alpha=1, lw=1, linestyle='dotted' )
                continue

            # LOGN( "\t\t\tCircumcircle" )
            assert (triangle[0] != triangle[1] and triangle[1] != triangle[2]
                    and triangle[2] != triangle[0])
            center, radius = circumcircle(triangle, epsilon)

            # Vertices are sorted on x, so if the circumcircle lies entirely
            # to the left of the current vertex, no later vertex can fall
            # inside it: this triangle satisfies Delaunay's condition.
            if x(center) < x(vertex) and x(vertex) - x(center) > radius:
                # LOGN( "\t\t\tMatch Delaunay, mark as completed" )
                completed[triangle] = True

            # If the current vertex is inside the circumscribe circle of the current triangle,
            # add the current triangle's edges to the candidate polygon.
            if in_circle(vertex, center, radius, epsilon):
                # LOGN( "\t\t\tIn circumcircle, add to enclosing polygon",triangle )
                if do_plot:
                    circ = plot.Circle(center,
                                       radius,
                                       facecolor='yellow',
                                       edgecolor="orange",
                                       alpha=0.2,
                                       clip_on=False)
                    ax.add_patch(circ)

                for p0, p1 in tour(list(triangle)):
                    # Then add this edge to the polygon enclosing the vertex,
                    enclosing.append((p0, p1))
                # and remove the corresponding triangle from the current triangulation.
                removed.append(triangle)
                completed.pop(triangle, None)

            elif do_plot:
                circ = plot.Circle(center,
                                   radius,
                                   facecolor='lightgrey',
                                   edgecolor="grey",
                                   alpha=0.2,
                                   clip_on=False)
                ax.add_patch(circ)

        # end for triangle in triangles

        # The triangles in the enclosing polygon are deleted and
        # new triangles are formed between the point to be added and
        # each outside edge of the enclosing polygon.

        # Actually remove triangles.
        for triangle in removed:
            triangles.remove(triangle)

        # Remove duplicated edges.
        # This leaves the edges of the enclosing polygon only,
        # because enclosing edges are only in a single triangle,
        # but edges inside the polygon are at least in two triangles.
        hull = []
        for i, (p0, p1) in enumerate(enclosing):
            # Clockwise edges can only be in the remaining part of the list.
            # Search for counter-clockwise edges as well.
            if (p0, p1) not in enclosing[i + 1:] and (p1, p0) not in enclosing:
                hull.append((p0, p1))
            elif do_plot:
                uberplot.plot_segments(ax, [(p0, p1)],
                                       edgecolor="white",
                                       alpha=1,
                                       lw=1,
                                       linestyle='dotted')

        if do_plot:
            uberplot.plot_segments(ax,
                                   hull,
                                   edgecolor="red",
                                   alpha=1,
                                   lw=1,
                                   linestyle='solid')

        # Create new triangles using the current vertex and the enclosing hull.
        # LOGN( "\t\tCreate new triangles" )
        for p0, p1 in hull:
            assert (p0 != p1)
            triangle = tuple([p0, p1, vertex])
            # LOGN("\t\t\tNew triangle",triangle)
            triangles.append(triangle)
            completed[triangle] = False

            if do_plot:
                uberplot.plot_segments(ax, [(p0, vertex), (p1, vertex)],
                                       edgecolor="green",
                                       alpha=1,
                                       linestyle='solid')

        if do_plot:
            plot.savefig(plot_filename % it, dpi=150)
            plot.clf()

            it += 1
        LOG(".")

    # end for vertex in vertices
    LOGN(" done")

    # Remove triangles that have at least one of the supertriangle vertices.
    # LOGN( "\tRemove super-triangles" )

    # Filter out elements for which the predicate is False,
    # here: *keep* elements that *do not* have a common vertex.
    # The filter is a generator, so we must make a list with it to actually get the data.
    triangulation = list(filter_if_not(match_supertriangle, triangles))

    if do_plot:
        ax = do_plot.add_subplot(111)
        plot_base(ax)
        uberplot.plot_segments(ax,
                               edges_of(triangles),
                               edgecolor="red",
                               alpha=0.5,
                               linestyle='solid')
        uberplot.plot_segments(ax,
                               edges_of(triangulation),
                               edgecolor="blue",
                               alpha=1,
                               linestyle='solid')
        plot.savefig(plot_filename % it, dpi=150)
        plot.clf()

    return triangulation
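A hedged usage sketch, assuming the helpers referenced above (supertriangle, circumcircle, in_circle, tour, bounds, edges_of, filter_if_not, x) come from the surrounding module; the point set is hypothetical and plotting is left disabled.

# Hypothetical input: four corners plus a centre point, no plotting.
points = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0), (0.5, 0.5)]
triangulation = delaunay_bowyer_watson(points)
# Expect four triangles, each sharing the centre vertex.
print(triangulation)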
Example #17
class Main:
    # TODO: we may need to store these in the addContextMenuItem() call; when using a mouse,
    #           the selection can change before this module can be imported.
    # set our title
    g_title = unicode( xbmc.getInfoLabel( "ListItem.Title" ), "utf-8" )
    # set our studio (only works if the user is using the video library)
    g_studio = unicode( xbmc.getInfoLabel( "ListItem.Studio" ), "utf-8" )
    # set our director (only works if the user is using the video library)
    g_director = unicode( xbmc.getInfoLabel( "ListItem.Director" ), "utf-8" )
    # set our genre (only works if the user is using the video library)
    g_genre = unicode( xbmc.getInfoLabel( "ListItem.Genre" ), "utf-8" )
    # set our rating (only works if the user is using the video library)
    g_mpaa_rating = unicode( xbmc.getInfoLabel( "ListItem.MPAA" ), "utf-8" )
    # set our thumbnail
    g_thumbnail = unicode( xbmc.getInfoImage( "ListItem.Thumb" ), "utf-8" )
    # set our plotoutline
    g_plotoutline = unicode( xbmc.getInfoLabel( "ListItem.Plot" ), "utf-8" )
    # set movie url
    g_movie_url = unicode( xbmc.getInfoLabel( "ListItem.FilenameAndPath" ), "utf-8" )
    # set our released date
    g_releasedate = xbmc.getInfoLabel( "ListItem.Property(releasedate)" )
    # set our trailer duration
    g_duration = xbmc.getInfoLabel( "ListItem.Duration" )
    # set cast list
    g_cast = unicode( " / ".join( xbmc.getInfoLabel( "ListItem.Cast" ).split( "\n" ) ), "utf-8" )
    # set our year
    g_year = 0
    if ( xbmc.getInfoLabel( "ListItem.Year" ) ):
        g_year = int( xbmc.getInfoLabel( "ListItem.Year" ) )

    def __init__( self ):
        # parse argv
        self._parse_argv()
        # get user preferences
        self._get_settings()
        # download the video
        self._download_video()
        # play the video
        self._play_video()

    def _parse_argv( self ):
        # parse sys.argv for params and return result
        self.params = dict( urllib.unquote_plus( arg ).split( "=" ) for arg in sys.argv[ 2 ][ 1 : ].split( "&" ) )
        # apple's xml is utf-8 so create a utf-8 url
        self.params[ "download" ] = unicode( self.params[ "download" ], "utf-8" )

    def _get_settings( self ):
        self.settings = {}
        self.settings[ "download_path" ] = xbmcplugin.getSetting( "download_path" )
        self.settings[ "play_mode" ] = int( xbmcplugin.getSetting( "play_mode" ) )
        if ( self.settings[ "play_mode" ] == 2 and self.settings[ "download_path" ] == "" ):
            self.settings[ "play_mode" ] = 1
        self.settings[ "use_title" ] = ( xbmcplugin.getSetting( "use_title" ) == "true" and self.settings[ "play_mode" ] == 2 )
        self.settings[ "use_trailer" ] = ( xbmcplugin.getSetting( "use_trailer" ) == "true" and self.settings[ "play_mode" ] == 2 )
        self.settings[ "create_nfo" ] = ( xbmcplugin.getSetting( "create_nfo" ) == "true" )
        self.settings[ "copy_thumb" ] = ( xbmcplugin.getSetting( "copy_thumb" ) == "true" )
        self.settings[ "trailer_quality" ] = ( "Standard", "480p", "720p", "1080p", )[ int( xbmcplugin.getSetting( "trailer_quality" ) ) ]

    def _download_video( self ):
        # initialize here so the cleanup code in the error handler can safely reference it
        tmppath = ""
        try:
            # spam log file
            LOG( ">>> _download_video(title: %s)" % ( repr( self.g_title ), ), heading=True )
            # get filepath and tmp_filepath
            tmppath, self.filepath = get_legal_filepath( self.g_title, self.params[ "download" ], self.settings[ "play_mode" ], self.settings[ "download_path" ], self.settings[ "use_title" ], self.settings[ "use_trailer" ] )
            # only download if the trailer doesn't exist
            if ( not os.path.isfile( self.filepath.encode( "utf-8" ) ) ):
                # only need to retrieve video if not in tmp path
                if ( not os.path.isfile( tmppath.encode( "utf-8" ) ) ):
                    # fetch the video
                    urllib.urlretrieve( self.params[ "download" ], tmppath.encode( "utf-8" ), self._report_hook )
                # create the conf file for xbox and copy to final location
                ok = self._finalize_download( tmppath )
                # if the copy failed raise an error
                if ( not ok ): raise Exception( "_finalize_download() failed" )
        except Exception, e:
            # oops, notify user what error occurred
            LOG( str( e ), xbmc.LOGERROR )
            # the file is not always released immediately, so retry removal a few times, sleeping in between
            urllib.urlcleanup()
            remove_tries = 3
            while remove_tries and os.path.isfile( tmppath ):
                try:
                    os.remove( tmppath.encode( "utf-8" ) )
                except:
                    remove_tries -= 1
                    xbmc.sleep( 1000 )
            pDialog.close()
            self.filepath = ""
        # spam log file
        LOG( "<<< _download_video()", heading=True )
Example #18
def train_proxnet(args):
    check_paths(args)
    # init GPU configuration
    args.dtype = set_gpu(args.cuda)

    # init seed
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    # define training data
    train_dataset = data.MRFData(mod='train', sampling=args.sampling)
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True)

    # init operators (subsampling + subspace dimension reduction + Fourier transformation)
    operator = OperatorBatch(sampling=args.sampling.upper()).cuda()
    H, HT = operator.forward, operator.adjoint
    bloch = BLOCH().cuda()

    # init PGD-Net (proxnet)
    proxnet = ProxNet(args).cuda()

    # init optimizer
    optimizer = torch.optim.Adam([{
        'params': proxnet.transformnet.parameters(),
        'lr': args.lr,
        'weight_decay': args.weight_decay
    }, {
        'params': proxnet.alpha,
        'lr': args.lr2
    }])

    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                     milestones=[20],
                                                     gamma=0.1)

    # init loss
    mse_loss = torch.nn.MSELoss()  #.cuda()

    # init meters
    log = LOG(args.save_model_dir,
              filename=args.filename,
              field_name=[
                  'iter', 'loss_m', 'loss_x', 'loss_y', 'loss_total', 'alpha'
              ])

    loss_epoch = 0
    loss_m_epoch, loss_x_epoch, loss_y_epoch = 0, 0, 0

    # start PGD-Net training
    print('start training...')
    for e in range(args.epochs):
        proxnet.train()
        loss_m_seq = []
        loss_x_seq = []
        loss_y_seq = []
        loss_total_seq = []

        for x, m, y in train_loader:
            # convert tensors to the configured (CUDA) dtype
            x, m, y = x.type(args.dtype), m.type(args.dtype), y.type(
                args.dtype)
            # add noise
            noise = args.noise_sigam * torch.randn(y.shape).type(args.dtype)
            HTy = HT(y + noise).type(args.dtype)

            # PGD-Net computation (iteration)
            # output the reconstructions (sequence) of MRF image x and its tissue property map m
            m_seq, x_seq = proxnet(HTy, H, HT, bloch)

            loss_x, loss_y, loss_m = 0, 0, 0
            for t in range(args.time_step):
                loss_y += mse_loss(H(x_seq[t]), y) / args.time_step
            for i in range(3):
                loss_m += args.loss_weight['m'][i] * mse_loss(
                    m_seq[-1][:, i, :, :], m[:, i, :, :])
            loss_x = mse_loss(x_seq[-1], x)

            # compute loss
            loss_total = loss_m + args.loss_weight[
                'x'] * loss_x + args.loss_weight['y'] * loss_y

            # update gradient
            optimizer.zero_grad()
            loss_total.backward()
            optimizer.step()

            # update meters
            loss_m_seq.append(loss_m.item())
            loss_x_seq.append(loss_x.item())
            loss_y_seq.append(loss_y.item())
            loss_total_seq.append(loss_total.item())

        # (scheduled) update learning rate
        scheduler.step()

        # print meters
        loss_m_epoch = np.mean(loss_m_seq)
        loss_x_epoch = np.mean(loss_x_seq)
        loss_y_epoch = np.mean(loss_y_seq)
        loss_epoch = np.mean(loss_total_seq)

        log.record(e + 1, loss_m_epoch, loss_x_epoch, loss_y_epoch, loss_epoch,
                   proxnet.alpha.detach().cpu().numpy())
        logT(
            "==>Epoch {}\tloss_m: {:.6f}\tloss_x: {:.6f}\tloss_y: {:.6f}\tloss_total: {:.6f}\talpha: {}"
            .format(e + 1, loss_m_epoch, loss_x_epoch, loss_y_epoch,
                    loss_epoch,
                    proxnet.alpha.detach().cpu().numpy()))

        # save checkpoint
        if args.checkpoint_model_dir is not None and (
                e + 1) % args.checkpoint_interval == 0:
            proxnet.eval()
            ckpt = {
                'epoch': e + 1,
                'loss_m': loss_m_epoch,
                'loss_x': loss_x_epoch,
                'loss_y': loss_y_epoch,
                'total_loss': loss_epoch,
                'net_state_dict': proxnet.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'alpha': proxnet.alpha.detach().cpu().numpy()
            }
            torch.save(
                ckpt,
                os.path.join(args.checkpoint_model_dir,
                             'ckp_epoch_{}.pt'.format(e)))
            proxnet.train()

    # save model
    proxnet.eval()
    state = {
        'epoch': args.epochs,
        'loss_m': loss_m_epoch,
        'loss_x': loss_x_epoch,
        'loss_y': loss_y_epoch,
        'total_loss': loss_epoch,
        'alpha': proxnet.alpha.detach().cpu().numpy(),
        'net_state_dict': proxnet.state_dict(),
        'optimizer_state_dict': optimizer.state_dict()
    }
    save_model_path = os.path.join(args.save_model_dir, log.filename + '.pt')
    torch.save(state, save_model_path)
    print("\nDone, trained model saved at", save_model_path)
Example #19
 def _parse_trailers( self, xmlSource, category ):
     try:
         # set our post dates for the recently added list
         old_postdate_min = new_postdate_min = xbmcplugin.getSetting( "postdate_min" )
         old_postdate_max = new_postdate_max = xbmcplugin.getSetting( "postdate_max" )
         # set our ratings
         mpaa_ratings = { "G": 0, "PG": 1, "PG-13": 2, "R": 3, "NC-17": 4, None: 5 }
         # encoding
         encoding = re.findall( "<\?xml version=\"[^\"]*\" encoding=\"([^\"]*)\"\?>", xmlSource[ 0 ] )[ 0 ]
         # split category
         if ( category is not None ):
             category, category_value = category.split( ": " )
         # gather all trailer records <movieinfo>
         trailers = re.findall( "<movieinfo id=\"(.+?)\"><info><title>(.+?)</title><runtime>(.*?)</runtime><rating>(.*?)</rating><studio>(.*?)</studio><postdate>(.*?)</postdate><releasedate>(.*?)</releasedate><copyright>(.*?)</copyright><director>(.*?)</director><description>(.*?)</description></info>(?:<cast>(.+?)</cast>)?(?:<genre>(.+?)</genre>)?<poster><location>(.*?)</location>(?:</poster><poster>)?<xlarge>(.*?)</xlarge></poster><preview><large filesize=\"(.+?)\">(.+?)</large></preview></movieinfo>", xmlSource[ 0 + ( 2 * ( self.settings[ "trailer_quality" ] > 1 and self.settings[ "trailer_hd_only" ] ) ) ] )
         trailers_480p = dict( re.findall( "<movieinfo id=\"(.+?)\">.+?<preview><large filesize=\"(.+?\">.+?)</large></preview></movieinfo>", xmlSource[ 1 ] ) )
         trailers_720p = dict( re.findall( "<movieinfo id=\"(.+?)\">.+?<preview><large filesize=\"(.+?\">.+?)</large></preview></movieinfo>", xmlSource[ 2 ] ) )
         # enumerate thru the movies list and gather info
         for trailer in trailers:
             # encode/clean title
             title = unicode( unescape( trailer[ 1 ] ), encoding, "replace" )
             # format post date, we do this here so filtered lists won't affect the actual results
             postdate = trailer[ 5 ]
             # check if this is a new trailer
             if ( postdate > old_postdate_max ):
                 if ( not new_postdate_min or postdate < new_postdate_min or new_postdate_min == old_postdate_min ):
                     new_postdate_min = postdate
                 if ( postdate > new_postdate_max ):
                     new_postdate_max = postdate
             if ( category == "recent" and postdate < old_postdate_min ):
                 LOG( "* Skipping *: %s   Preferred: %s, Trailer: %s [RECENT]" % ( repr( title ).ljust( 50 ), old_postdate_min, postdate, ) )
                 continue
             # check for valid mpaa rating
             if ( self.settings[ "rating" ] < mpaa_ratings.get( trailer[ 3 ], self.settings[ "not_rated_rating" ] ) ):
                 LOG( "* Skipping *: %s   Preferred: %s, Trailer: %s" % ( repr( title ).ljust( 50 ), ( "G", "PG", "PG-13", "R", "NC-17", "--", )[ self.settings[ "rating" ] ], ( "%s (%s)" % ( ( "G", "PG", "PG-13", "R", "NC-17", "--", )[ self.settings[ "not_rated_rating" ] ], trailer[ 3 ], ), trailer[ 3 ], )[ trailer[ 3 ] != "Not yet rated" ] , ) )
                 continue
             mpaa = ( trailer[ 3 ], "Rated %s" % ( trailer[ 3 ], ), )[ trailer[ 3 ] != "" and trailer[ 3 ] != "Not yet rated" ]
             # parse genres 
             genres = re.findall( "<name>(.+?)</name>", trailer[ 11 ] )
             # if a genre-based category, check it
             if ( category == "genres" and category_value not in genres ):
                 LOG( "* Skipping *: %s   Preferred: %s, Trailer: %s [GENRE]" % ( repr( title ).ljust( 50 ), category_value, " / ".join( genres ), ) )
                 continue
             # encode/clean studio
             studio = unicode( unescape( trailer[ 4 ] ), encoding, "replace" )
             # if a studio-based category, check it
             if ( category == "studios" and studio != category_value ):
                 LOG( "* Skipping *: %s   Preferred: %s, Trailer: %s [STUDIO]" % ( repr( title ).ljust( 50 ), repr( category_value ), repr( studio ), ) )
                 continue
             # encode/clean director
             director = unicode( unescape( trailer[ 8 ].replace( ", ", " | " ) ), encoding, "replace" )
             # if a director-based category, check it
             if ( category == "directors" and category_value not in director ):
                 LOG( "* Skipping *: %s   Preferred: %s, Trailer: %s [DIRECTOR]" % ( repr( title ).ljust( 50 ), repr( category_value ), repr( director ), ) )
                 continue
             # parse actors 
             actors = unicode( unescape( " / ".join( re.findall( "<name>(.+?)</name>", trailer[ 10 ] ) ) ), encoding, "replace" ).split( " / " )
             # if an actor-based category, check it
             if ( category == "actors" and category_value not in " / ".join( actors ) ):
                 LOG( "* Skipping *: %s   Preferred: %s, Trailer: %s [ACTOR]" % ( repr( title ).ljust( 50 ), repr( category_value ), repr( " / ".join( actors ) ), ) )
                 continue
             # encode/clean copyright
             copyright = unicode( unescape( trailer[ 7 ] ), encoding, "replace" )
             # convert size to long
             size = long( trailer[ 14 ] )
             # add User-Agent to correct poster url
             poster = ( trailer[ 13 ] or trailer[ 12 ] ) + "?|User-Agent=%s" % ( urllib.quote_plus( sys.modules[ "__main__" ].__useragent__ ), )
             # set initial trailer url
             trailer_url = unicode( trailer[ 15 ], "utf-8" )
             # select preferred trailer quality
             if ( self.settings[ "trailer_quality" ] > 0 ):
                 if ( self.settings[ "trailer_quality" ] > 1 and trailers_720p.has_key( trailer[ 0 ] ) ):
                     if ( not self.settings[ "trailer_hd_only" ] ):
                         size, trailer_url = trailers_720p[ trailer[ 0 ] ].split( "\">" )
                     # replace with 1080p if user preference is 1080p
                     if ( self.settings[ "trailer_quality" ] == 3 ):
                         trailer_url = trailer_url.replace( "a720p.m4v", "h1080p.mov" )
                 elif ( trailers_480p.has_key( trailer[ 0 ] ) ):
                     size, trailer_url = trailers_480p[ trailer[ 0 ] ].split( "\">" )
                 # convert size to long
                 size = long( size )
             # add User-Agent to trailer url
             trailer_url += "?|User-Agent=%s" % ( urllib.quote_plus( sys.modules[ "__main__" ].__useragent__ ), )
             # encode/clean plot
             plot = unicode( unescape( trailer[ 9 ] ), encoding, "replace" )
             # duration of trailer
             # this displays right in video info dialog, but not in the lists (the formula xbmc uses does not accept seconds)
             #duration = xbmc.getLocalizedString( 14044 ).replace( "%i", "%s" ) % ( trailer[ 2 ], )
             duration = "%s:%s" % ( trailer[ 2 ].replace( ":", "" ).rjust( 4, "0" )[ : 2 ], trailer[ 2 ].replace( ":", "" ).rjust( 4, "0" )[ 2 : ], )
             # format release date
             releasedate = trailer[ 6 ]
             # add the item to our media list
             ok = self._add_video( { "title": title, "duration": duration, "mpaa": mpaa, "studio": studio, "postdate": postdate, "releasedate": releasedate, "copyright": copyright, "director": director, "plot": plot, "cast": actors, "genre": " / ".join( genres ), "poster": poster, "trailer": trailer_url, "size": size } )
             # if adding the video failed, raise an exception
             if ( not ok ): raise Exception( "_add_video() failed" )
         # set our new postdate
         xbmcplugin.setSetting( "postdate_min", new_postdate_min )
         xbmcplugin.setSetting( "postdate_max", new_postdate_max )
     except Exception, e:
         # oops, notify user what error occurred
         LOG( str( e ), xbmc.LOGERROR )
         self.success = False