Example #1
 def extract_ngram(self, opcode_sequence, N, reduce_dimension=False, unfiltered_opcode_list=None):
     each_ngram_count = {}
     ngram_feature_vector = []
     
     ngram_sequence = self.extract_ngram_sequence(opcode_sequence, N)
     for ngram in ngram_sequence:
         if ngram in each_ngram_count:
             each_ngram_count[ngram] += 1
         else:
             each_ngram_count[ngram] = 1
             
     self.set_ngram_variety_from_database(N)
     
     # TODO: test around dimension reduction
     if reduce_dimension:
         if unfiltered_opcode_list is None:
             print('Unfiltered opcode list is not specified')
             
             db_handler = DatabaseHandler()
             unfiltered_opcode_list = db_handler.extract_unfiltered_opcode()
             print('Extracted from database')
             
         self.reduce_ngram_variety(unfiltered_opcode_list)
             
     for variety in self.ngram_variety_:     # set ngram_variety_ before here
         if variety in each_ngram_count:
             ngram_feature_vector.append(each_ngram_count[variety])
         else:
             ngram_feature_vector.append(0)
             
     return ngram_feature_vector
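
A quick usage sketch for the method above; the owner class name and the opcode values here are illustrative assumptions, not taken from the examples:

extractor = OpcodeFeatureExtractor()  # hypothetical class that defines extract_ngram
opcodes = ['mov', 'push', 'call', 'mov', 'push']
vector = extractor.extract_ngram(opcodes, N=2)
print(len(vector))  # one count per 2-gram variety known to the extractor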
Example #2
    def save_feature(self, file_name, save_file_name):
        db_handler = DatabaseHandler()
        
        opcode_variety = db_handler.extract_opcode_variety()

        levels = ['O0', 'O1', 'O2', 'O3']
        opcode_sequences = [
            db_handler.extract_opcode_sequence(file_name=file_name + '_MinGW_' + level)
            for level in levels
        ]
        
        with open(os.path.join(self.csv_save_dir_name, save_file_name), 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerow([file_name] + levels)
            for opcode in opcode_variety:
                writer.writerow([opcode] + [seq.count(opcode) for seq in opcode_sequences])
            writer.writerow(['Sum'] + [len(seq) for seq in opcode_sequences])
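
The resulting CSV has one row per opcode with its count at each optimization level, closed by a Sum row; schematically (the counts below are placeholders):

prog,O0,O1,O2,O3
mov,120,95,88,90
push,60,41,40,39
...
Sum,1450,1101,1050,1044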
Example #3
    def __init__(self, queue):
        self.queue = queue  # queue used to communicate with other windows

        self.layout = None
        self.window = None

        self.db_handler = DatabaseHandler('puzzles.db')
        self.selected_data = []
Example #4
 def extract_data_from_file(self, file_name, extraction_method, label_type):
     db_handler = DatabaseHandler()
     
     file_id = db_handler.lookup_id_from_file_name(file_name)
     label, feature_vector = self.extract_data(file_id, extraction_method,
                                               label_type)
     
     return label, feature_vector
Example #5
 def __init__(self, id, system, workerArgs=None):
     DatabaseHandler.__init__(self)
     self.changesLock = Lock()
     # incremented for SQL UPDATEs used to transfer items from cluster to cluster
     self.changes = 0
     # incremented for each SQL SELECT emitted to determine where an item is most pulled
     self.i = 0
     self.itemExchangeLoopClosingFailures = 0
     IndependentWorker.__init__(self, id, system)
Example #6
 def match_string_signature(self, file_name):
     """
     TODO: Examination of voting method
     """
     db_handler = DatabaseHandler()
     
     string_list = self.extract_string(file_name)
     string_signature_dict = db_handler.extract_string_signature()
     for sig in string_signature_dict.keys():
         if sig in string_list:
             return string_signature_dict[sig]
Example #7
    def __init__(self):
        self.width = 0  # number of columns
        self.height = 0  # number of rows
        self.rows = []  # 2d array storing hints for rows
        self.cols = []  # 2d array storing hints for columns
        self.board = []  # 2D numpy array storing board tiles state

        self.db_handler = DatabaseHandler('puzzles.db')  # handler used for loading puzzles from the database
        self.solution = []  # 2d array storing solution of the puzzle (1st row, 2nd row, etc.)
Example #8
 def __init__(self):
     super(ApiPoller, self).__init__()
     self.__stop_event = threading.Event()
     dbname = os.environ['DB_NAME']
     dbuser = os.environ['DB_USER']
     dbpass = os.environ['DB_PASS']
     dbhost = os.environ['DB_HOST']
     dbport = os.environ['DB_PORT']
     self.database = DatabaseHandler(dbname=dbname,
                                     user=dbuser,
                                     password=dbpass,
                                     host=dbhost,
                                     port=dbport)
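
The constructor above reads its connection settings from the environment, so those variables must exist before instantiation; a minimal sketch with placeholder values:

import os

os.environ.setdefault('DB_NAME', 'mydb')      # placeholder values for illustration
os.environ.setdefault('DB_USER', 'postgres')
os.environ.setdefault('DB_PASS', 'secret')
os.environ.setdefault('DB_HOST', 'localhost')
os.environ.setdefault('DB_PORT', '5432')

poller = ApiPoller()  # DatabaseHandler is built from the variables above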
Example #9
 def extract_compiler_label(self, file_id):
     db_handler = DatabaseHandler()
     
     compiler = db_handler.extract_compiler(file_id)
     if compiler == 'Borland C++ Compiler':
         return 0
     elif compiler == 'Microsoft Visual C++':
         return 1
     elif compiler == 'MinGW':
         return 2
     else:
         print('Error: unknown compiler')
         sys.exit()
Example #10
 def extract_all_data(self, extraction_method, label_type):
     label_list = []
     feature_vector_list = []
     
     # Extract all file IDs from the database
     db_handler = DatabaseHandler()
     file_id_list = db_handler.extract_all_file_id()
     
     for file_id in file_id_list:
         label, feature_vector = self.extract_data(file_id, extraction_method, label_type)
         feature_vector_list.append(feature_vector)
         label_list.append(label)
     
     return label_list, feature_vector_list
Example #11
 def extract_optimization_level_label(self, file_id):
     db_handler = DatabaseHandler()
     
     optimization_level = db_handler.extract_optimization_level(file_id)
     if optimization_level == 'O0' or optimization_level == 'Od':
         return 0
     elif optimization_level == 'O1':
         return 1
     elif optimization_level == 'O2':
         return 2
     elif optimization_level == 'O3' or optimization_level == 'Ox':
         return 3
     else:
         print('Error: unknown optimization level')
         sys.exit()
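
The if/elif ladder above encodes a fixed mapping, so the same logic can also be written as a dict lookup; a sketch (the standalone function name is hypothetical):

import sys

OPT_LEVEL_LABELS = {'O0': 0, 'Od': 0, 'O1': 1, 'O2': 2, 'O3': 3, 'Ox': 3}

def optimization_level_label(file_id):
    level = DatabaseHandler().extract_optimization_level(file_id)
    try:
        return OPT_LEVEL_LABELS[level]
    except KeyError:
        sys.exit('Error: unknown optimization level')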
Example #12
 def __init__(self, id="main", master=None):
     DatabaseHandler.__init__(self)
     Thread.__init__(self)
     self._id = id
     if master is None:
         self._itemList = self._getItems()
         self._items = len(self._itemList)
         self._itemsParsedLock = Lock()
         self._itemsParsed = 0
         self.main = self
         self._power = True
         self._sync = False
         self._report = False
     else:
         self.main = master
         self._power = False
Example #13
    def __configure_simple_bot(self, behaviour_config):
        """Configures and returns the bot behaviour class.

        Args:
            behaviour_config (dict): A dictionary of configuration values pertaining to the
                behaviour.

        Returns:
            SimpleBotBehaviour: A class of functionality for the simple bot behaviour.
        """

        exchange_interface = ExchangeInterface(self.config.exchanges)
        strategy_analyzer = StrategyAnalyzer()
        notifier = Notifier(self.config.notifiers)
        db_handler = DatabaseHandler(self.config.database)

        behaviour = SimpleBotBehaviour(
            behaviour_config,
            exchange_interface,
            strategy_analyzer,
            notifier,
            db_handler
        )

        return behaviour
Example #14
 def run(self, start_config_id=None):
     self.db = DatabaseHandler()
     print('running')
     while True:
         if start_config_id is None:
             (config_id, model_id, ext_id, train_id,
                 dataset_id, random_seed, batch_size) \
                  = self.select_next_config(self.experiment_id)
         else:
             (config_id, model_id, ext_id, train_id,
                 dataset_id, random_seed, batch_size) \
                  = self.select_config(start_config_id)
         start_config_id = None
         
         (dataset_desc, input_space_id) = self.select_dataset(dataset_id)
         input_space = self.get_space(input_space_id)
         
         # build model
         model = self.get_model(model_id, 
                                random_seed, 
                                batch_size, 
                                input_space)
         
         # extensions
         extensions = self.get_extensions(ext_id)
         
         # prepare monitor
         self.prep_valtest_monitor(model, batch_size)
         
         # monitor based save best
         if self.mbsb_channel_name is not None:
             save_path = self.save_prefix + str(config_id) + "_best.pkl"
             extensions.append(MonitorBasedSaveBest(
                     channel_name=self.mbsb_channel_name,
                     save_path=save_path,
                     cost=False))
         
         # HPS Logger
         extensions.append(
             HPSLog(self.log_channel_names, self.db, config_id)
         )
         
         # training algorithm
         algorithm = self.get_trainingAlgorithm(train_id, batch_size)
         
         print('sgd complete')
         learner = Train(dataset=self.train_ddm,
                         model=model,
                         algorithm=algorithm,
                         extensions=extensions)
         print('learning')
         learner.main_loop()
         
         self.set_end_time(config_id)
Example #15
def update():
    """
    Update route.

    :return: a 500 error on failure or a JSON response on success
    """
    db = DatabaseHandler(app.config["DATABASE_FILE"],
                         app.config["CO2_MULTIPLIER"])
    json_return = dict()
    json_return["last24h"] = db.get_power_last_24h()
    json_return["dayTotal"] = db.get_current_day()
    json_return["total"] = round(db.get_current_total(), 0)
    json_return["co2"] = round(db.get_current_co2(), 2)
    json_return["update"] = db.get_last_update()
    db.close()
    return jsonify(json_return)
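
This view assumes a Flask app with two config keys already set; a minimal sketch of the surrounding wiring (the config values are placeholders):

from flask import Flask, jsonify

app = Flask(__name__)
app.config["DATABASE_FILE"] = "energy.db"   # placeholder path
app.config["CO2_MULTIPLIER"] = 0.5          # placeholder factor

app.add_url_rule("/update", view_func=update)  # register the view defined above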
Example #16
    def configure_rsi_bot(self, behaviour_config):
        exchange_interface = ExchangeInterface(
            self.config.fetch_exchange_config())
        strategy_analyzer = StrategyAnalyzer(exchange_interface)
        notifier = Notifier(self.config.fetch_notifier_config())
        db_handler = DatabaseHandler(self.config.fetch_database_config())

        behaviour = RSIBot(behaviour_config, exchange_interface,
                           strategy_analyzer, notifier, db_handler)

        return behaviour
Example #17
 def __init__(self, cu, cu_instance):
     super().__init__()
     self.cuv = cu.version()
     self.database = DatabaseHandler(debug=self.cuv in DUMMY_IDS)
     self.drivers = {}
     self.setDefaultDrivers()
     self.comp_mode = COMP_MODE__TRAINING
     self.comp_duration = 0
     self.tts = TTSHandler()
     self.tts.start()
     self.main_stack = QStackedWidget(self)
     self.qualifyingseq = QualifyingSeq(self)
     self.main_stack.addWidget(self.qualifyingseq)
     self.threadtranslation = ThreadTranslation()
     self.main_stack.addWidget(self.threadtranslation)
     self.idle = IdleMonitor(cu=cu, cu_instance=cu_instance)
     self.bridge = CUBridge(cu=cu,
                            cu_instance=cu_instance,
                            selected_drivers=self.drivers,
                            tts=self.tts,
                            threadtranslation=self.threadtranslation)
     self.start_signal = StartSignal(cu=cu, cu_instance=cu_instance)
     self.grid = Grid(parent=self)
     self.home = Home(parent=self, database=self.database)
     self.settings = Settings(parent=self, database=self.database)
     self.resultlist = ResultList(parent=self)
     self.main_stack.addWidget(self.home)
     self.main_stack.addWidget(self.grid)
     self.main_stack.addWidget(self.settings)
     self.main_stack.addWidget(self.resultlist)
     self.bridge.update_grid.connect(self.grid.driver_change)
     self.bridge.comp_state.connect(self.comp_state_update)
     self.bridge.comp_finished.connect(self.comp_finished_all)
     self.start_signal.ready_to_run.connect(self.startAfterSignal)
     self.start_signal.show_lights.connect(self.grid.showLight)
     self.idle.update_state.connect(self.show_state)
     self.bridge.update_state.connect(self.show_state)
     self.start_signal.update_state.connect(self.show_state)
     self.setCentralWidget(self.main_stack)
     self.initUI()
Example #18
 def run(self, start_config_id=None):
     self.db = DatabaseHandler()
     print('running')
     while True:
         
         (config_id, config_class, model, learner, algorithm) \
             = self.get_config(start_config_id)
         start_config_id = None
         print('learning')
         learner.main_loop()
         
         self.set_end_time(config_id)
Example #19
def populateDatabase():
    db = DatabaseHandler()
    db.connect()
    db.db.insert("users", "uniqueID", "12874", "username", "Johnny", "password", "123456789")
    db.db.insert("users", "uniqueID", "15322", "username", "John", "password", "123456789")
    db.db.insert("users", "uniqueID", "10573", "username", "George", "password", "123456789")
    db.close()
Example #20
    def __init__(self, run_event, queue, lcd, version_msg, fadeout_time,
                 db_name):
        threading.Thread.__init__(self)

        self.run_event = run_event
        self.queue = queue

        self.fadeout_time = fadeout_time

        self.lcd = lcd

        self.backlight_status = False
        self.message_list = list()
        self.current_message_num = 0

        self.loop_sleep = 0.10
        self.timeout_counter = 0

        self.version_msg = version_msg

        # LCD messages
        self.lcd.message(self.version_msg)

        self.display_msg = 0
        self.display_wx = 1
        self.display_wheel = 2
        self.display_clock = 3
        self.display_version = 4
        self.last_message = 4
        self.first_message = 0

        self.db = DatabaseHandler(db_name)

        if not os.path.exists(db_name):
            logger.info(
                "Database file {0} not found, creating".format(db_name))
            self.db.create_database()
Example #21
 def extract_feature_vector(self, file_id, extraction_method):
     db_handler = DatabaseHandler()
     
     if self.opcode_variety_ is None:
         self.set_opcode_variety_from_database()
     
     opcode_sequence = db_handler.extract_opcode_sequence(file_id)
     if extraction_method == 'bag-of-opcodes':
         feature_vector = self.extract_bag_of_opcodes(opcode_sequence)
     elif extraction_method == '2-gram':
         feature_vector = self.extract_ngram(opcode_sequence, 2)
     elif extraction_method == '3-gram':
         feature_vector = self.extract_ngram(opcode_sequence, 3)
     elif extraction_method == 'proposed':
         subroutine_sequence = db_handler.extract_subroutine_sequence(file_id)
         average_subroutine_length = self.extract_average_subroutine_length(subroutine_sequence)
         location_sequence = db_handler.extract_location_sequence(file_id)
         average_basicblock_length = self.extract_average_basicblock_length(location_sequence)
         # construct feature_vector here
         raise NotImplementedError('proposed feature extraction not implemented yet')
     else:
         sys.stderr.write('Error: no extraction method "' + extraction_method + '" found.\n')
         sys.exit()
     
     return feature_vector
Example #22
    def __init__(self,
                 worker_name,
                 task_id,
                 base_channel_names=['train_objective'],
                 save_prefix="model_",
                 mbsb_channel_name='valid_hps_cost',
                 cache_dataset=True):

        self.worker_name = worker_name
        self.cache_dataset = cache_dataset
        self.task_id = task_id
        self.dataset_cache = {}

        self.base_channel_names = base_channel_names
        self.save_prefix = save_prefix
        # TODO store this in data for each experiment or dataset
        self.mbsb_channel_name = mbsb_channel_name
        self.db = DatabaseHandler()
Example #23
    def __configure_reporter(self, behaviour_config):
        """Configures and returns the reporter behaviour class.

        Args:
            behaviour_config (dict): A dictionary of configuration values pertaining to the
                behaviour.

        Returns:
            ReporterBehaviour: A class of functionality for the reporter behaviour.
        """

        exchange_interface = ExchangeInterface(self.config.exchanges)
        notifier = Notifier(self.config.notifiers)
        db_handler = DatabaseHandler(self.config.database)

        behaviour = ReporterBehaviour(behaviour_config, exchange_interface,
                                      notifier, db_handler)

        return behaviour
Example #24
    def __configure_rsi_bot(self, behaviour_config):
        """Configures and returns the rsi bot behaviour class.

        Args:
            behaviour_config (dict): A dictionary of configuration values pertaining to the
                behaviour.

        Returns:
            RSIBot: A class of functionality for the rsi bot behaviour.
        """

        exchange_interface = ExchangeInterface(
            self.config.get_exchange_config())
        strategy_analyzer = StrategyAnalyzer(exchange_interface)
        notifier = Notifier(self.config.get_notifier_config())
        db_handler = DatabaseHandler(self.config.get_database_config())

        behaviour = RSIBot(behaviour_config, exchange_interface,
                           strategy_analyzer, notifier, db_handler)

        return behaviour
Example #25
def add_movie(title):
    temp_json_loader = JsonLoader()
    temp_json_loader.load_movie_from_api(title)
    temp_database_handler = DatabaseHandler()
    temp_database_handler.insert_movies(temp_json_loader.data)
    click.echo(f'{title} movie has been added correctly.')
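
add_movie reads like the body of a click command; one plausible wiring, applying click's decorators explicitly (the command name here is an assumption):

import click

add_movie_command = click.command(name='add-movie')(
    click.argument('title')(add_movie))

if __name__ == '__main__':
    add_movie_command()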
Example #26
class HPS:
    """
    Hyper Parameter Search
    
    Maps pylearn2 to a postgresql database. The idea is to accumulate 
    structured data concerning the hyperparameter optimization of 
    pylearn2 models and various datasets. With enough such structured data,
    one could train a meta-model that could be used for efficient 
    sampling of hyper parameter configurations.
    
    Jobman doesn't provide this since its data is unstructured and 
    decentralized. To centralize hyper parameter data, we would need to 
    provide it in the form of a ReSTful web service API.
    
    For now, I just use it instead of the jobman database to try various 
    hyperparameter configurations.
    
    """
    def __init__(self, 
                 experiment_id,
                 train_ddm, valid_ddm, 
                 log_channel_names,
                 test_ddm = None,
                 save_prefix = "model_",
                 mbsb_channel_name = None):
        self.experiment_id = experiment_id
        
        self.train_ddm = train_ddm
        self.valid_ddm = valid_ddm
        self.test_ddm = test_ddm
        self.monitoring_dataset = {'train': train_ddm}
        
        self.nvis = self.train_ddm.get_design_matrix().shape[1]
        self.nout = self.train_ddm.get_targets().shape[1]
        self.ntrain = self.train_ddm.get_design_matrix().shape[0]
        self.nvalid = self.valid_ddm.get_design_matrix().shape[0]
        self.ntest = 0
        if self.test_ddm is not None:
            self.ntest = self.test_ddm.get_design_matrix().shape[0]
        
        self.log_channel_names = log_channel_names
        self.save_prefix = save_prefix
        # TODO store this in data for each experiment or dataset
        self.mbsb_channel_name = mbsb_channel_name
        
        print "nvis, nout :", self.nvis, self.nout
        print "ntrain :", self.ntrain
        print "nvalid :", self.nvalid
        
    def run(self, start_config_id=None):
        self.db = DatabaseHandler()
        print('running')
        while True:
            if start_config_id is None:
                (config_id, model_id, ext_id, train_id,
                    dataset_id, random_seed, batch_size) \
                     = self.select_next_config(self.experiment_id)
            else:
                (config_id, model_id, ext_id, train_id,
                    dataset_id, random_seed, batch_size) \
                     = self.select_config(start_config_id)
            start_config_id = None
            
            (dataset_desc, input_space_id) = self.select_dataset(dataset_id)
            input_space = self.get_space(input_space_id)
            
            # build model
            model = self.get_model(model_id, 
                                   random_seed, 
                                   batch_size, 
                                   input_space)
            
            # extensions
            extensions = self.get_extensions(ext_id)
            
            # prepare monitor
            self.prep_valtest_monitor(model, batch_size)
            
            # monitor based save best
            if self.mbsb_channel_name is not None:
                save_path = self.save_prefix + str(config_id) + "_best.pkl"
                extensions.append(MonitorBasedSaveBest(
                        channel_name=self.mbsb_channel_name,
                        save_path=save_path,
                        cost=False))
            
            # HPS Logger
            extensions.append(
                HPSLog(self.log_channel_names, self.db, config_id)
            )
            
            # training algorithm
            algorithm = self.get_trainingAlgorithm(train_id, batch_size)
            
            print('sgd complete')
            learner = Train(dataset=self.train_ddm,
                            model=model,
                            algorithm=algorithm,
                            extensions=extensions)
            print('learning')
            learner.main_loop()
            
            self.set_end_time(config_id)
            
    def get_classification_accuracy(self, model, minibatch, target):
        Y = model.fprop(minibatch, apply_dropout=False)
        return T.mean(T.cast(T.eq(T.argmax(Y, axis=1), 
                               T.argmax(target, axis=1)), dtype='int32'),
                               dtype=config.floatX)
    def prep_valtest_monitor(self, model, batch_size):
        minibatch = T.as_tensor_variable(
                        self.valid_ddm.get_batch_topo(batch_size), 
                        name='minibatch'
                    )
        target = T.matrix('target')
        Accuracy = self.get_classification_accuracy(model, minibatch, target)           
        monitor = Monitor.get_monitor(model)
        
        monitor.add_dataset(self.valid_ddm, 'sequential', batch_size)
        monitor.add_channel("Validation Classification Accuracy",
                            (minibatch, target),
                            Accuracy,
                            self.valid_ddm)
        monitor.add_channel("Validation Missclassification",
                            (minibatch, target),
                            1.0-Accuracy,
                            self.valid_ddm)
                            
        if self.test_ddm is not None:
            monitor.add_dataset(self.test_ddm, 'sequential', batch_size)
            monitor.add_channel("Test Classification Accuracy",
                                (minibatch, target),
                                Accuracy,
                                self.test_ddm)
                                
    def get_trainingAlgorithm(self, train_id, batch_size):
        #TODO add cost to db
        num_train_batch = (self.ntrain // batch_size) // 8
        print("num training batches:", num_train_batch)
        train_class = self.select_trainingAlgorithm(train_id)
        if train_class == 'stochasticgradientdescent':
            (learning_rate, term_id, init_momentum, train_iteration_mode,
             cost_id) = self.select_train_stochasticGradientDescent(train_id)
            termination_criterion = self.get_termination(term_id)
            cost = self.get_cost(cost_id)
            return SGD( learning_rate=learning_rate,
                        cost=cost,
                        batch_size=batch_size,
                        batches_per_iter=num_train_batch,
                        monitoring_dataset=self.monitoring_dataset,
                        termination_criterion=termination_criterion,
                        init_momentum=init_momentum,
                        train_iteration_mode=train_iteration_mode) 
        else:
            raise HPSData("training class not supported:"+train_class)
    def get_cost(self, cost_id):
        cost_class = self.select_cost(cost_id)
        if cost_class == 'methodcost':
            (method, supervised) = self.select_cost_methodCost(cost_id)
            return MethodCost(method=method, supervised=supervised)
        elif cost_class == 'weightdecay':
            coeff = self.select_cost_weightDecay(cost_id)
            return WeightDecay(coeffs=coeff)
        elif cost_class == 'multi':
            cost_array = self.select_cost_multi(cost_id)
            costs = []
            for sub_cost_id in cost_array:
                costs.append(self.get_cost(sub_cost_id))
            return SumOfCosts(costs)
        else:
            raise HPSData("cost class not supported:"+str(cost_class))
    def get_model(self, model_id, random_seed, batch_size, input_space):
        model_class = self.select_model(model_id)
        if model_class == 'mlp':
            (input_layer_id, output_layer_id) \
                 = self.select_model_mlp(model_id)
            
            # TODO allow nesting of MLPs
            # TODO add dropout to layers
            # TODO add full graph capability to MLP 
            # and this part (should be made recursive):
            # TODO refactor get_graph
            
            input_layer = self.get_layer(input_layer_id)
            layers = [input_layer]
            prev_layer_id = input_layer_id
            while True:
                next_layer_id \
                    = self.select_output_layer(model_id, prev_layer_id)
                next_layer = self.get_layer(next_layer_id)
                layers.append(next_layer)
                if next_layer_id == output_layer_id:
                    # we have reached the end of the graph:
                    break
                prev_layer_id = next_layer_id
            
            # temporary hack until we get graph version of MLP:
            dropout_probs = []
            dropout_scales = []
            for layer in layers:
                dropout_probs.append(layer.dropout_prob)
                dropout_scales.append(layer.dropout_scale)
            # output layer is always called "output":
            layers[-1].layer_name = "output"
            # create MLP:
            model = MLP(layers,
                        input_space=input_space,
                        batch_size=batch_size,
                        dropout_probs=dropout_probs,
                        dropout_scales=dropout_scales,
                        random_seed=random_seed)
            print('mlp is built')
            return model
    def get_layer(self, layer_id):
        """Creates a Layer instance from its definition in the database."""
        (layer_class, layer_name, dim, 
         dropout_prob, dropout_scale) = self.select_layer(layer_id)
        if layer_class == 'maxout':
            (num_units,
             num_pieces,
             pool_stride,
             randomize_pools,
             irange,
             sparse_init,
             sparse_stdev,
             include_prob,
             init_bias,
             W_lr_scale,
             b_lr_scale,
             max_col_norm,
             max_row_norm) = self.select_layer_maxout(layer_id)
            layer = Maxout(num_units,
                             num_pieces,
                             pool_stride,
                             randomize_pools,
                             irange,
                             sparse_init,
                             sparse_stdev,
                             include_prob,
                             init_bias,
                             W_lr_scale,
                             b_lr_scale,
                             max_col_norm,
                             max_row_norm)
        elif layer_class == 'linear':
            (init_id, init_bias, 
             W_lr_scale, b_lr_scale, 
             max_row_norm, max_col_norm) = self.select_layer_linear(layer_id)
            init_weights = self.get_init(init_id)
            layer = Linear(dim=dim, layer_name=layer_name, 
                           init_weights=init_weights, init_bias=init_bias,
                           W_lr_scale=W_lr_scale, b_lr_scale=b_lr_scale,                 
                           max_row_norm=max_row_norm, 
                           max_col_norm=max_col_norm)
        elif layer_class == 'tanh':
            (init_id, init_bias, 
             W_lr_scale, b_lr_scale, 
             max_row_norm, max_col_norm) = self.select_layer_tanh(layer_id)
            init_weights = self.get_init(init_id)
            layer = Tanh(dim=dim, layer_name=layer_name, 
                           init_weights=init_weights, init_bias=init_bias,
                           W_lr_scale=W_lr_scale, b_lr_scale=b_lr_scale,                 
                           max_row_norm=max_row_norm, 
                           max_col_norm=max_col_norm)
        elif layer_class == 'sigmoid':
            (init_id, init_bias, 
             W_lr_scale, b_lr_scale, 
             max_row_norm, max_col_norm) \
                 = self.select_layer_sigmoid(layer_id) 
            init_weights = self.get_init(init_id)
            layer = Sigmoid(dim=dim, layer_name=layer_name, 
                           init_weights=init_weights, init_bias=init_bias,
                           W_lr_scale=W_lr_scale, b_lr_scale=b_lr_scale,                 
                           max_row_norm=max_row_norm, 
                           max_col_norm=max_col_norm)
        elif layer_class == 'softmaxpool':
            (detector_layer_dim, pool_size,
             init_id, init_bias,
             W_lr_scale, b_lr_scale) \
                 = self.select_layer_softmaxpool(layer_id) 
            init_weights = self.get_init(init_id)
            layer = SoftmaxPool(detector_layer_dim=detector_layer_dim, 
                           layer_name=layer_name, pool_size=pool_size,
                           init_weights=init_weights, init_bias=init_bias,
                           W_lr_scale=W_lr_scale, b_lr_scale=b_lr_scale)
        elif layer_class == 'softmax':
            (init_id, init_bias, 
             W_lr_scale, b_lr_scale, 
             max_row_norm, max_col_norm) \
                 = self.select_layer_softmax(layer_id) 
            init_weights = self.get_init(init_id)
            layer = Softmax(dim=dim, layer_name=layer_name, 
                           init_weights=init_weights, init_bias=init_bias,
                           W_lr_scale=W_lr_scale, b_lr_scale=b_lr_scale,                 
                           max_row_norm=max_row_norm, 
                           max_col_norm=max_col_norm)
        elif layer_class == 'rectifiedlinear':
            (init_id, init_bias, 
             W_lr_scale, b_lr_scale, 
             max_row_norm, max_col_norm,
             left_slope) = self.select_layer_rectifiedlinear(layer_id) 
            init_weights = self.get_init(init_id)
            layer = RectifiedLinear(dim=dim, layer_name=layer_name, 
                           init_weights=init_weights, init_bias=init_bias,
                           W_lr_scale=W_lr_scale, b_lr_scale=b_lr_scale,                 
                           max_row_norm=max_row_norm, 
                           max_col_norm=max_col_norm, left_slope=left_slope)
        elif layer_class == 'convrectifiedlinear':
            (output_channels, kernel_shape, pool_shape,
             pool_stride, border_mode, init_id,
             init_bias, W_lr_scale,
             b_lr_scale, left_slope,
             max_kernel_norm) \
                 = self.select_layer_convrectifiedlinear(layer_id) 
            init_weights = self.get_init(init_id)
            layer = ConvRectifiedLinear(output_channels=output_channels,
                        kernel_shape=(kernel_shape, kernel_shape),
                        pool_shape=(pool_shape, pool_shape),
                        pool_stride=(pool_stride, pool_stride),
                        layer_name=layer_name, init_weights=init_weights, 
                        init_bias=init_bias,
                        W_lr_scale=W_lr_scale, b_lr_scale=b_lr_scale,                 
                        max_kernel_norm=max_kernel_norm, 
                        left_slope=left_slope)
        else:
            raise HPSData("layer class not supported:"+str(layer_class))
        layer.dropout_prob = dropout_prob
        layer.dropout_scale = dropout_scale
        return layer
    def get_termination(self, term_id):
        term_class = self.select_termination(term_id)
        if term_class == 'epochcounter':
            max_epochs = self.select_term_epochCounter(term_id)
            return EpochCounter(max_epochs)
        elif term_class == 'monitorbased':
            (proportional_decrease, max_epochs, channel_name) \
                = self.select_term_monitorBased(term_id)
            return MonitorBased(prop_decrease = proportional_decrease, 
                                N = max_epochs, channel_name = channel_name)
        elif term_class == 'or':
            term_array = self.select_term_or(term_id)
            terminations = []
            for sub_term_id in term_array:
                terminations.append(self.get_termination(sub_term_id))
            return Or(terminations)
        elif term_class == 'and':
            term_array = self.select_term_and(term_id)
            terminations = []
            for sub_term_id in term_array:
                terminations.append(self.get_termination(sub_term_id))
            return And(terminations)
        else:
            raise HPSData("Termination class not supported:"+term_class)
    def get_space(self, space_id):
        space_class = self.select_space(space_id)
        if space_class == 'conv2dspace':
            (num_row, num_column, num_channels) \
                = self.select_space_conv2DSpace(space_id)
            return Conv2DSpace(shape=(num_row, num_column), 
                               num_channels=num_channels)
        else:
            raise HPSData("Space class not supported:"+str(space_class))
    def get_init(self, init_id):
        init_class = self.select_init(init_id)
        if init_class == 'uniform':
            init_range = self.select_init_uniform(init_id)
            return Uniform(init_range = init_range)
        elif init_class == 'normal':
            stdev = self.select_init_normal(init_id)
            return Normal(stdev = stdev)
        elif init_class == 'sparse':
            (sparseness, stdev) = self.select_init_sparse(init_id)
            return Sparse(sparseness=sparseness, stdev=stdev)
        elif init_class == 'uniformconv2d':
            init_range = self.select_init_uniformConv2D(init_id)
            return UniformConv2D(init_range = init_range)
        else:
            raise HPSData("init class not supported:"+str(init_class))
    def get_extensions(self, ext_id):
        if ext_id is None:
            return []
        ext_class = self.select_extension(ext_id)
        if ext_class == 'exponentialdecayoverepoch':
            (decay_factor, min_lr) \
                =  self.select_ext_exponentialDecayOverEpoch(ext_id)
            return [ExponentialDecayOverEpoch(decay_factor=decay_factor,
                                             min_lr=min_lr)]
        elif ext_class == 'momentumadjustor':
            (final_momentum, start_epoch, saturate_epoch) \
                = self.select_ext_momentumAdjustor(ext_id)
            return [MomentumAdjustor(final_momentum=final_momentum,
                                    start=start_epoch, 
                                    saturate=saturate_epoch)]
        elif ext_class == 'multi':
            ext_array = self.select_ext_multi(ext_id)
            extensions = []
            for sub_ext_id in ext_array:
                extensions.extend(self.get_extensions(sub_ext_id))
            return extensions
        else:
            raise HPSData("ext class not supported:"+str(ext_class))
    def set_end_time(self, config_id):
        return self.db.executeSQL("""
        UPDATE hps.config 
        SET end_time = now()
        WHERE config_id = %s
        """, (config_id,), self.db.COMMIT)  
    def set_accuracy(self, config_id, accuracy):
        return self.db.executeSQL("""
        INSERT INTO hps.validation_accuracy (config_id, accuracy)
        VALUES (%s, %s)
        """, (config_id, accuracy), self.db.COMMIT)  
    def select_trainingAlgorithm(self, train_id):
        row = self.db.executeSQL("""
        SELECT train_class
        FROM hps.trainingAlgorithm
        WHERE train_id = %s
        """, (train_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No trainingAlgorithm for train_id="+str(train_id))
        return row[0]
    def select_train_stochasticGradientDescent(self, train_id):
        row = self.db.executeSQL("""
        SELECT learning_rate, term_id, init_momentum, train_iteration_mode,
               cost_id
        FROM hps.train_stochasticGradientDescent
        WHERE train_id = %s
        """, (train_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No stochasticGradientDescent for train_id=" \
                +str(train_id))
        return row
    def select_termination(self, term_id):
        row = self.db.executeSQL("""
        SELECT term_class
        FROM hps.termination
        WHERE term_id = %s
        """, (term_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No termination for term_id="+str(term_id))
        return row[0]
    def select_term_epochCounter(self, term_id):
        row = self.db.executeSQL("""
        SELECT max_epochs
        FROM hps.term_epochcounter
        WHERE term_id = %s
        """, (term_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No epochCounter term for term_id="+str(term_id))
        return row[0]
    def select_term_monitorBased(self, term_id):
        row = self.db.executeSQL("""
        SELECT proportional_decrease, max_epoch, channel_name
        FROM hps.term_monitorBased
        WHERE term_id = %s
        """, (term_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No monitorBased term for term_id="+str(term_id))
        return row
    def select_term_and(self, term_id):
        row = self.db.executeSQL("""
        SELECT term_array
        FROM hps.term_and
        WHERE term_id = %s
        """, (term_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No And term for term_id="+str(term_id))
        return row[0]
    def select_term_or(self, term_id):
        row = self.db.executeSQL("""
        SELECT term_array
        FROM hps.term_or
        WHERE term_id = %s
        """, (term_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No Or term for term_id="+str(term_id))
        return row[0]
    def select_space(self, space_id):
        row = self.db.executeSQL("""
        SELECT space_class
        FROM hps.space
        WHERE space_id = %s
        """, (space_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No space for space_id="+str(space_id))
        return row[0]
    def select_space_conv2DSpace(self, space_id):
        row = self.db.executeSQL("""
        SELECT num_row, num_column, num_channel
        FROM hps.space_conv2DSpace
        WHERE space_id = %s
        """, (space_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No conv2DSpace for space_id="+str(space_id))
        return row
    def select_cost(self, cost_id):
        row = self.db.executeSQL("""
        SELECT cost_class
        FROM hps.cost
        WHERE cost_id = %s
        """, (cost_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No cost for cost_id="+str(cost_id))
        return row[0]
    def select_cost_methodCost(self, cost_id):
        row = self.db.executeSQL("""
        SELECT method_name, supervised
        FROM hps.cost_methodCost
        WHERE cost_id = %s
        """, (cost_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No methodCost for cost_id="+str(cost_id))
        return row
    def select_cost_weightDecay(self, cost_id):
        row = self.db.executeSQL("""
        SELECT decay_coeff
        FROM hps.cost_weightDecay
        WHERE cost_id = %s
        """, (cost_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No weightDecay for cost_id="+str(cost_id))
        return row[0]   
    def select_cost_multi(self, cost_id):
        row = self.db.executeSQL("""
        SELECT cost_array
        FROM hps.cost_multi
        WHERE cost_id = %s
        """, (cost_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No multi cost for cost_id="+str(cost_id))
        return row[0] 
    def select_extension(self, ext_id):
        row = self.db.executeSQL("""
        SELECT ext_class
        FROM hps.extension
        WHERE ext_id = %s
        """, (ext_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No extension for ext_id="+str(ext_id))
        return row[0]
    def select_ext_exponentialDecayOverEpoch(self, ext_id):
        row = self.db.executeSQL("""
        SELECT decay_factor, min_lr
        FROM hps.ext_exponentialDecayOverEpoch
        WHERE ext_id = %s
        """, (ext_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No exponentialDecayOverEpoch ext for ext_id=" \
                +str(ext_id))
        return row
    def select_ext_momentumAdjustor(self, ext_id):
        row = self.db.executeSQL("""
        SELECT final_momentum, start_epoch, saturate_epoch
        FROM hps.ext_momentumAdjustor
        WHERE ext_id = %s
        """, (ext_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No momentumAdjustor extension for ext_id=" \
                +str(ext_id))
        return row
    def select_ext_multi(self, ext_id):
        row = self.db.executeSQL("""
        SELECT ext_array
        FROM hps.ext_multi
        WHERE ext_id = %s
        """, (ext_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No multiple extension for ext_id=" \
                +str(ext_id))
        return row[0]
    def select_output_layer(self, model_id, input_layer_id):
        row = self.db.executeSQL("""
        SELECT output_layer_id
        FROM hps.mlp_graph AS a
        WHERE (model_id, input_layer_id) = (%s, %s)
        """, (model_id, input_layer_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No output layer for input layer_id=" \
                +str(input_layer_id)+" and model_id="+str(model_id))
        return row[0]
    def select_next_config(self, experiment_id):
        row = None
        for _ in range(10):
            c = self.db.conn.cursor()
            c.execute("""
            BEGIN;
    
            SELECT  config_id, model_id, ext_id, train_id,
                    dataset_id, random_seed, batch_size  
            FROM hps.config 
            WHERE experiment_id = %s AND start_time IS NULL 
            LIMIT 1 FOR UPDATE;
            """, (experiment_id,))
            row = c.fetchone()
            if row is not None and row:
                break
            time.sleep(0.1)
            c.close()
        if not row or row is None:
            raise HPSData("No more configurations for experiment_id=" \
                +str(experiment_id)+", row: "+str(row))
        (config_id, model_id, ext_id, train_id,
         dataset_id, random_seed, batch_size) = row
        c.execute("""
        UPDATE hps.config
        SET start_time = now() 
        WHERE config_id = %s;
        """, (config_id,))
        self.db.conn.commit()
        c.close()
        return (config_id, model_id, ext_id, train_id,
                dataset_id, random_seed, batch_size)
    def select_config(self, config_id):
        row = None
        for _ in range(10):
            c = self.db.conn.cursor()
            c.execute("""
            BEGIN;
    
            SELECT  config_id, model_id, ext_id, train_id,
                    dataset_id, random_seed, batch_size  
            FROM hps.config 
            WHERE config_id = %s 
            LIMIT 1 FOR UPDATE;
            """, (config_id,))
            row = c.fetchone()
            if row is not None and row:
                break
            time.sleep(0.1)
            c.close()
        if not row or row is None:
            raise HPSData("No more configurations for config_id=" \
                +str(config_id)+", row:"+str(row))
        (config_id, model_id, ext_id, train_id,
         dataset_id, random_seed, batch_size) = row
        c.execute("""
        UPDATE hps.config
        SET start_time = now() 
        WHERE config_id = %s;
        """, (config_id,))
        self.db.conn.commit()
        c.close()
        return (config_id, model_id, ext_id, train_id,
                dataset_id, random_seed, batch_size)
    def select_model(self, model_id):
        row = self.db.executeSQL("""
        SELECT model_class
        FROM hps.model
        WHERE model_id = %s
        """, (model_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No model for model_id="+str(model_id))
        return row[0]
    def select_model_mlp(self, model_id):
        row = self.db.executeSQL("""
        SELECT input_layer_id, output_layer_id
        FROM hps.model_mlp
        WHERE model_id = %s
        """, (model_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No configuration for model_id="+str(model_id))
        return row
    def select_dataset(self, dataset_id):
        row = self.db.executeSQL("""
        SELECT dataset_desc, input_space_id
        FROM hps.dataset
        WHERE dataset_id = %s
        """, (dataset_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No dataset for dataset_id="+str(dataset_id))
        return row
    def select_layer(self, layer_id):
        row = self.db.executeSQL("""
        SELECT layer_class, layer_name, dim, dropout_prob, dropout_scale
        FROM hps.layer
        WHERE layer_id = %s
        """, (layer_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No layer for layer_id="+str(layer_id))
        return row
    def select_layer_linear(self, layer_id):
        row = self.db.executeSQL("""
        SELECT init_id, init_bias, 
               W_lr_scale, b_lr_scale,
               max_row_norm, max_col_norm
        FROM hps.layer_linear
        WHERE layer_id = %s
        """, (layer_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No linear layer for layer_id="+str(layer_id))
        return row
    def select_layer_maxout(self, layer_id):
        row = self.db.executeSQL("""
        SELECT   num_units,
                 num_pieces,
                 pool_stride,
                 randomize_pools,
                 irange,
                 sparse_init,
                 sparse_stdev,
                 include_prob,
                 init_bias,
                 W_lr_scale,
                 b_lr_scale,
                 max_col_norm,
                 max_row_norm
        FROM hps.layer_maxout
        WHERE layer_id = %s
        """, (layer_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No maxout layer for layer_id="+str(layer_id))
        return row
    def select_layer_softmax(self, layer_id):
        row = self.db.executeSQL("""
        SELECT init_id, init_bias, 
               W_lr_scale, b_lr_scale,
               max_row_norm, max_col_norm
        FROM hps.layer_softmax
        WHERE layer_id = %s
        """, (layer_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No softmax layer for layer_id="+str(layer_id))
        return row
    def select_layer_rectifiedlinear(self, layer_id):
        row = self.db.executeSQL("""
        SELECT init_id, init_bias, 
               W_lr_scale, b_lr_scale,
               max_row_norm, max_col_norm,
               left_slope
        FROM hps.layer_rectifiedlinear
        WHERE layer_id = %s
        """, (layer_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No rectifiedlinear layer for layer_id="\
                +str(layer_id))
        return row
    def select_layer_softmaxpool(self, layer_id):
        row = self.db.executeSQL("""
        SELECT detector_layer_dim, pool_size,
               init_id, init_bias,
               W_lr_scale, b_lr_scale
        FROM hps.layer_softmaxpool
        WHERE layer_id = %s
        """, (layer_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No softmaxpool layer for layer_id="+str(layer_id))
        return row
    def select_layer_convrectifiedlinear(self, layer_id):
        row = self.db.executeSQL("""
        SELECT  output_channels, kernel_shape, pool_shape,
                pool_stride, border_mode, init_id,
                init_bias, W_lr_scale,
                b_lr_scale, left_slope,
                max_kernel_norm
        FROM hps.layer_convrectifiedlinear
        WHERE layer_id = %s
        """, (layer_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No convrectifiedlinear layer for layer_id=" \
                +str(layer_id))
        return row
    def select_init(self, init_id):
        row = self.db.executeSQL("""
        SELECT init_class
        FROM hps.init
        WHERE init_id = %s
        """, (init_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No init weights for init_id="+str(init_id))
        return row[0]
    def select_init_uniformConv2D(self, init_id):
        row = self.db.executeSQL("""
        SELECT init_range
        FROM hps.init_uniformConv2D
        WHERE init_id = %s
        """, (init_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No init_uniformConv2D for init_id="+str(init_id))
        return row[0]
    def select_init_uniform(self, init_id):
        row = self.db.executeSQL("""
        SELECT init_range
        FROM hps.init_uniform
        WHERE init_id = %s
        """, (init_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No init_uniform for init_id="+str(init_id))
        return row[0]
    def select_init_normal(self, init_id):
        row = self.db.executeSQL("""
        SELECT init_stdev
        FROM hps.init_normal
        WHERE init_id = %s
        """, (init_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No init_normal for init_id="+str(init_id))
        return row[0]
    def select_init_sparse(self, init_id):
        row = self.db.executeSQL("""
        SELECT init_sparseness, init_stdev
        FROM hps.init_sparse
        WHERE init_id = %s
        """, (init_id,), self.db.FETCH_ONE)  
        if not row or row is None:
            raise HPSData("No init_sparse for init_id="+str(init_id))
        return row
    def select_preprocess(self, preprocess_id):
        row = self.db.executeSQL("""
        SELECT dataset_desc, dataset_nvis
        FROM hps.dataset
        WHERE dataset_id = %s
        """, (preprocess_id,), self.db.FETCH_ONE)
        if not row or row is None:
            raise HPSData("No preprocess for preprocess_id="+str(preprocess_id))
        return row
Example #27
    def __init__(self):
        start = time.time()
        DatabaseHandler.__init__(self)
        # init SQL functions:
        self.executeSQL('''
        CREATE OR REPLACE FUNCTION public.measure_density(item_key INT4, cluster_key INT4)
            RETURNS FLOAT8 AS $$
        SELECT GREATEST(SUM(similarity), 0.0000000000000001) AS sum
        FROM %s AS a, public.itemclusters AS b
        WHERE $1 = a.tail AND a.head = b.item_key AND b.cluster_key = $2
        $$ LANGUAGE 'SQL';
        ''' % (ITEMLINKS_TABLE,))
        self.executeSQL('''
        CREATE OR REPLACE FUNCTION public.get_clustering_statistics()
            RETURNS TABLE(count INT4, maxsum FLOAT8, maxcount INT2, sum FLOAT8) AS $$
        SELECT COUNT(*)::INT4, MAX(sum), MAX(count)::INT2, SUM(sum)
        FROM    (
                SELECT cluster_key, COUNT(*), SUM(density)
                FROM public.itemclusters
                GROUP BY cluster_key
                ORDER BY sum DESC
                ) AS foo
        WHERE sum > 50
        ; $$ LANGUAGE 'SQL';
        ''')
        self.executeSQL('''
        CREATE OR REPLACE FUNCTION public.measure_density_for_transfer(affected_item INT4, transfered_item INT4)
            RETURNS FLOAT8 AS $$
        SELECT GREATEST(SUM(similarity), 0.000001) AS sum
        FROM    (
                SELECT cluster_key
                FROM public.itemclusters
                WHERE item_key = $1
                ) AS a, %s AS b, public.itemclusters AS c
        WHERE $1 = b.tail AND b.head = c.item_key AND c.item_key != $2 AND c.cluster_key = a.cluster_key
        ; $$ LANGUAGE 'SQL';
        ''' % (ITEMLINKS_TABLE,))
        self.executeSQL('''
        CREATE OR REPLACE FUNCTION public.transfer_item_to_cluster(item INT4, new_cluster INT4)
            RETURNS VOID AS $$
        --Update density of all items in previous_cluster itemlinking the item:
        UPDATE public.itemclusters
        SET density = public.measure_density_for_transfer(item_key, $1)
        FROM    (
                SELECT cluster_key AS previous_cluster
                FROM public.itemclusters
                WHERE item_key = $1
                ) AS a, 
                (
                SELECT head AS item
                FROM %s
                WHERE $1 = tail
                ) AS b
        WHERE b.item = item_key AND cluster_key = a.previous_cluster;
        --transfer the item to new_cluster:
        UPDATE public.itemclusters
        SET cluster_key = $2, density = public.measure_density($1, $2)
        WHERE item_key = $1;
        --Update item_density of all items in the new_cluster itemlinking the item:
        UPDATE public.itemclusters
        SET density = public.measure_density(item_key, $2)
        FROM    (
                SELECT head AS item
                FROM %s
                WHERE $1 = tail
                ) AS a
        WHERE a.item = item_key AND cluster_key = $2
        ; $$ LANGUAGE 'SQL';
        ''' % (ITEMLINKS_TABLE, ITEMLINKS_TABLE))
        self.executeSQL('''
        CREATE OR REPLACE FUNCTION public.get_badly_clustered_item_from_cluster(cluster INT4)
            RETURNS INT4 AS $$
        SELECT item_key
        FROM public.itemclusters
        WHERE cluster_key = $1
        ORDER BY density ASC
        LIMIT 1
        ; $$ LANGUAGE 'SQL';
        ''')
        # get clusters:
        clusters = self.executeSQL("SELECT DISTINCT cluster_key FROM public.itemclusters", action=self.FETCH_ALL)
        self.clusters = {}
        self.clusterList = []
        for (cluster_key,) in clusters:
            self.clusters[cluster_key] = Cluster(cluster_key)
            self.clusterList.append(cluster_key)
        self.num_clusters = len(self.clusters)
        self.taskLock = Lock()
        self.nextTask = 1
        # for real-time (periodic online) statistics:
        self.taskings = ZERO
        self.changes = ZERO
        self.tries = ZERO
        self.loopFailures = 0
        self.lastTimeCheck = time.time()
        self.lastSync = time.time()
        # init workers:
        InvisibleHand.__init__(self, ClusteringWorker)
        print("system initialized in", time.time() - start, "secs")
Example #28
cmd:text('Generate torch dump for billion-words dataset')
cmd:text('Example:')
cmd:text('$> th postgres2torch.lua --dataset train')
cmd:text('$> th postgres2torch.lua --treeTable "bw.word_cluster" --treeFile "word_tree1.th7"')
cmd:text('Options:')
cmd:option('--dataset', 'train', 'train | valid | test | small | tiny')
cmd:option('--stepSize', 1000, 'amount of sentences to retrieve per query')

cmd:option('--wordMap', false, 'outputs a mapping of word strings to word integers')

cmd:option('--treeTable', '', 'Name of the table containing a hierarchy of words. Used for hierarchical softmaxes.')
cmd:option('--treeFile', '', 'Name of the file where to save the torch dump.')

opt = cmd:parse(arg or {})

local pg = DatabaseHandler()

-- where to save data:
local data_path = paths.concat(dp.DATA_DIR, 'BillionWords')
dp.check_and_mkdir(data_path)

-- count the words in the chosen dataset (the %s is the dataset name, e.g. 'train'):
local n_word = pg:executeSQL(string.format(
      "SELECT SUM(array_upper(sentence_words, 1)-1) FROM bw.%s_sentence",
      opt.dataset), pg.FETCH_ONE)[1]

local data = torch.IntTensor(n_word, 2)
--sequence of words where sentences are delimited by </s>
local corpus = data:select(2, 2)
--holds start indices of sentence of word at same index in corpus
Exemplo n.º 29
0
class Application(object):

    def __init__(self):
        self.db_handler = DatabaseHandler()
        self.cli = CommandLineInterface()

    def run(self):
        #  database.perform_initial_setup()
        quit = False
        while not quit:
            activity = self.cli.main_menu()
            translations = self.load_translations()

            if activity == "Translate":
                self.log_translations()

            elif activity == "Practice":
                self.practice(translations)

            quit = activity == "quit"

        self.save_translations(translations)
        print("Saved your progress. Good work, have a great day!")

    def log_translations(self):
        print("\nTo exit, leave one or more fields empty.\n")
        while True:
            translation = self.cli.register_translation()
            native_word = translation[0]
            foreign_word = translation[1]
            if not (native_word and foreign_word):
                break
            self.db_handler.add_native_word(native_word)
            self.db_handler.add_foreign_word(foreign_word)
            self.db_handler.add_translation(native_word, foreign_word)
            print("\n Added translation!")

    def practice(self, translations):
        print("\nTo exit, leave answer empty.\n")
        while True:
            translation = self.get_random_translation(translations)
            answer = self.cli.translate(translation.get_native_word())
            if not answer:
                if self.cli.verify_quit() in ("yes", "1", "quit", "exit"):
                    break
                continue
            if answer.lower() == translation.get_foreign_word().lower():
                score_change = 1
                print("\nCorrect!\n")
            else:
                print("\nWrong, correct answer: {0}\n".format(
                    translation.get_foreign_word()))
                score_change = -2
            self.update_difficulty(translations, translation, score_change)
        self.save_translations(translations)

    def load_translations(self):
        translations = {"hard": [], "medium": [], "easy": []}
        for db_translation in self.db_handler.load_translations():
            score = db_translation[2]
            translations[self.get_difficulty(score)].append(
                Translation(db_translation[0], db_translation[1], score))
        return translations

    def save_translations(self, translations):
        for category in translations:
            for translation in translations[category]:
                self.db_handler.update_translation_score(
                    translation.get_native_word(),
                    translation.get_foreign_word(),
                    translation.get_score())
        self.db_handler.commit_changes()

    def get_random_translation(self, translations):
        while True:
            translation_list = translations[self.random_difficulty()]
            if translation_list:
                return random.choice(translation_list)

    def get_difficulty(self, score):
        difficulty = "hard"
        if score > 10:
            if score < 20:
                difficulty = "medium"
            else:
                difficulty = "easy"
        return difficulty

    def random_difficulty(self):
        difficulty = random.randint(1, 101)
        if difficulty < 46:
            return "hard"
        elif difficulty < 81:
            return "medium"
        return "easy"

    def update_difficulty(self, translations, translation, score_delta):
        score = translation.get_score()
        previous_difficulty = self.get_difficulty(score)
        difficulty = self.get_difficulty(score + score_delta)
        translation.update_score(score_delta)
        if difficulty != previous_difficulty:
            translations[previous_difficulty].remove(translation)
            translations[difficulty].append(translation)
Exemplo n.º 30
0
# coding=utf-8

import numpy
import pylab
from database import DatabaseHandler
import sys

if __name__ == "__main__":
    config_id = int(sys.argv[1])
    max_error = float(sys.argv[2])
    db = DatabaseHandler()
    rows = db.executeSQL("""
    SELECT 	a.epoch_count, a.channel_value AS train_cost,
        hps3.get_channel(a.config_id::INT4, 'valid_hps_cost'::VARCHAR, a.epoch_count) AS valid_error, 
        hps3.get_channel(a.config_id::INT4, 'test_hps_cost'::VARCHAR, a.epoch_count) AS test_error
    FROM hps3.training_log AS a
    WHERE a.config_id = %s AND a.channel_name = 'train_objective'
    ORDER BY epoch_count ASC
    """,(config_id,),db.FETCH_ALL)
    
    error = numpy.asarray(rows)
    
    pylab.xlabel('epoch')
    pylab.ylabel('error')
    pylab.axis([0, error.shape[0], 0, max_error])
    pylab.plot(error[:,0], error[:,1], 'g', label='Training Error')
    pylab.plot(error[:,0], error[:,2],'r', label='Validation Error')
    pylab.plot(error[:,0] , error[:,3],'b', label="Test Error")
    pylab.legend()
    pylab.show()
Exemplo n.º 31
0
class Nonogram:
    def __init__(self):
        self.width = 0  # number of columns
        self.height = 0  # number of rows
        self.rows = []  # 2d array storing hints for rows
        self.cols = []  # 2d array storing hints for columns
        self.board = []  # 2D numpy array storing board tiles state

        self.db_handler = DatabaseHandler(
            'puzzles.db'
        )  # pointer to database handler used for loading puzzles from database
        self.solution = [
        ]  # 2d array storing solution of the puzzle (1st row, 2nd row, etc.)

    def init_game(self, width, height, rows, cols):
        self.width = width
        self.height = height
        self.rows = np.array(rows)
        self.cols = np.array(cols)
        self.board = np.zeros((self.height, self.width), dtype=int)

    def load_from_file(self, filename):
        with open(filename, 'r') as f:
            # read dimensions of the puzzle
            buffer = f.readline()
            self.height, self.width = [int(s) for s in buffer.split(' ')]

            # read hints for rows
            buffer = f.readline()
            self.rows = np.array(ast.literal_eval(buffer))

            # read hints for columns
            buffer = f.readline()
            self.cols = np.array(ast.literal_eval(buffer))

        self.board = np.zeros((self.height, self.width), dtype=int)

    def load_from_db(self, puzzle_id):
        sql_select = self.db_handler.select_square_data_by_id(puzzle_id)
        game_data = sql_select[0]
        self.width = game_data[2]
        self.height = game_data[1]
        self.rows = np.array(ast.literal_eval(game_data[3]))
        self.cols = np.array(ast.literal_eval(game_data[4]))

        self.board = np.zeros((self.height, self.width), dtype=int)

    @staticmethod
    def prepare_line(line):
        """Converts a line of tiles to a list containing lengths of continuous colored fields."""
        data = []
        index = 0
        while index < len(line):
            tmp = 0
            while index < len(line) and line[index] == 1:
                tmp += 1
                index += 1
            if tmp > 0:
                data.append(tmp)
            index += 1
        return np.array(data, dtype=int)

    def get_solution_from_file(self, filename):
        self.solution = []
        try:
            f = open(filename, 'r')
        except FileNotFoundError:
            print("File does not exist!")
            return

        with f:
            for _ in range(len(self.rows)):
                buffer = f.readline()
                if buffer != '':
                    self.solution.append([int(s) for s in buffer.split(' ')])

    def check_solution(self):
        # check if every row is correct
        for index, row in enumerate(self.rows):
            a = self.prepare_line(self.board[index])
            if not np.array_equal(row, a):
                print("WRONG ROW", row, a, index)
                return False
        # check if every column is correct
        for index, col in enumerate(self.cols):
            a = self.prepare_line(self.board[:, index])
            if not np.array_equal(col, a):
                print("WRONG COL", col, a, index)
                return False
        print("CORRECT")
        return True

    def get_board_row(self, index):
        """Method for retrieving a given row"""
        if index < 0 or index >= self.height:
            raise IndexError(
                f"Row index out of range! Allowed indexes are 0-{self.height-1}; given index: {index}"
            )
        return self.board[index].copy()

    def get_board_column(self, index):
        """Method for retrieving a given column"""
        if index < 0 or index >= self.width:
            raise IndexError(
                f"Column index out of range! Allowed indexes are 0-{self.width-1}; given index: {index}"
            )
        return self.board[:, index].copy()

    def get_board_tile(self, x, y):
        """Method for getting the value of a given tile"""
        if x < 0 or x >= self.width:
            raise IndexError(
                f"X coordinate out of range! Allowed indexes are 0-{self.width-1}; given: {x}"
            )
        if y < 0 or y >= self.height:
            raise IndexError(
                f"Y coordinate out of range! Allowed indexes are 0-{self.height-1}; given: {y}"
            )
        return self.board[y][x]

    def get_hints_row(self, index):
        """Method for retrieving all hints for a given row"""
        if index < 0 or index >= self.height:
            raise IndexError(
                f"Row index out of range! Allowed indexes are 0-{self.height-1}; given index: {index}"
            )
        return self.rows[index]

    def get_hints_column(self, index):
        """Method for retrieving all hints for a given column"""
        if index < 0 or index >= self.width:
            raise IndexError(
                f"Column index out of range! Allowed indexes are 0-{self.width-1}; given index: {index}"
            )
        return self.cols[index]

    def set_board_tile(self, x, y, value):
        """Method for setting the value of a given tile"""
        if x < 0 or x >= self.width:
            raise IndexError(
                f"X coordinate out of range! Allowed indexes are 0-{self.width-1}; given: {x}"
            )
        if y < 0 or y >= self.height:
            raise IndexError(
                f"Y coordinate out of range! Allowed indexes are 0-{self.height-1}; given: {y}"
            )
        if value not in (-1, 0, 1):
            raise ValueError(
                f"Value incorrect. Allowed values: (-1, 0, 1); given value: {value}"
            )
        self.board[y][x] = value

    def solve(self):
        solver = SolverDFS(self)
        solver.solve()
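A short usage sketch for the class above. The file name is a placeholder and must follow the three-line format read by load_from_file (dimensions, row hints, column hints):

# Hypothetical usage of the Nonogram class defined above.
game = Nonogram()
game.load_from_file('puzzle.txt')  # placeholder path
game.solve()                       # delegates to SolverDFS
if game.check_solution():
    for y in range(game.height):
        print(game.get_board_row(y))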
Exemplo n.º 32
0
def database(database_create_table_query, database_handler_init):
    with patch.object(DatabaseHandler, '__init__', database_handler_init):
        temp_database_handler = DatabaseHandler()
        temp_database_handler.cur.execute(database_create_table_query)
        return temp_database_handler
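Assuming the function above is registered as a pytest fixture (the decorator is not shown in the snippet), a test could consume it roughly as follows; the table and column names are illustrative only:

# Hypothetical test; assumes 'database' is wired up as a pytest fixture and
# that DatabaseHandler exposes its cursor as .cur, as in the snippet above.
def test_insert_and_count(database):
    database.cur.execute("INSERT INTO puzzle (rows, cols) VALUES (5, 5)")
    database.cur.execute("SELECT COUNT(*) FROM puzzle")
    assert database.cur.fetchone()[0] == 1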
Exemplo n.º 33
0
class RMS(QMainWindow):
    def __init__(self, cu, cu_instance):
        super().__init__()
        self.cuv = cu.version()
        self.database = DatabaseHandler(debug=self.cuv in DUMMY_IDS)
        self.drivers = {}
        self.setDefaultDrivers()
        self.comp_mode = COMP_MODE__TRAINING
        self.comp_duration = 0
        self.tts = TTSHandler()
        self.tts.start()
        self.main_stack = QStackedWidget(self)
        self.qualifyingseq = QualifyingSeq(self)
        self.main_stack.addWidget(self.qualifyingseq)
        self.threadtranslation = ThreadTranslation()
        self.main_stack.addWidget(self.threadtranslation)
        self.idle = IdleMonitor(cu=cu, cu_instance=cu_instance)
        self.bridge = CUBridge(cu=cu,
                               cu_instance=cu_instance,
                               selected_drivers=self.drivers,
                               tts=self.tts,
                               threadtranslation=self.threadtranslation)
        self.start_signal = StartSignal(cu=cu, cu_instance=cu_instance)
        self.grid = Grid(parent=self)
        self.home = Home(parent=self, database=self.database)
        self.settings = Settings(parent=self, database=self.database)
        self.resultlist = ResultList(parent=self)
        self.main_stack.addWidget(self.home)
        self.main_stack.addWidget(self.grid)
        self.main_stack.addWidget(self.settings)
        self.main_stack.addWidget(self.resultlist)
        self.bridge.update_grid.connect(self.grid.driver_change)
        self.bridge.comp_state.connect(self.comp_state_update)
        self.bridge.comp_finished.connect(self.comp_finished_all)
        self.start_signal.ready_to_run.connect(self.startAfterSignal)
        self.start_signal.show_lights.connect(self.grid.showLight)
        self.idle.update_state.connect(self.show_state)
        self.bridge.update_state.connect(self.show_state)
        self.start_signal.update_state.connect(self.show_state)
        self.setCentralWidget(self.main_stack)
        self.initUI()

    def initUI(self):

        self.statusBar().showMessage('Ready')

        if self.cuv not in DUMMY_IDS:
            self.showMaximized()
        self.setWindowTitle('RMS')
        self.showHome()
        self.show()

    def setDefaultDrivers(self):
        self.drivers = {}
        driversjson = self.database.getConfigStr('DEFAULT_DRIVERS')
        if driversjson is not None:
            driversdb = json.loads(driversjson)
            for addr, driver in driversdb.items():
                addrt = int(addr)
                self.drivers[addrt] = driver

    def startBridgeThread(self):
        if not self.bridge.isRunning():
            self.bridge.stop = False
            self.bridge.start()

    def startIdleThread(self):
        if not self.idle.isRunning():
            self.idle.stop = False
            self.idle.start()

    def startStartSignalThread(self):
        if not self.start_signal.isRunning():
            self.start_signal.stop = False
            self.start_signal.start()

    def stopAllThreads(self):
        if self.bridge.isRunning():
            self.bridge.stop = True
            self.bridge.wait()
        if self.idle.isRunning():
            self.idle.stop = True
            self.idle.wait()
        if self.start_signal.isRunning():
            self.start_signal.stop = True
            self.start_signal.wait()

    def showSettings(self):
        self.main_stack.setCurrentWidget(self.settings)
        self.stopAllThreads()
        self.startIdleThread()

    def showHome(self):
        tn = self.database.getConfigStr('TRACKNAME')
        if tn is not None and len(tn) > 0:
            self.home.headline.setText(tn + ' ' + self.tr('RMS'))
        else:
            self.home.headline.setText(self.tr('Carrera RMS'))
        self.home.buildCarList()
        for i in range(0, 6):
            try:
                n = self.drivers[i]['name']
                c = self.drivers[i]['car']
                self.home.setOk(i, True)
                self.home.setName(i, n)
                self.home.setCar(i, c)
            except KeyError:
                self.home.setOk(i, False)
                self.home.setName(i, '')
                self.home.setCar(i, '')

        self.main_stack.setCurrentWidget(self.home)
        self.stopAllThreads()
        self.startIdleThread()

    def showResultList(self, cu_drivers):
        self.stopAllThreads()
        self.resultlist.resetDrivers()
        self.resultlist.addDrivers(self.drivers, cu_drivers,
                                   self.grid.sort_mode)
        self.main_stack.setCurrentWidget(self.resultlist)

    def showGrid(self):
        self.grid.resetDrivers()
        seq_found = None
        for addr, driver in self.drivers.items():
            if self.comp_mode in [
                    COMP_MODE__QUALIFYING_LAPS_SEQ,
                    COMP_MODE__QUALIFYING_TIME_SEQ
            ]:
                if seq_found is None and \
                        driver['qualifying_cu_driver'] is None:
                    self.grid.addDriver(addr, driver)
                    seq_found = addr
            else:
                self.grid.addDriver(addr, driver)

        self.main_stack.setCurrentWidget(self.grid)
        self.stopAllThreads()

    def startQualifying(self, mode, duration):
        self.comp_mode = mode
        self.comp_duration = duration
        self.grid.sort_mode = SORT_MODE__LAPTIME
        self.showGrid()
        self.bridge.reset(self.drivers, mode)
        self.startStartSignalThread()

    def startRace(self, mode, duration):
        self.comp_mode = mode
        self.comp_duration = duration
        self.grid.sort_mode = SORT_MODE__LAPS
        self.showGrid()
        self.bridge.reset(self.drivers, mode)
        self.startStartSignalThread()

    def startTraining(self):
        self.comp_mode = COMP_MODE__TRAINING
        self.grid.sort_mode = SORT_MODE__LAPTIME
        self.showGrid()
        self.bridge.reset(self.drivers, self.comp_mode)
        self.startStartSignalThread()

    @pyqtSlot()
    def startAfterSignal(self):
        self.startBridgeThread()

    @pyqtSlot(int, list)
    def comp_finished_all(self, rtime, drivers):
        tdrivers = drivers
        self.stopAllThreads()
        if self.comp_mode in [
                COMP_MODE__QUALIFYING_LAPS_SEQ, COMP_MODE__QUALIFYING_TIME_SEQ
        ]:
            seq_found = []
            next_driver = None
            for addr, driver in self.drivers.items():
                if driver['qualifying_cu_driver'] is not None:
                    seq_found.append(driver)
                    if tdrivers[addr].time is not None:
                        driver['qualifying_cu_driver'] = tdrivers[addr]
                elif next_driver is None:
                    next_driver = driver
            if len(seq_found) == len(self.drivers):
                for addr, driver in self.drivers.items():
                    tdrivers[addr] = driver['qualifying_cu_driver']
                self.showResultList(tdrivers)
            else:
                self.qualifyingseq.setDrivers(seq_found[-1], next_driver)
                self.main_stack.setCurrentWidget(self.qualifyingseq)
        else:
            self.showResultList(drivers)

    @pyqtSlot(int, list)
    def comp_state_update(self, rtime, cu_drivers):
        if self.comp_mode == COMP_MODE__TRAINING:
            self.grid.training_state.showTime(rtime=rtime)
        elif self.comp_mode == COMP_MODE__RACE_LAPS:
            self.grid.race_state.handleUpdateLaps(rtime=rtime,
                                                  laps=self.comp_duration,
                                                  cu_drivers=cu_drivers)
        elif self.comp_mode == COMP_MODE__RACE_TIME:
            self.grid.race_state.handleUpdateTime(rtime=rtime,
                                                  minutes=self.comp_duration,
                                                  cu_drivers=cu_drivers)
        elif self.comp_mode == COMP_MODE__QUALIFYING_LAPS:
            self.grid.qualifying_state.handleUpdateLaps(
                rtime=rtime, laps=self.comp_duration, cu_drivers=cu_drivers)
        elif self.comp_mode == COMP_MODE__QUALIFYING_TIME:
            self.grid.qualifying_state.handleUpdateTime(
                rtime=rtime, minutes=self.comp_duration, cu_drivers=cu_drivers)
        elif self.comp_mode == COMP_MODE__QUALIFYING_LAPS_SEQ:
            self.grid.qualifying_state.handleUpdateLapsSeq(
                rtime=rtime, laps=self.comp_duration, cu_drivers=cu_drivers)
        elif self.comp_mode == COMP_MODE__QUALIFYING_TIME_SEQ:
            self.grid.qualifying_state.handleUpdateTimeSeq(
                rtime=rtime, minutes=self.comp_duration, cu_drivers=cu_drivers)

    @pyqtSlot(int)
    def show_state(self, mode):
        binMode = "{0:04b}".format(mode)
        fuelmode = ''
        pitlane = ''
        lapcounter = ''
        if binMode[2] == '1':
            fuelmode = self.tr('Real')
        elif binMode[3] == '1':
            fuelmode = self.tr('On')
        else:
            fuelmode = self.tr('Off')
        if binMode[1] == '1':
            pitlane = self.tr('Exists')
        else:
            pitlane = self.tr('Missing')
        if binMode[0] == '1':
            lapcounter = self.tr('Exists')
        else:
            lapcounter = self.tr('Missing')
        self.statusBar().showMessage(
            self.tr('CU version: ') + str(self.cuv) + self.tr(', Pitlane: ') +
            str(pitlane) + self.tr(', Fuelmode: ') + str(fuelmode) +
            self.tr(', Lapcounter: ') + str(lapcounter))

    def closeEvent(self, event):
        result = QMessageBox.question(
            self, self.tr("Confirm Exit..."),
            self.tr("Are you sure you want to exit ?"),
            QMessageBox.Yes | QMessageBox.No)
        event.ignore()

        if result == QMessageBox.Yes:
            event.accept()
            self.stopAllThreads()
            self.tts.stop = True
            self.tts.wait()
Exemplo n.º 34
0
 def set_opcode_variety_from_database(self):
     # Order of bag-of-instructions feature vector
     db_handler = DatabaseHandler()
     opcode_variety_list = db_handler.extract_opcode_variety()
     self.set_opcode_variety(opcode_variety_list)
Exemplo n.º 35
0
 def set_ngram_variety_from_database(self, N):
     db_handler = DatabaseHandler()
     ngram_variety_list = db_handler.extract_ngram_variety(N)
     self.set_ngram_variety(ngram_variety_list)
Exemplo n.º 36
0
class TheServant(socketserver.StreamRequestHandler):
    def __init__(self, request, client_address, server):
        super().__init__(request, client_address, server)

    def setup(self):
        socketserver.StreamRequestHandler.setup(self)
        self.the_database = DatabaseHandler()
        self.the_database.connect()

    def finish(self):
        socketserver.StreamRequestHandler.finish(self)
        self.the_database.close()

    # handle requests
    def handle(self):
        try:
            print("Incoming connection...")
            print(self.the_database.db.getTable("location"))
            print(self.the_database.db.getTable("location_history"))
            self._getReq()
        except BadRequestError as e:
            print(e.error_message)

    # parse an incoming request and dispatch it to its handler
    def _getReq(self):
        # List of supported requests, each term in the dictionary matches to a
        # method (this is our proper handler for each type of request)
        SUPPORTED_REQUESTS = {
            "location_push": self.do_push,
            "location_pull": self.do_pull
        }
        # parse the request
        req = self.rfile.readline().strip().decode('utf-8')
        try:
            # Try to load the request and execute the method defined by it
            json_object = json.loads(req)
            print("JSON parsed")
            SUPPORTED_REQUESTS[json_object['query']](json_object)
        except ValueError:
            raise BadRequestError("Not JSON")
        except KeyError:
            raise BadRequestError("Bad Formatted Request")

    # send a response; takes a JSON string as parameter
    def _sendResponse(self, json_response):
        self.wfile.write(bytearray(json_response, 'utf-8'))

    # Insert a location object into the database
    def do_push(self, json_object):
        print("Trying push...")
        try:
            # Try to parse the location object
            location = json_object["location"]
        except KeyError:
            # In case one of the necessary formats is not found
            raise BadRequestError("No location field inside request")
        else:
            # Create the location Object
            try:
                username = location["username"]
                latitude = location["latitude"]
                longitude = location["longitude"]
            except KeyError:
                # tell the client the location object was malformed before raising
                self._sendResponse(json.dumps({'ok': False}))
                raise BadRequestError("Bad formatted location Object")
            else:
                # Create a location Object
                location_object = LocationPoint(username, latitude, longitude)

                # insert the object into the database
                self.the_database.push(location_object)
                print("Added the following ", location_object._username)
                # Generate and send the JSON response
                json_response = json.dumps({'ok': True})
            self._sendResponse(json_response)

    # Handle the pull requests
    def do_pull(self, json_object):
        try:
            # Fetch the location for each user
            usernames = json_object["usernames"]
        except KeyError:
            raise BadRequestError("No field \"usernames\"")
        else:
            # Get the matches from the database
            results = self.the_database.pull(usernames)
            # Generate the JSON response
            json_response = json.dumps({'ok': True, 'locations': results})
            # Send the response
            self._sendResponse(json_response)
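For reference, a minimal client sketch for the newline-terminated JSON protocol handled above; the host and port are placeholders:

# Hypothetical client; the server reads one line of JSON per connection.
import json
import socket

def push_location(host, port, username, latitude, longitude):
    request = {"query": "location_push",
               "location": {"username": username,
                            "latitude": latitude,
                            "longitude": longitude}}
    with socket.create_connection((host, port)) as sock:
        sock.sendall((json.dumps(request) + "\n").encode("utf-8"))
        return json.loads(sock.recv(4096).decode("utf-8"))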
Exemplo n.º 37
0
class DataReportingThread(threading.Thread):
    def __init__(self, run_event, queue, lcd, version_msg, fadeout_time,
                 db_name):
        threading.Thread.__init__(self)

        self.run_event = run_event
        self.queue = queue

        self.fadeout_time = fadeout_time

        self.lcd = lcd

        self.backlight_status = False
        self.message_list = list()
        self.current_message_num = 0

        self.loop_sleep = 0.10
        self.timeout_counter = 0

        self.version_msg = version_msg

        # LCD messages
        self.lcd.message(self.version_msg)

        self.display_msg = 0
        self.display_wx = 1
        self.display_wheel = 2
        self.display_clock = 3
        self.display_version = 4
        self.last_message = 4
        self.first_message = 0

        self.db = DatabaseHandler(db_name)

        if os.path.exists(db_name) is False:
            logger.info(
                "Database file {0} not found, creating".format(db_name))
            self.db.create_database()

    def run(self):
        """
        Run handler for the data reporting subprocess.  This checks for data coming over a queue; if there is any,
        it'll pull the data, save it to the database, and update the LCD.
        :return:
        """
        last_datetime = None
        while self.run_event.is_set():
            try:
                self.timeout_counter += self.loop_sleep

                # If the backlight is on, check to see if it's been on longer than the "fadeout" time.  If so
                # shut it off
                if self.timeout_counter >= self.fadeout_time:
                    if self.backlight_status is True:
                        self.lcd.set_backlight(1)
                        self.backlight_status = False
                    self.timeout_counter = 0

                # If we're showing the clock, update it.  We need to do it here before we wait on the queue,
                # otherwise we'll essentially block behind the queue.get() call
                if self.current_message_num == self.display_clock:
                    current_time = datetime.datetime.now().strftime("%H:%M")
                    if current_time != last_datetime:
                        self.update_time()
                        last_datetime = current_time

                # Wait here for something to show up on the queue.  If the timeout expires with nothing, it'll
                # throw a Queue.Empty exception
                data = self.queue.get(True, self.loop_sleep)

                #                logger.info("LCD_HANDLER: {0}".format(data))
                # If we have some data, and it's what we're expecting...
                if 'data_type' in data:
                    self.handle_update(data)

            except Queue.Empty:
                # The queue.get with a timeout throws a Queue.Empty exception. Just continue if that happens
                pass

        logger.info("Thread {0} closing".format(
            threading.currentThread().name))

    def handle_update(self, data):
        """
        :param self:
        :param data: Dictionary which holds the sensor data
        :return:
        """
        data_type = data['data_type']

        if data_type == 'temperature':
            if self.current_message_num == self.display_wx:
                self.update_environment()

        elif data_type == 'light':
            if self.current_message_num == self.display_wx:
                self.update_environment()
        elif data_type == 'wheel':
            if self.current_message_num == self.display_wheel:
                self.update_wheel()
        elif data_type == 'button':
            self.update_lcd()
            self.timeout_counter = 0

        self.db.save_to_database(data)

    def update_lcd(self):
        """

        :return:
        """
        if self.backlight_status is False:
            self.backlight_status = True
            self.lcd.set_backlight(0)
        else:
            self.current_message_num += 1
            if self.current_message_num > self.last_message:
                self.current_message_num = self.first_message

            logger.info("Current screen: {0}".format(self.current_message_num))

            if self.current_message_num == self.display_version:
                self.update_version()
            elif self.current_message_num == self.display_wx:
                self.update_environment()
            elif self.current_message_num == self.display_clock:
                self.update_time()
            elif self.current_message_num == self.display_wheel:
                self.update_wheel()
            elif self.current_message_num == self.display_msg:
                self.update_message()

    def update_message(self):
        hour = datetime.datetime.now().hour
        if hour < 12:
            greeting = "Good Morning"
        elif 12 <= hour < 18:
            greeting = "Good Afternoon"
        else:
            greeting = "Good Evening"

        message = "S'more Monitor\n{0}".format(greeting)
        self.lcd.clear()
        self.lcd.message(message)

    def update_version(self):
        message = self.version_msg
        self.lcd.clear()
        self.lcd.message(message)

    def update_wheel(self):
        """
        Retrieves data on the wheel and displays it
        :return:
        """

        # Get the database info - last 24 hours of data
        wheel_data = self.db.get_wheel_data()
        if wheel_data is not None:
            distance = wheel_data[0] if wheel_data[0] is not None else 0
            revs = wheel_data[1] if wheel_data[1] is not None else 0
            moving_time = wheel_data[2] if wheel_data[2] is not None else 0

            #  Watch for a divide by zero error - should only happen if we have no data
            try:
                speed = distance / moving_time * 3600
            except ZeroDivisionError:
                speed = 0

            # Format the display.  We're showing revolutions and distance on one line,
            # elapsed time and average speed on the second line
            turns_display = "{revs:.0f}t".format(revs=revs)
            distance_display = "{dist:>{just}.3f}mi".format(
                dist=distance, just=16 - len(turns_display) - 1 - 2)
            time_display = "{time:,.0f}s".format(time=moving_time)
            speed_display = "{speed:{just}.2f}mph".format(
                speed=speed, just=16 - len(time_display) - 1 - 2)

            message = "{0} {1}\n{2}{3}".format(turns_display, distance_display,
                                               time_display, speed_display)
        else:
            message = "Not Available"

        self.lcd.clear()
        self.lcd.message(message)

    def update_environment(self):
        """

        :return:
        """

        # Get the latest environment data from the database
        data = self.db.get_environment_data()
        if data is not None:
            temp_f = data[0]
            humidity = data[1]
            lux = data[2]

            temp_display = "{0}{1}{2}".format(temp_f, chr(223), 'F')
            humidity_display = "{0:.1f}%".format(humidity)
            lux_display = "{0} lux".format(lux)

            temp_len = len(temp_display)
            message = "{0} {1:>{just}}\n{lux:^16}".format(temp_display,
                                                          humidity_display,
                                                          just=16 - temp_len -
                                                          1,
                                                          lux=lux_display)
        else:
            message = "Not Available"
        self.lcd.clear()
        self.lcd.message(message)

    def update_time(self):
        time_now = datetime.datetime.now()
        display_time = time_now.strftime("%a %m/%d/%Y\n%I:%M %p")
        message = display_time
        self.lcd.clear()
        self.lcd.message(message)
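A sketch of the producer side that would feed this thread. The payload keys other than 'data_type' are assumptions; the thread only dispatches on 'data_type' and hands the whole dict to save_to_database():

# Hypothetical producer; run_event and queue mirror the constructor arguments.
import threading
import Queue  # Python 2 module name, matching the except clause above

run_event = threading.Event()
run_event.set()
queue = Queue.Queue()

queue.put({'data_type': 'temperature', 'value': 72.4})  # 'value' is assumed
queue.put({'data_type': 'button'})  # lights the backlight / cycles screens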
Exemplo n.º 38
0
 def __init__(self):
     self.db_handler = DatabaseHandler()
     self.cli = CommandLineInterface()
Exemplo n.º 39
0
        opcode_sequence = db_handler.extract_opcode_sequence(file_id)
        if extraction_method == 'bag-of-opcodes':
            feature_vector = self.extract_bag_of_opcodes(opcode_sequence)
        elif extraction_method == '2-gram':
            feature_vector = self.extract_ngram(opcode_sequence, 2)
        elif extraction_method == '3-gram':
            feature_vector = self.extract_ngram(opcode_sequence, 3)
        elif extraction_method == 'proposed':
            subroutine_sequence = db_handler.extract_subroutine_sequence(file_id)
            average_subroutine_length = self.extract_average_subroutine_length(subroutine_sequence)
            location_sequence = db_handler.extract_location_sequence(file_id)
            average_basicblock_length = self.extract_average_basicblock_length(location_sequence)
            # construct feature_vector here
        else:
            sys.stderr.write('Error: no extraction method "' + extraction_method + '" found.\n')
            sys.exit()
        
        return feature_vector

if __name__ == '__main__':
    db_handler = DatabaseHandler()
    opcode_sequence = db_handler.extract_opcode_sequence(500)
    # bigrams = nltk.bigrams(opcode_sequence)
    # fd = nltk.FreqDist(bigrams)
    # cfd = nltk.ConditionalFreqDist(bigrams)
    # cfd[u'cmp'].plot(50)
    trigrams = list(nltk.trigrams(opcode_sequence))  # materialize; printing would otherwise exhaust the generator
    print trigrams
    # fd = nltk.FreqDist(trigrams)
    cfd = nltk.ConditionalFreqDist(trigrams)
    cfd[u'cmp'].plot(50)
Exemplo n.º 40
0
import os
import json
import activity
import time
import requests
from tornado import ioloop, web
from datetime import datetime
from database import DatabaseHandler
from authentication import Authentication

ip = os.environ.get("SP_BOT_SERVICE_HOST", None)  # access OpenShift environment host IP
host_port = int(os.environ.get("SP_BOT_SERVICE_PORT", 8000))  # access OpenShift environment PORT
db_handler = DatabaseHandler()  # initialize database handler object
authenticator = Authentication()  # initialize authentication object

class MainHandler(web.RequestHandler):

    # --- REQUEST HANDLERS ---
    def get(self, *args, **kwargs):  # incoming GET request
        print("\nParsing GET request...")
        self.write("Hello, world from Diagnostic Bot!")

    def post(self, *args, **kwargs):  # incoming POST request
        print("\n[{}] Received POST Request from client...".format(datetime.now()))

        # (1) Decode the POST data -> a dictionary:
        json_data = self.request.body.decode('utf-8')  # obtain POST body from request, decode from bytes -> Str
        post_body = json.loads(json_data)  # convert JSON data -> dict

        # (2) Authenticate incoming message & generate a response header:
        auth_header = self.request.headers.get('Authorization', None)
Exemplo n.º 41
0
class GUIDatabase:
    def __init__(self, queue):
        self.queue = queue  # queue used to communicate with other windows

        self.layout = None
        self.window = None

        self.db_handler = DatabaseHandler('puzzles.db')
        self.selected_data = []

    def set_layout(self):
        diff_layout = [[sg.Checkbox('Very easy', size=(15, 1))], [sg.Checkbox('Easy', default=True)],
                       [sg.Checkbox('Medium', size=(15, 1))], [sg.Checkbox('Hard', default=True)],
                       [sg.Checkbox('Very hard', size=(15, 1))]]
        results_layout = [[sg.Text('ID\tRows\tCols\tDiff', size=(30, 1))],
                          [sg.Listbox(key='-SEL_PUZZLES-', values=[], select_mode=sg.LISTBOX_SELECT_MODE_EXTENDED, size=(30, 15))],
                          [sg.Button(button_text='Load all puzzles'), sg.Button(button_text='Load selected puzzles')]]

        self.layout = [[sg.Text('Search in database'), sg.Text('', key='-OUTPUT-')],
                       [sg.Frame('Difficulty', diff_layout)],
                       [sg.Text('No. of rows', size=(10, 1)), sg.InputCombo(('<', '<=', '=', '>=', '>'), default_value='=', size=(3, 1)), sg.InputCombo([str(5*i) for i in range(1, 21)], default_value=5, size=(8, 1))],
                       [sg.Text('No. of cols', size=(10, 1)), sg.InputCombo(('<', '<=', '=', '>=', '>'), default_value='=', size=(3, 1)), sg.InputCombo([str(5 * i) for i in range(1, 21)], default_value=5, size=(8, 1))],
                       [sg.Submit(button_text='Search', tooltip='Click to submit this form')],
                       [sg.Frame('Results', results_layout)]
        ]

        self.window = sg.Window('Database browser', self.layout, finalize=True, resizable=True)

    def prepare_select(self, values):
        # build the SQL "(a,b,c)" list by hand; str(tuple(...)) would emit "(1,)" for a single element
        difficulties = [i for i in range(0, 5) if values[i]]
        difficulties = '(' + ','.join(str(d) for d in difficulties) + ')'
        rows = values[6]
        cols = values[8]

        sql = f'SELECT id, rows, cols, difficulty from puzzle where difficulty in {difficulties} and rows{values[5]}{rows} and cols{values[7]}{cols} order by id;'
        data = self.db_handler.query_sql(sql)

        # parse data for displaying
        parsed_data = [f'{d[0]}\t\t{d[1]}\t\t{d[2]}\t{d[3]}' for d in data]
        self.window['-SEL_PUZZLES-'].update(values=parsed_data)

        # save IDs of selected puzzles
        self.selected_data = [d[0] for d in data]

    def event_handler(self):
        event, values = self.window.read(timeout=0)

        if event in (None,):
            return False

        if event == 'Load all puzzles':
            if len(self.selected_data) > 0:
                self.queue.append(['ids', self.selected_data])
                self.window.close()
                return False

        if event == 'Load selected puzzles':
            selected = self.window['-SEL_PUZZLES-'].get_indexes()
            self.queue.append(['ids', [self.selected_data[i] for i in selected]])
            self.window.close()
            return False

        if event == 'Search':
            self.prepare_select(values)
        return True
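A sketch of the loop that would drive this window; event_handler returns False once the window should close, and the shared queue list receives the selected puzzle IDs:

# Hypothetical driver loop for the window above.
shared_queue = []
browser = GUIDatabase(shared_queue)
browser.set_layout()
while browser.event_handler():
    pass
# After the window closes, shared_queue may hold an ['ids', [...]] entry.
print(shared_queue)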
Exemplo n.º 42
0
class ApiPoller(threading.Thread):
    database = None
    last_seen_id = 0  # gid
    last_seen_date = 0  # Unix time stamp
    lock = threading.Lock()
    keep_polling = True

    def __init__(self):
        super(ApiPoller, self).__init__()
        self.__stop_event = threading.Event()
        dbname = os.environ['DB_NAME']
        dbuser = os.environ['DB_USER']
        dbpass = os.environ['DB_PASS']
        dbhost = os.environ['DB_HOST']
        dbport = os.environ['DB_PORT']
        self.database = DatabaseHandler(dbname=dbname,
                                        user=dbuser,
                                        password=dbpass,
                                        host=dbhost,
                                        port=dbport)

    def start_polling_api(self):
        self.database.create_tables()
        last_seen = self.database.get_last_seen(570)
        self.last_seen_id = last_seen[1]
        self.last_seen_date = last_seen[2]
        print("Started listening for new updates")
        while self.keep_polling:
            thread = threading.Thread(target=self.request_update_info)
            thread.start()
            time.sleep(1)

    def stop_polling_api(self):
        self.__stop_event.set()
        self.keep_polling = False

    def request_update_info(self):
        response = requests.get("http://api.steampowered.com/ISteamNews/GetNewsForApp/v0002/?appid=570&count=1"
                                "&maxlength=300&format=json")
        response_as_json = response.json()
        last_update_item = response_as_json['appnews']['newsitems'][0]
        if self._should_update(last_update_item):
            self.update(last_update_item)

    def update(self, update_item):
        with self.lock:
            self.last_seen_id = update_item['gid']
            self.last_seen_date = update_item['date']
            print("New last seen id: ", self.last_seen_id)
            print("New last seen date: ", self.last_seen_date)
            self.database.update_last_seen(570, self.last_seen_id, self.last_seen_date)
            _send_webhook_update(update_item['url'])

    def _should_update(self, latest_update_item):
        gid = latest_update_item['gid']
        date = latest_update_item['date']
        feedname = latest_update_item['feedname']
        return self.last_seen_date < date and self.last_seen_id != gid and _is_relevant_feed(feedname)
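A sketch of starting and stopping the poller. ApiPoller reads its connection settings from the environment, so the DB_* variables must be set before construction:

# Hypothetical usage; DB_NAME, DB_USER, DB_PASS, DB_HOST and DB_PORT must exist.
poller = ApiPoller()
try:
    poller.start_polling_api()  # blocks, polling the Steam news API every second
except KeyboardInterrupt:
    poller.stop_polling_api()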
Exemplo n.º 43
0
 def __init__(self):
     self.database = DatabaseHandler()
     self.main_menu = self._init_main_menu()
     self._run_menu(self.main_menu)
Exemplo n.º 44
0
 def setup(self):
     socketserver.StreamRequestHandler.setup(self)
     self.the_database = DatabaseHandler()
     self.the_database.connect()
Exemplo n.º 45
0
class HPS:
    """
    Hyper Parameter Search
    
    Maps pylearn2 to a postgresql database. The idea is to accumulate 
    structured data concerning the hyperparameter optimization of 
    pylearn2 models and various datasets. With enough such structured data,
    one could train a meta-model that could be used for efficient 
    sampling of hyper parameter configurations.
    
    Jobman doesn't provide this since its data is unstructured and 
    decentralized. To centralize hyper parameter data, we would need to 
    provide it in the form of a RESTful web service API.
    
    For now, I just use it instead of the jobman database to try various 
    hyperparameter configurations.
    
    """
    def __init__(self, 
                 dataset_name,
                 task_id,
                 train_ddm, valid_ddm, 
                 log_channel_names,
                 test_ddm = None,
                 save_prefix = "model_",
                 mbsb_channel_name = None):
        self.dataset_name = dataset_name
        self.task_id = task_id
        
        self.train_ddm = train_ddm
        self.valid_ddm = valid_ddm
        self.test_ddm = test_ddm
        self.monitoring_dataset = {'train': train_ddm}
        
        self.nvis = self.train_ddm.get_design_matrix().shape[1]
        self.nout = self.train_ddm.get_targets().shape[1]
        self.ntrain = self.train_ddm.get_design_matrix().shape[0]
        self.nvalid = self.valid_ddm.get_design_matrix().shape[0]
        self.ntest = 0
        if self.test_ddm is not None:
            self.ntest = self.test_ddm.get_design_matrix().shape[0]
        
        self.log_channel_names = log_channel_names
        self.save_prefix = save_prefix
        # TODO store this in data for each experiment or dataset
        self.mbsb_channel_name = mbsb_channel_name
        
        print "nvis, nout :", self.nvis, self.nout
        print "ntrain :", self.ntrain
        print "nvalid :", self.nvalid
        
    def run(self, start_config_id = None):
        self.db = DatabaseHandler()
        print 'running'
        while True:
            
            (config_id, config_class, model, learner, algorithm) \
                = self.get_config(start_config_id)
            start_config_id = None
            print 'learning'     
            learner.main_loop()
            
            self.set_end_time(config_id)
    def get_config(self, start_config_id = None):
        if start_config_id is not None:
            (config_id,config_class,random_seed,ext_array) \
                = self.select_config(start_config_id)
        else:
            (config_id,config_class,random_seed,ext_array) \
                = self.select_next_config()
        # model (could also return Cost)
        (weight_decay, model, batch_size) \
            = self.get_model(config_id, config_class)
        
        # prepare monitor
        self.prep_valtest_monitor(model, batch_size)
        
        # extensions
        extensions = self.get_extensions(ext_array, config_id)
        
        costs = [MethodCost(method='cost_from_X', supervised=True)]
        if weight_decay is not None:
            costs.append(WeightDecay(coeffs=weight_decay))
        if len(costs) > 1:
            cost = SumOfCosts(costs)
        else:
            cost = costs[0]
    
        # training algorithm
        algorithm = self.get_trainingAlgorithm(config_id, config_class, cost)
        
        print 'sgd complete'
        learner = Train(dataset=self.train_ddm,
                        model=model,
                        algorithm=algorithm,
                        extensions=extensions)
        return (config_id, config_class, model, learner, algorithm)
    def get_classification_accuracy(self, model, minibatch, target):
        Y = model.fprop(minibatch, apply_dropout=False)
        return T.mean(T.cast(T.eq(T.argmax(Y, axis=1), 
                               T.argmax(target, axis=1)), dtype='int32'),
                               dtype=config.floatX)
    def prep_valtest_monitor(self, model, batch_size):
        if self.topo_view:
            print "topo view"
            minibatch = T.as_tensor_variable(
                            self.valid_ddm.get_batch_topo(batch_size), 
                            name='minibatch'
                        )
        else:
            print "design view"
            minibatch = T.as_tensor_variable(
                            self.valid_ddm.get_batch_design(batch_size), 
                            name='minibatch'
                        )
        target = T.matrix('target')
        Accuracy = self.get_classification_accuracy(model, minibatch, target)           
        monitor = Monitor.get_monitor(model)
        
        monitor.add_dataset(self.valid_ddm, 'sequential', batch_size)
        monitor.add_channel("Validation Classification Accuracy",
                            (minibatch, target),
                            Accuracy,
                            self.valid_ddm)
        monitor.add_channel("Validation Missclassification",
                            (minibatch, target),
                            1.0-Accuracy,
                            self.valid_ddm)
                            
        if self.test_ddm is not None:
            monitor.add_dataset(self.test_ddm, 'sequential', batch_size)
            monitor.add_channel("Test Classification Accuracy",
                                (minibatch, target),
                                Accuracy,
                                self.test_ddm)
                                
    def get_trainingAlgorithm(self, config_id, config_class, cost):
        if 'sgd' in config_class:
            (learning_rate,batch_size,init_momentum,train_iteration_mode) \
                = self.select_train_sgd(config_id)
            num_train_batch = (self.ntrain/batch_size)
            print "num training batches:", num_train_batch
            termination_criterion \
                = self.get_termination(config_id, config_class)
            return SGD( learning_rate=learning_rate,
                        cost=cost,
                        batch_size=batch_size,
                        batches_per_iter=num_train_batch,
                        monitoring_dataset=self.monitoring_dataset,
                        termination_criterion=termination_criterion,
                        init_momentum=init_momentum,
                        train_iteration_mode=train_iteration_mode) 
        else:
            raise HPSData("training class not supported:"+str(config_class))
    def get_model(self, config_id, config_class):
        if 'mlp' in config_class:
            (layer_array,batch_size,input_space_id,dropout_include_probs,
                dropout_scales,dropout_input_include_prob,
                 dropout_input_scale,weight_decay,nvis) \
                    = self.select_model_mlp(config_id)
            input_space = None
            self.topo_view = False
            if input_space_id is not None:
                input_space = self.get_space(input_space_id)
                if isinstance(input_space, Conv2DSpace):
                    self.topo_view = True
                assert nvis is None
            if (input_space_id is None) and (nvis is None):
                # default to training set nvis
                nvis = self.nvis
            layers = []
            for layer_id in layer_array:
                layer = self.get_layer(layer_id)
                layers.append(layer)
            # output layer is always called "output":
            layers[-1].layer_name = "output"
            # create MLP:
            model = MLP(layers=layers,
                        input_space=input_space,nvis=nvis,
                        batch_size=batch_size,
                        dropout_include_probs=dropout_include_probs,
                        dropout_scales=dropout_scales,
                        dropout_input_include_prob=dropout_input_include_prob,
                        dropout_input_scale=dropout_input_scale)   
            print 'mlp is built'
            return (weight_decay, model, batch_size)
    def get_layer(self, layer_id):
        """Creates a Layer instance from its definition in the database."""
        (layer_class, layer_name) = self.select_layer(layer_id)
        if layer_class == 'maxout':
            (num_units,num_pieces,pool_stride,randomize_pools,irange,
                 sparse_init,sparse_stdev,include_prob,init_bias,W_lr_scale,
                 b_lr_scale,max_col_norm, max_row_norm) \
                     = self.select_layer_maxout(layer_id)
            return Maxout(num_units=num_units,num_pieces=num_pieces,
                           pool_stride=pool_stride,layer_name=layer_name,
                           randomize_pools=randomize_pools,
                           irange=irange,sparse_init=sparse_init,
                           sparse_stdev=sparse_stdev,
                           include_prob=include_prob,
                           init_bias=init_bias,W_lr_scale=W_lr_scale, 
                           b_lr_scale=b_lr_scale,max_col_norm=max_col_norm,
                           max_row_norm=max_row_norm)
        elif layer_class == 'softmax':
            (n_classes,irange,istdev,sparse_init,W_lr_scale,b_lr_scale, 
                 max_row_norm,no_affine,max_col_norm) \
                     = self.select_layer_softmax(layer_id) 
            return Softmax(n_classes=n_classes,irange=irange,istdev=istdev,
                            sparse_init=sparse_init,W_lr_scale=W_lr_scale,
                            b_lr_scale=b_lr_scale,max_row_norm=max_row_norm,
                            no_affine=no_affine,max_col_norm=max_col_norm,
                            layer_name=layer_name)
        elif layer_class == 'rectifiedlinear':
            (dim,irange,istdev,sparse_init,sparse_stdev,include_prob,
                init_bias,W_lr_scale,b_lr_scale,left_slope,max_row_norm,
                max_col_norm,use_bias)\
                    = self.select_layer_rectifiedlinear(layer_id)
            return RectifiedLinear(dim=dim,irange=irange,istdev=istdev,
                                    sparse_init=sparse_init,
                                    sparse_stdev=sparse_stdev,
                                    include_prob=include_prob,
                                    init_bias=init_bias,
                                    W_lr_scale=W_lr_scale,
                                    b_lr_scale=b_lr_scale,
                                    left_slope=left_slope,
                                    max_row_norm=max_row_norm,
                                    max_col_norm=max_col_norm,
                                    use_bias=use_bias,
                                    layer_name=layer_name)
        elif layer_class == 'convrectifiedlinear':
            (output_channels,kernel_width,pool_width,pool_stride,irange,
                border_mode,sparse_init,include_prob,init_bias,W_lr_scale,
                b_lr_scale,left_slope,max_kernel_norm) \
                    = self.select_layer_convrectifiedlinear(layer_id) 
            return ConvRectifiedLinear(output_channels=output_channels,
                        kernel_shape=(kernel_width, kernel_width),
                        pool_shape=(pool_width, pool_width),
                        pool_stride=(pool_stride, pool_stride),
                        layer_name=layer_name, irange=irange,
                        border_mode=border_mode,sparse_init=sparse_init,
                        include_prob=include_prob,init_bias=init_bias,
                        W_lr_scale=W_lr_scale,b_lr_scale=b_lr_scale,
                        left_slope=left_slope,
                        max_kernel_norm=max_kernel_norm)
        elif layer_class == 'maxoutconvc01b':
            (num_channels,num_pieces,kernel_width,pool_width,pool_stride,
                irange,init_bias,W_lr_scale,b_lr_scale,pad,fix_pool_shape,
                fix_pool_stride,fix_kernel_shape,partial_sum,tied_b,
                max_kernel_norm,input_normalization,output_normalization) \
                    = self.select_layer_maxoutConvC01B(layer_id) 
            return MaxoutConvC01B(layer_name=layer_name,
                                  num_channels=num_channels,
                                  num_pieces=num_pieces,
                                  kernel_shape=(kernel_width,kernel_width),
                                  pool_shape=(pool_width, pool_width),
                                  pool_stride=(pool_stride,pool_stride),
                                  irange=irange,init_bias=init_bias,
                                  W_lr_scale=W_lr_scale,
                                  b_lr_scale=b_lr_scale,pad=pad,
                                  fix_pool_shape=fix_pool_shape,
                                  fix_pool_stride=fix_pool_stride,
                                  fix_kernel_shape=fix_kernel_shape,
                                  partial_sum=partial_sum,tied_b=tied_b,
                                  max_kernel_norm=max_kernel_norm,
                                  input_normalization=input_normalization,
                                  output_normalization=output_normalization)
        elif layer_class == 'sigmoid':
            (dim,irange,istdev,sparse_init,sparse_stdev,include_prob,init_bias,
                W_lr_scale,b_lr_scale,max_col_norm,max_row_norm) \
                    = self.select_layer_sigmoid(layer_id)
            return Sigmoid(layer_name=layer_name,dim=dim,irange=irange,
                           istdev=istdev,
                           sparse_init=sparse_init,sparse_stdev=sparse_stdev,
                           include_prob=include_prob,init_bias=init_bias,
                           W_lr_scale=W_lr_scale,b_lr_scale=b_lr_scale,
                           max_col_norm=max_col_norm,
                           max_row_norm=max_row_norm)
        else:
            raise HPSData("Layer class not supported: "+str(layer_class))
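    # A minimal sketch (not from the source) of how this if/elif dispatch
    # could be made table-driven; the mapping below is an assumption that
    # simply mirrors the layer_class strings and select_layer_* methods
    # already present in this class:
    #
    #     builders = {
    #         'maxout': self.select_layer_maxout,
    #         'softmax': self.select_layer_softmax,
    #         'rectifiedlinear': self.select_layer_rectifiedlinear,
    #         'convrectifiedlinear': self.select_layer_convrectifiedlinear,
    #         'maxoutconvc01b': self.select_layer_maxoutConvC01B,
    #         'sigmoid': self.select_layer_sigmoid,
    #     }
    #     params = builders[layer_class](layer_id)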
    def get_termination(self, config_id, config_class):
        terminations = []
        if 'epochcounter' in config_class:
            print 'epoch_counter'
            max_epochs = self.select_term_epochCounter(config_id)
            terminations.append(EpochCounter(max_epochs))
        if 'monitorbased' in config_class:
            print 'monitor_based'
            (proportional_decrease, max_epochs, channel_name) \
                = self.select_term_monitorBased(config_id)
            terminations.append(
                MonitorBased(
                    prop_decrease = proportional_decrease, 
                    N = max_epochs, channel_name = channel_name
                )
            )
        if len(terminations) > 1:
            return And(terminations)
        elif len(terminations) == 0:
            return None
        return terminations[0]
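    # Hedged usage sketch: get_termination returns a single criterion, an
    # And() over several, or None. A standalone equivalent, where the
    # channel name 'valid_y_misclass' is a hypothetical example value:
    #
    #     from pylearn2.termination_criteria import And, EpochCounter, \
    #         MonitorBased
    #     term = And([EpochCounter(max_epochs=100),
    #                 MonitorBased(prop_decrease=0.01, N=10,
    #                              channel_name='valid_y_misclass')])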
    def get_space(self, space_id):
        space_class = self.select_space(space_id)
        if space_class == 'conv2dspace':
            (num_row, num_column, num_channels, axes_char) \
                = self.select_space_conv2DSpace(space_id)
            if axes_char == 'b01c':
                axes = ('b', 0, 1, 'c')
            elif axes_char == 'c01b':
                axes = ('c', 0, 1, 'b')
            else:
                raise HPSData("Axes not supported: "+str(axes_char))
            print axes
            return Conv2DSpace(shape=(num_row, num_column), 
                               num_channels=num_channels, axes=axes)
        else:
            raise HPSData("Space class not supported:"+str(space_class))
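    # Hedged sketch of what get_space builds, e.g. for 32x32 RGB input;
    # the ('c', 0, 1, 'b') axis order is the layout the cuda-convnet
    # backed MaxoutConvC01B layers above expect:
    #
    #     from pylearn2.space import Conv2DSpace
    #     space = Conv2DSpace(shape=(32, 32), num_channels=3,
    #                         axes=('c', 0, 1, 'b'))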
    def get_extensions(self, ext_array, config_id):
        if ext_array is None:
            return []
        extensions = []
        for ext_id in ext_array:
            ext_class = self.select_extension(ext_id)
            if ext_class == 'exponentialdecayoverepoch':
                (decay_factor, min_lr) \
                    =  self.select_ext_exponentialDecayOverEpoch(ext_id)
                extensions.append(
                    ExponentialDecayOverEpoch(
                        decay_factor=decay_factor,min_lr=min_lr
                    )
                )
            elif ext_class == 'momentumadjustor':
                (final_momentum, start_epoch, saturate_epoch) \
                    = self.select_ext_momentumAdjustor(ext_id)
                extensions.append(
                    MomentumAdjustor(
                        final_momentum=final_momentum,
                        start=start_epoch, 
                        saturate=saturate_epoch
                    )
                )
            else:
                raise HPSData("ext class not supported:"+str(ext_class))
        # monitor based save best
        if self.mbsb_channel_name is not None:
            save_path = self.save_prefix+str(config_id)+"_best.pkl"
            extensions.append(MonitorBasedSaveBest(
                    channel_name = self.mbsb_channel_name,
                    save_path = save_path
                )
            )
        
        # HPS Logger
        extensions.append(
            HPSLog(self.log_channel_names, self.db, config_id)
        )
        return extensions
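    # Hedged sketch of where this extension list presumably ends up;
    # dataset, model and algorithm stand in for objects built elsewhere
    # in this class, so this is an assumption, not code from the source:
    #
    #     from pylearn2.train import Train
    #     train = Train(dataset=dataset, model=model, algorithm=algorithm,
    #                   extensions=self.get_extensions(ext_array, config_id))
    #     train.main_loop()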
    def select_train_sgd(self, config_id):
        row = self.db.executeSQL("""
        SELECT learning_rate,batch_size,init_momentum,train_iteration_mode
        FROM hps2.train_sgd
        WHERE config_id = %s
        """, (config_id,), self.db.FETCH_ONE)
        if not row:
            raise HPSData("No stochasticGradientDescent for config_id=" \
                +str(config_id))
        return row
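    # Hedged sketch of how the selected row is presumably consumed (cost
    # and monitoring arguments omitted; an assumption, not source code):
    #
    #     from pylearn2.training_algorithms.sgd import SGD
    #     (lr, batch_size, init_momentum, iter_mode) \
    #         = self.select_train_sgd(config_id)
    #     algorithm = SGD(learning_rate=lr, batch_size=batch_size,
    #                     init_momentum=init_momentum,
    #                     train_iteration_mode=iter_mode)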
    def set_end_time(self, config_id):
        return self.db.executeSQL("""
        UPDATE hps2.config 
        SET end_time = now()
        WHERE config_id = %s
        """, (config_id,), self.db.COMMIT)  
    def set_accuracy(self, config_id, accuracy):
        return self.db.executeSQL("""
        INSERT INTO hps2.validation_accuracy (config_id, accuracy)
        VALUES (%s, %s)
        """, (config_id, accuracy), self.db.COMMIT)  
    def select_space(self, space_id):
        row = self.db.executeSQL("""
        SELECT space_class
        FROM hps2.space
        WHERE space_id = %s
        """, (space_id,), self.db.FETCH_ONE)
        if not row:
            raise HPSData("No space for space_id="+str(space_id))
        return row[0]
    def select_space_conv2DSpace(self, space_id):
        row = self.db.executeSQL("""
        SELECT num_row, num_column, num_channel, axes
        FROM hps2.space_conv2DSpace
        WHERE space_id = %s
        """, (space_id,), self.db.FETCH_ONE)
        if not row:
            raise HPSData("No conv2DSpace for space_id="+str(space_id))
        return row
    def select_extension(self, ext_id):
        row = self.db.executeSQL("""
        SELECT ext_class
        FROM hps2.extension
        WHERE ext_id = %s
        """, (ext_id,), self.db.FETCH_ONE)
        if not row:
            raise HPSData("No extension for ext_id="+str(ext_id))
        return row[0]
    def select_ext_exponentialDecayOverEpoch(self, ext_id):
        row = self.db.executeSQL("""
        SELECT decay_factor, min_lr
        FROM hps2.ext_exponentialDecayOverEpoch
        WHERE ext_id = %s
        """, (ext_id,), self.db.FETCH_ONE)
        if not row:
            raise HPSData("No exponentialDecayOverEpoch ext for ext_id=" \
                +str(ext_id))
        return row
    def select_ext_momentumAdjustor(self, ext_id):
        row = self.db.executeSQL("""
        SELECT final_momentum, start_epoch, saturate_epoch
        FROM hps2.ext_momentumAdjustor
        WHERE ext_id = %s
        """, (ext_id,), self.db.FETCH_ONE)
        if not row:
            raise HPSData("No momentumAdjustor extension for ext_id=" \
                +str(ext_id))
        return row
    def select_next_config(self):
        row = None
        for i in xrange(10):
            c = self.db.conn.cursor()
            c.execute("""
            BEGIN;
    
            SELECT config_id,config_class,random_seed,ext_array
            FROM hps2.config 
            WHERE start_time IS NULL AND task_id = %s
            LIMIT 1 FOR UPDATE;
            """, (self.task_id,))
            row = c.fetchone()
            if row:
                break
            time.sleep(0.1)
            c.close()
        if not row:
            raise HPSData("No more configurations for task_id=" \
                +str(self.task_id))
        (config_id,config_class,random_seed,ext_array) = row
        c.execute("""
        UPDATE hps2.config
        SET start_time = now() 
        WHERE config_id = %s;
        """, (config_id,))
        self.db.conn.commit()
        c.close()
        return (config_id,config_class,random_seed,ext_array)
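    # Hedged sketch of the worker loop these selects support: each worker
    # claims an unstarted config row (SELECT ... FOR UPDATE keeps two
    # workers from taking the same one), trains it, then records the end
    # time. run_one_config is a hypothetical name for the training step:
    #
    #     while True:
    #         try:
    #             (config_id, config_class, seed, exts) \
    #                 = hps.select_next_config()
    #         except HPSData:
    #             break  # no unstarted configs left for this task
    #         run_one_config(config_id, config_class, seed, exts)
    #         hps.set_end_time(config_id)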
    def select_config(self, config_id):
        row = None
        for i in xrange(10):
            c = self.db.conn.cursor()
            c.execute("""
            BEGIN;
    
            SELECT config_id,config_class,random_seed,ext_array
            FROM hps2.config 
            WHERE config_id = %s 
            LIMIT 1 FOR UPDATE;
            """, (config_id,))
            row = c.fetchone()
            if row:
                break
            time.sleep(0.1)
            c.close()
        if not row:
            raise HPSData("No configuration for config_id=" \
                +str(config_id))
        (config_id,config_class,random_seed,ext_array) = row
        c.execute("""
        UPDATE hps2.config
        SET start_time = now() 
        WHERE config_id = %s;
        """, (config_id,))
        self.db.conn.commit()
        c.close()
        return (config_id,config_class,random_seed,ext_array)
    def select_model_mlp(self, config_id):
        row = self.db.executeSQL("""
        SELECT  layer_array,batch_size,input_space_id,dropout_include_probs,
                dropout_scales,dropout_input_include_prob,
                dropout_input_scale,weight_decay,nvis
        FROM hps2.model_mlp
        WHERE config_id = %s
        """, (config_id,), self.db.FETCH_ONE)
        if not row:
            raise HPSData("No model mlp for config_id="+str(config_id))
        return row
    def select_layer(self, layer_id):
        row = self.db.executeSQL("""
        SELECT layer_class, layer_name
        FROM hps2.layer
        WHERE layer_id = %s
        """, (layer_id,), self.db.FETCH_ONE)
        if not row:
            raise HPSData("No layer for layer_id="+str(layer_id))
        return row
    def select_layer_maxout(self, layer_id):
        row = self.db.executeSQL("""
        SELECT   num_units,num_pieces,pool_stride,randomize_pools,irange,
                 sparse_init,sparse_stdev,include_prob,init_bias,W_lr_scale,
                 b_lr_scale,max_col_norm,max_row_norm
        FROM hps2.layer_maxout
        WHERE layer_id = %s
        """, (layer_id,), self.db.FETCH_ONE)
        if not row:
            raise HPSData("No maxout layer for layer_id="+str(layer_id))
        return row
    def select_layer_softmax(self, layer_id):
        row = self.db.executeSQL("""
        SELECT  n_classes,irange,istdev,sparse_init,W_lr_scale,b_lr_scale, 
                max_row_norm,no_affine,max_col_norm
        FROM hps2.layer_softmax
        WHERE layer_id = %s
        """, (layer_id,), self.db.FETCH_ONE)
        if not row:
            raise HPSData("No softmax layer for layer_id="+str(layer_id))
        return row
    def select_layer_rectifiedlinear(self, layer_id):
        row = self.db.executeSQL("""
        SELECT  dim,irange,istdev,sparse_init,sparse_stdev,include_prob,
                init_bias,W_lr_scale,b_lr_scale,left_slope,max_row_norm,
                max_col_norm,use_bias
        FROM hps2.layer_rectifiedlinear
        WHERE layer_id = %s
        """, (layer_id,), self.db.FETCH_ONE)
        if not row:
            raise HPSData("No rectifiedlinear layer for layer_id="\
                +str(layer_id))
        return row
    def select_layer_convrectifiedlinear(self, layer_id):
        row = self.db.executeSQL("""
        SELECT  output_channels,kernel_width,pool_width,pool_stride,irange,
                border_mode,sparse_init,include_prob,init_bias,W_lr_scale,
                b_lr_scale,left_slope,max_kernel_norm
        FROM hps2.layer_convrectifiedlinear
        WHERE layer_id = %s
        """, (layer_id,), self.db.FETCH_ONE)
        if not row:
            raise HPSData("No convrectifiedlinear layer for layer_id=" \
                +str(layer_id))
        return row
    def select_layer_maxoutConvC01B(self, layer_id):
        row = self.db.executeSQL("""
        SELECT  num_channels,num_pieces,kernel_width,pool_width,pool_stride,
                irange,init_bias,W_lr_scale,b_lr_scale,pad,fix_pool_shape,
                fix_pool_stride,fix_kernel_shape,partial_sum,tied_b,
                max_kernel_norm,input_normalization,output_normalization
        FROM hps2.layer_maxoutConvC01B
        WHERE layer_id = %s
        """, (layer_id,), self.db.FETCH_ONE)
        if not row:
            raise HPSData("No maxoutConvC01B layer for layer_id=" \
                +str(layer_id))
        return row
    def select_layer_sigmoid(self, layer_id):
        row = self.db.executeSQL("""
        SELECT  dim,irange,istdev,sparse_init,sparse_stdev,include_prob,init_bias,
                W_lr_scale,b_lr_scale,max_col_norm,max_row_norm
        FROM hps2.layer_sigmoid
        WHERE layer_id = %s
        """, (layer_id,), self.db.FETCH_ONE)
        if not row:
            raise HPSData("No sigmoid layer for layer_id=" \
                +str(layer_id))
        return row
    def select_term_epochCounter(self, config_id):
        row = self.db.executeSQL("""
        SELECT ec_max_epoch
        FROM hps2.term_epochcounter
        WHERE config_id = %s
        """, (config_id,), self.db.FETCH_ONE)
        if not row:
            raise HPSData("No epochCounter term for config_id="\
                +str(config_id))
        return row[0]
    def select_term_monitorBased(self, config_id):
        row = self.db.executeSQL("""
        SELECT proportional_decrease, mb_max_epoch, channel_name
        FROM hps2.term_monitorBased
        WHERE config_id = %s
        """, (config_id,), self.db.FETCH_ONE)
        if not row:
            raise HPSData("No monitorBased term for config_id="\
                +str(config_id))
        return row
    def select_preprocess(self, preprocess_id):
        row =  self.db.executeSQL("""
        SELECT dataset_desc, dataset_nvis
        FROM hps2.dataset
        WHERE dataset_id = %s
        """, (preprocess_id,), self.db.FETCH_ONE)