Example #1
import random
import time
import traceback

import keyboard  # third-party hotkey-polling library

# CliLogger, Screen, Resources, Analytics, Cooldown, Utility, Logic, the
# COOLDOWNS/ANALYTICS_IGNORE/TICK_INTERVAL constants and the exception
# classes come from the bot's own modules (not shown here).


def main():
    '''Main function of the script'''
    paused = False

    logger = CliLogger()
    screen = Screen()
    resources = Resources()
    analytics = Analytics(logger)
    cooldown = Cooldown(COOLDOWNS)
    analytics.ignore = ANALYTICS_IGNORE
    resources.load(analytics)
    utility = Utility(logger, screen, resources, analytics, cooldown)
    logic = Logic(utility)
    try:
        handle = wait_league_window(logger, (0, 0, 1024, 768))
    except CantForgroundWindowError:
        # Without a window handle the capture region below cannot be
        # computed, so exit instead of continuing with `handle` unset.
        logger.log('Could not bring the League window to the foreground.')
        return
    logger.log('Press and hold x to exit bot.')
    screen.d3d.capture(target_fps=10, region=find_rect(handle))
    while True:
        try:
            if keyboard.is_pressed('x'):
                raise BotExitException
            if keyboard.is_pressed('ctrl+u'):
                paused = False
            if paused:
                time.sleep(0.1)
                continue
            if keyboard.is_pressed('ctrl+p'):
                paused = True
                logger.log(
                    'Bot paused. Press ctrl+u to unpause. Press x to exit.')
                continue
            logic.tick()
            time.sleep(random.randint(*TICK_INTERVAL) / 1000)
        except BotContinueException as exp:
            time.sleep(random.randint(*exp.tick_interval) / 1000)
        except NoCharacterInMinimap:
            time.sleep(1)
        except BotExitException:
            screen.d3d.stop()
            break
        except Exception:  # pylint:disable=broad-except
            traceback.print_exc()
            screen.d3d.stop()
            break
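
The pause/resume control flow above is a reusable pattern: poll hotkeys with the keyboard package, sleep cheaply while paused, and run one unit of work per tick otherwise. A minimal, self-contained sketch of just that loop (do_tick is a placeholder for the bot's work, not part of the original):

import time

import keyboard  # pip install keyboard


def do_tick():
    '''Placeholder for one unit of bot work.'''
    print('tick')


def run_loop():
    paused = False
    while True:
        if keyboard.is_pressed('x'):       # hard exit
            break
        if keyboard.is_pressed('ctrl+u'):  # resume
            paused = False
        if paused:                         # idle cheaply while paused
            time.sleep(0.1)
            continue
        if keyboard.is_pressed('ctrl+p'):  # pause
            paused = True
            print('Paused. ctrl+u to resume, x to exit.')
            continue
        do_tick()
        time.sleep(0.1)


if __name__ == '__main__':
    run_loop()

Note that keyboard.is_pressed reads global keyboard state, so on Linux the script typically needs root privileges.
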
Example #2
    def train_stage1(self):
        """
        In this stage, we will freeze all the convolution blocks and train
        only the newly added dense layers. We will add a global spatial average
        pooling layer, we will add fully connected dense layers on the output
        of the base models. We will freeze the convolution base and train only
        the top layers. We will set all the convolution layers to false, the model
        should be compiled when all the convolution layers are set to false.
        
        Arguments:
            
            -input_params  :  This parameter will contain all the information that the user will
                              input through the terminal
        """

        print(
            "\nTraining the model by freezing the convolution block and tuning the top layers..."
        )
        st = dt.now()

        utils_obj = Utility(self.input_params, self.path_dict)

        # For a standard architecture, build on a pre-trained ImageNet base;
        # for 'custom', use the model supplied by the user instead.
        if self.input_params['model_name'] != 'custom':
            base_model = utils_obj.load_imagenet_model()

            #Adding a global spatial average pooling layer
            x = base_model.output
            x = GlobalAveragePooling2D()(x)

            #Adding the user-supplied custom layers on top of the pooled
            #output (a plain dense layer could be used here instead, e.g.
            #x = Dense(self.input_params['dense_neurons'], activation='relu', kernel_initializer='he_normal')(x))
            customlayers = self.input_params['customlayers']
            x = customlayers(x)

            #Adding the final dense output layer and defining the model
            n = utils_obj.no_of_classes()
            output_layer = Dense(
                n,
                activation=self.input_params['outputlayer_activation'],
                kernel_initializer='glorot_uniform')(x)
            model_stg1 = Model(inputs=base_model.input, outputs=output_layer)

            #Freeze the convolution base so that only the top layers train;
            #the model must be compiled after the layers are frozen.
            for layer in base_model.layers:
                layer.trainable = False

        else:
            model_stg1 = self.input_params['custom_model']

        #Compiling the model
        model_stg1.compile(
            optimizer=optimizers.Adam(lr=self.input_params['stage1_lr']),
            loss='categorical_crossentropy',
            metrics=[self.input_params['metric']])

        #Data generators that apply the model-specific preprocessing function
        train_datagen = ImageDataGenerator(
            preprocessing_function=utils_obj.init_preprocess_func())
        val_datagen = ImageDataGenerator(
            preprocessing_function=utils_obj.init_preprocess_func())

        df_train = utils_obj.load_data("train")
        df_val = utils_obj.load_data("val")

        train_generator = train_datagen.flow_from_dataframe(
            dataframe=df_train,
            directory=self.path_dict['source'],
            target_size=utils_obj.init_sizes(),
            x_col="filenames",
            y_col="class_label",
            batch_size=self.input_params['batch_size'],
            class_mode='categorical',
            color_mode='rgb',
            shuffle=True)

        val_generator = val_datagen.flow_from_dataframe(
            dataframe=df_val,
            directory=self.path_dict['source'],
            target_size=utils_obj.init_sizes(),
            x_col="filenames",
            y_col="class_label",
            batch_size=self.input_params['batch_size'],
            class_mode='categorical',
            color_mode='rgb',
            shuffle=True)

        nb_train_samples = len(train_generator.classes)
        nb_val_samples = len(val_generator.classes)

        history = model_stg1.fit_generator(
            generator=train_generator,
            steps_per_epoch=nb_train_samples //
            self.input_params['batch_size'],
            epochs=self.input_params['epochs1'],
            validation_data=val_generator,
            validation_steps=nb_val_samples // self.input_params['batch_size'],
            callbacks=TrainingUtils.callbacks_list(self, 1),  #1 = stage number
            workers=self.input_params['nworkers'],
            use_multiprocessing=False,
            max_queue_size=20)

        hist_df = pd.DataFrame(history.history)
        hist_csv_file = self.path_dict['model_path'] + "stage{}/".format(
            1) + "{}_history_stage_{}.csv".format(
                self.input_params['model_name'], 1)
        with open(hist_csv_file, mode='w') as file:
            hist_df.to_csv(file, index=False)

        #model_stg1.load_weights(self.path_dict['model_path'] + "stage{}/".format(1) + "{}_weights_stage_{}.hdf5".format(self.input_params['model_name'], 1))
        model_stg1.save(
            self.path_dict['model_path'] + "stage{}/".format(1) +
            "{}_model_stage_{}.h5".format(self.input_params['model_name'], 1))

        TrainingUtils.save_summary(self, model_stg1, 1)
        TrainingUtils.plot_layer_arch(self, model_stg1, 1)

        stage1_params = dict()
        stage1_params['train_generator'] = train_generator
        stage1_params['val_generator'] = val_generator
        stage1_params['nb_train_samples'] = nb_train_samples
        stage1_params['nb_val_samples'] = nb_val_samples

        print("\nTime taken to train the model in stage 1: ", dt.now() - st)

        #Start model evaluation for Stage 1
        eval_utils = EvalUtils(self.input_params, self.path_dict, 1)
        eval_utils.predict_on_test()

        return model_stg1, stage1_params
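
train_stage1 boils down to the standard Keras transfer-learning recipe: pool the base model's output, add a new dense head, freeze the convolution base, then compile. A minimal runnable sketch of that recipe with tf.keras and a VGG16 base (the class count, learning rate, and metric are illustrative stand-ins, not values from the original):

from tensorflow.keras import Model, optimizers
from tensorflow.keras.applications import VGG16
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D

n_classes = 10  # illustrative

base = VGG16(weights='imagenet', include_top=False)   # convolution base
x = GlobalAveragePooling2D()(base.output)             # pooled features
out = Dense(n_classes, activation='softmax',
            kernel_initializer='glorot_uniform')(x)   # new output head
model = Model(inputs=base.input, outputs=out)

for layer in base.layers:     # freeze the convolution base...
    layer.trainable = False

model.compile(optimizer=optimizers.Adam(1e-3),        # ...then compile
              loss='categorical_crossentropy',
              metrics=['accuracy'])

Compiling after freezing matters: Keras bakes the trainable flags into the training function at compile time.
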
Example #3
base_dir = str(Path.home())

default_args = {
    'owner': 'user',
    'depends_on_past': False,
    'start_date': dt.datetime.strptime('2018-07-29T00:00:00',
                                       '%Y-%m-%dT%H:%M:%S'),
    'provide_context': True
}
# Instantiate the DAG
dag = DAG('dag1',
          default_args=default_args,
          schedule_interval='0 0 * * *',
          max_active_runs=1)  # scheduled to run everyday at midnight

util = Utility(news_api_key='', s3_bucket='')


# get all sources in english language
def sources(**kwargs):
    #sourcesCsvString=util.getSources('business','en','in')
    sourcesCsvString = util.getSources(language='en')
    return sourcesCsvString


# get top headlines for list of sources given
def headlines(**kwargs):
    ti = kwargs['ti']
    # xcom_pull fetches the return value of the 'gettingsources' task
    v1 = ti.xcom_pull(task_ids='gettingsources')
    csvFilesList = util.getheadlines(v1)
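
The snippet ends before the tasks are wired into the DAG. The xcom_pull(task_ids='gettingsources') call implies two PythonOperator tasks chained together; a plausible wiring under that assumption (the task ids and dependency order are inferred, not from the original):

from airflow.operators.python_operator import PythonOperator

get_sources_task = PythonOperator(task_id='gettingsources',
                                  python_callable=sources,
                                  dag=dag)
get_headlines_task = PythonOperator(task_id='gettingheadlines',
                                    python_callable=headlines,
                                    dag=dag)

# headlines runs after sources, so xcom_pull can find its return value
get_sources_task >> get_headlines_task

With provide_context already set in default_args, kwargs['ti'] is available inside each callable.
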
Example #4
#!/usr/bin/python3

from pathlib import Path
import os
import sys

libPath = os.path.join(Path(__file__).absolute().parent.parent, 'pyuval/')

sys.path.append(libPath)
from config import Config
from utils import Utility

util = Utility()
config = Config()

memoryData = util.readDataFromMemoy(config.get('ksm:gpg:shm:address'))
print(memoryData)
Example #5
    def predict_on_test(self):
        """
        This function will load the test dataset, pre-process the test
        images and check the performance of the trained models on unseen
        data. This will also save the confusion matrix and classification
        report as csv file in seperate dataframes for each model and for
        each stage, in the evaluation directory.
        
        Arguments:                    
            
            -size_dict    : Contains information about the image input image sizes for each of the models
                
            -model_name   : Name of the model, for example - vgg16, inception_v3, resnet50 etc
                          
            -stage_no     : The training stage of the model. You will have a choice to select the number
                            of training stages. In stage 1, we only fine tune the top 2 dense layers by
                            freezing the convolution base. In stage 2, we will re adjust the weights trained
                            in stage 1 by training the top convolution layers, by freezing the dense layers.
        """

        print("\nStarting model evaluation for stage {}..".format(
            self.stage_no))

        #Create a Utility object to access its helper methods
        utils_obj = Utility(self.input_params, self.path_dict)

        df_test = utils_obj.load_data("test")

        test_datagen = ImageDataGenerator(
            preprocessing_function=utils_obj.init_preprocess_func())

        test_generator = test_datagen.flow_from_dataframe(
            dataframe=df_test,
            directory=self.path_dict['source'],
            target_size=utils_obj.init_sizes(),
            x_col="filenames",
            y_col="class_label",
            batch_size=1,
            class_mode='categorical',
            color_mode='rgb',
            shuffle=False)

        nb_test_samples = len(test_generator.classes)

        model = utils_obj.get_models(self.stage_no)
        class_indices = test_generator.class_indices

        def label_class(cat_name):
            return (class_indices[cat_name])

        df_test['true'] = df_test['class_label'].apply(
            lambda x: label_class(str(x)))
        y_true = df_test['true'].values

        #Predictions (probability scores and class labels); with batch_size=1
        #the number of steps equals the number of test samples
        y_pred_proba = model.predict_generator(test_generator,
                                               nb_test_samples)
        y_pred = np.argmax(y_pred_proba, axis=1)

        df_test['predicted'] = y_pred
        df_test.to_csv(self.path_dict["eval_path"] +
                       "stage{}/".format(self.stage_no) +
                       '{}_predictions_stage_{}.csv'.format(
                           self.input_params['model_name'], self.stage_no))
        dictionary = dict(zip(df_test.true.values, df_test.class_label.values))

        #Confusion matrix, with integer indices mapped back to class labels
        cm = metrics.confusion_matrix(y_true, y_pred)
        df_cm = pd.DataFrame(cm).transpose()
        df_cm = df_cm.rename(index=dictionary,
                             columns=dictionary,
                             copy=True,
                             inplace=False)
        df_cm.to_csv(self.path_dict["eval_path"] +
                     "stage{}/".format(self.stage_no) +
                     '{}_cm_stage_{}.csv'.format(
                         self.input_params['model_name'], self.stage_no))
        print('Confusion matrix prepared and saved..')

        #Classification Report
        report = metrics.classification_report(y_true,
                                               y_pred,
                                               target_names=list(
                                                   class_indices.keys()),
                                               output_dict=True)

        df_rep = pd.DataFrame(report).transpose()
        df_rep.to_csv(self.path_dict["eval_path"] +
                      "stage{}/".format(self.stage_no) +
                      '{}_class_report_stage_{}.csv'.format(
                          self.input_params['model_name'], self.stage_no))
        print('Classification report prepared and saved..')

        EvalUtils.plot_confusion_matrix(
            self, y_true, y_pred, list(test_generator.class_indices.keys()))

        #General Metrics
        df_metrics = EvalUtils.get_metrics(self, y_true, y_pred)
        df_metrics.to_csv(self.path_dict["eval_path"] +
                          "stage{}/".format(self.stage_no) +
                          '{}_metrics_stage_{}.csv'.format(
                              self.input_params['model_name'], self.stage_no))

        history_df = pd.read_csv(
            self.path_dict["model_path"] + "stage{}/".format(self.stage_no) +
            "{}_history_stage_{}.csv".format(self.input_params['model_name'],
                                             self.stage_no))

        #Get the train vs validation loss for all epochs
        EvalUtils.plt_epoch_error(self, history_df)

        #Generate a complete report and save it as an HTML file in the evaluation folder location
        EvalUtils.get_complete_report(self, y_true, y_pred, class_indices)
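
The core of the evaluation is a compact sklearn/pandas pattern: build the confusion matrix and classification report, map integer indices back to class names, and persist both as CSV. A self-contained sketch with toy labels (the class names and file paths are illustrative, not from the original):

import pandas as pd
from sklearn import metrics

y_true = [0, 1, 1, 2, 2, 2]              # toy ground-truth indices
y_pred = [0, 1, 2, 2, 2, 1]              # toy predicted indices
names = {0: 'cat', 1: 'dog', 2: 'bird'}  # index -> class name

cm = metrics.confusion_matrix(y_true, y_pred)
df_cm = pd.DataFrame(cm).transpose().rename(index=names, columns=names)
df_cm.to_csv('cm.csv')

report = metrics.classification_report(
    y_true, y_pred, target_names=list(names.values()), output_dict=True)
pd.DataFrame(report).transpose().to_csv('class_report.csv')

confusion_matrix returns rows indexed by true label; the transpose makes rows the predicted label, matching the original code.
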
    def __init__(self,
                 env_name,
                 doubleQ=False,
                 dueling=False,
                 perMemory=False,
                 training=True,
                 watch=False):

        with tf.variable_scope('AgentEnvSteps'):
            self.agentSteps = tf.get_variable(name='agentSteps',
                                              initializer=0,
                                              trainable=False,
                                              dtype=tf.int32)
            self.agentStepsUpdater = self.agentSteps.assign_add(1)

        # keep in order
        self.util = Utility(env_name, doubleQ, dueling, perMemory, training)
        self.env = Environment(env_name, self.util.monitorDir)
        self.state_process = StateProcessor()
        self.num_action = self.env.VALID_ACTIONS
        self.deepNet = Brain(self.num_action, dueling, training)

        self.net_feed = self.deepNet.nn_input
        self.onlineNet = self.deepNet.Q_nn(forSess=True)
        self.actions = np.arange(self.num_action)
        self.no_op_max = AgentSetting.no_op_max
        self.startTime = 0.0
        self.duration = 0.0

        self.totalReward = 0.0
        self.countR = 0
        self.training = training
        self.doubleQ = doubleQ
        self.dueling = dueling
        self.perMemory = perMemory
        self.rendering = watch
        print("POSSIBLE ACTIONS :", self.actions)

        if training:

            self.updates = 0
            self.totalLoss = 0.0
            self.countL = 0

            self.minibatch = AgentSetting.minibatch
            self.replay_memorySize = AgentSetting.replay_memory
            self.t_net_update_freq = AgentSetting.t_net_update_freq
            self.discount_factor = AgentSetting.discount_factor
            self.update_freq = AgentSetting.update_freq

            self.momentum = AgentSetting.momentum

            self.e_greedy_init = AgentSetting.e_greedy_init
            self.e_greedy_final = AgentSetting.e_greedy_final
            self.e_final_at = AgentSetting.e_final_at
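            # exploration schedule: epsilon moves from e_greedy_init down to
            # e_greedy_final over the first e_final_at agent steps (linear
            # decay, per the commented rate below)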

            #self.e_decay_rate = (self.e_greedy_init - self.e_greedy_final) / self.e_final_at

            self.epsilon = tf.Variable(0.0,
                                       trainable=False,
                                       dtype=tf.float32,
                                       name="epsilon")
            self.epsilonHolder = tf.placeholder(dtype=tf.float32)
            self.epsilonUpdater = self.epsilon.assign(self.epsilonHolder)

            self.replay_strt_size = AgentSetting.replay_strt_size

            self.global_step = tf.Variable(0,
                                           trainable=False,
                                           name='global_step')

            self.training_hrs = tf.Variable(0.0,
                                            trainable=False,
                                            name='training_hrs')
            self.training_episodes = tf.Variable(0,
                                                 trainable=False,
                                                 name="training_episodes")

            self.training_hrsHolder = tf.placeholder(dtype=tf.float32)
            self.training_hrsUpdater = self.training_hrs.assign_add(
                (self.training_hrsHolder / 60.0) / 60.0)
            self.training_episodesUpdater = self.training_episodes.assign_add(
                1)

            self.targetNet = self.deepNet.T_nn(forSess=True)

            if doubleQ:
                '''DoubleQ aims to reduce overestimation of Q-values by
                decoupling action selection from action evaluation in the
                target calculation.'''
                # if double
                # 1- action selection using Q-net(online net)
                self.selectedActionIndices = tf.argmax(self.onlineNet, axis=1)
                self.selectedAction = tf.one_hot(
                    indices=self.selectedActionIndices,
                    depth=self.num_action,
                    axis=-1,
                    dtype=tf.float32,
                    on_value=1.0,
                    off_value=0.0)
                # 2- action evaluation using T-net (target net)
                self.nxtState_qValueSelected = tf.reduce_sum(
                    tf.multiply(self.targetNet,
                                self.selectedAction), axis=1)  # element wise
            else:
                # 1,2- make a one-step look-ahead and follow a greedy policy
                self.nxtState_qValueSelected = tf.reduce_max(self.targetNet,
                                                             axis=1)

            #3- td-target
            self.td_targetHolder = tf.placeholder(shape=[self.minibatch],
                                                  name='td-target',
                                                  dtype=tf.float32)

            #4- current state chosen action value

            self.actionBatchHolder = tf.placeholder(dtype=tf.uint8)
            self.chosenAction = tf.one_hot(indices=self.actionBatchHolder,
                                           depth=self.num_action,
                                           axis=-1,
                                           dtype=tf.float32,
                                           on_value=1.0,
                                           off_value=0.0)

            self.curState_qValueSelected = tf.reduce_sum(tf.multiply(
                self.onlineNet, self.chosenAction),
                                                         axis=1)  # elementwise

            self.delta = tf.subtract(self.td_targetHolder,
                                     self.curState_qValueSelected)

            #set learning rate
            self._setLearningRate()

            #TODO Dueling (rescale and clipping of gradients)

            if perMemory:

                self.replay_memory = PEM(ArchitectureSetting.in_shape,
                                         self.replay_memorySize)
                self.weightedISHolder = tf.placeholder(shape=[self.minibatch],
                                                       name='weighted-IS',
                                                       dtype=tf.float32)
                self.weightedDelta = tf.multiply(self.delta,
                                                 self.weightedISHolder)
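                # Huber-style clipping: quadratic where |x| < 1, linear
                # beyond, which keeps gradients bounded (same rule as the
                # non-prioritized branch below)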
                self.clipped_loss = tf.where(tf.abs(self.weightedDelta) < 1.0,
                                             0.5 *
                                             tf.square(self.weightedDelta),
                                             tf.abs(self.weightedDelta) - 0.5,
                                             name='clipped_loss')
            else:  #uniform (non-prioritized) experience replay

                self.replay_memory = ExperienceMemory(
                    ArchitectureSetting.in_shape, self.replay_memorySize)
                self.clipped_loss = tf.where(tf.abs(self.delta) < 1.0,
                                             0.5 * tf.square(self.delta),
                                             tf.abs(self.delta) - 0.5,
                                             name='clipped_loss')

            self.loss = tf.reduce_mean(self.clipped_loss, name='loss')

            #Unclipped alternative:
            #self.loss = tf.reduce_mean(tf.squared_difference(self.td_targetHolder, self.curState_qValueSelected))
            self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate,
                                                       decay=0.9,
                                                       momentum=self.momentum,
                                                       epsilon=1e-10)
            self.train_step = self.optimizer.minimize(
                self.loss, global_step=self.global_step)

            # see https://www.tensorflow.org/api_docs/python/tf/train/RMSPropOptimizer

        else:
            self.epsilon = tf.constant(AgentSetting.epsilon_eval,
                                       dtype=tf.float32)

        #finalize: write summaries and save state via the utility helper
        self.util.summANDsave(self.training)
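
The clipped_loss above is the Huber loss with threshold 1: quadratic for small temporal-difference errors, linear for large ones, which keeps gradients bounded during training. A small numpy sketch of the same piecewise rule the tf.where calls implement:

import numpy as np


def huber(delta):
    '''0.5*d^2 where |d| < 1, |d| - 0.5 elsewhere (mirrors the tf.where above).'''
    d = np.asarray(delta, dtype=np.float32)
    return np.where(np.abs(d) < 1.0, 0.5 * np.square(d), np.abs(d) - 0.5)


print(huber([0.5, -0.2, 2.0, -3.0]))  # -> [0.125 0.02  1.5   2.5 ]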