Example #1
        if os.path.exists(os.path.join(model_save_path, "checkpoint")):
            
            Global_Model.load_model_weights()
            step_model.load_model_weights()

            for env in envs:
                workers.append(Worker(step_model, env, batch_size=batch_size, render=False))
            
            print("Model restored.")
        
        else:
            
            for env in envs:
                workers.append(Worker(step_model, env, batch_size=batch_size, render=False))
            
            print("Creating new model.")
    except Exception:
        print("ERROR: There was an issue loading the model!")
        raise

coordinator = Coordinator(Global_Model, step_model, workers, plot, num_envs, num_epocs, batches_per_epoch, batch_size, gamma, model_save_path, anneling_steps)

# Train and save
if coordinator.run():
    try:
        Global_Model.save_model_weights()
        print("Model saved.")
        print("Now testing results....")
    except Exception:
        print("ERROR: There was an issue saving the model!")
Example #2
    def start(self):
        workers = []
        network_params = (self.NUM_STATE,
                          self._config['Max steps taken per batch'],
                          self.NUM_ACTIONS, self.ACTION_SPACE)

        # Init Global and Local networks. Generate Weights for them as well.
        if self._config['CNN type'] == '':
            self._global_model = AC_Model_Large(self.NUM_STATE,
                                                self.NUM_ACTIONS,
                                                self._config,
                                                is_training=True)
            self._global_model(
                tf.convert_to_tensor(np.random.random((1, *self.NUM_STATE)),
                                     dtype='float64'))
            self._step_model = AC_Model_Large(self.NUM_STATE,
                                              self.NUM_ACTIONS,
                                              self._config,
                                              is_training=True)
            self._step_model(
                tf.convert_to_tensor(np.random.random((1, *self.NUM_STATE)),
                                     dtype='float64'))
        else:
            self._global_model = CNN_class_import(
                self._config['CNN type'],
                (self.NUM_STATE, self.NUM_ACTIONS, self._config, True))
            self._global_model(
                tf.convert_to_tensor(np.random.random((1, *self.NUM_STATE)),
                                     dtype='float64'))
            self._step_model = CNN_class_import(
                self._config['CNN type'],
                (self.NUM_STATE, self.NUM_ACTIONS, self._config, True))
            self._step_model(
                tf.convert_to_tensor(np.random.random((1, *self.NUM_STATE)),
                                     dtype='float64'))

        # Load model if exists
        if not os.path.exists(self._model_save_path):
            os.makedirs(self._model_save_path)
        else:
            try:
                if os.path.exists(os.path.join(self._model_save_path, "checkpoint")):

                    self._global_model.load_model_weights()
                    self._step_model.load_model_weights()
                    for env in self._envs:
                        workers.append(
                            Worker(self._step_model,
                                   env,
                                   batch_size=self._config['Max steps taken per batch'],
                                   render=False))

                    print("Model restored.")

                else:

                    for env in self._envs:
                        workers.append(
                            Worker(self._step_model,
                                   env,
                                   batch_size=self._config['Max steps taken per batch'],
                                   render=False))

                    print("Creating new model.")
            except Exception:
                print("ERROR: There was an issue loading the model!")
                raise

        coordinator = Coordinator(self._global_model, self._step_model,
                                  workers, self._plot, self._model_save_path,
                                  self._config)

        # Train and save
        try:
            if coordinator.run():
                try:
                    self._global_model.save_model_weights()
                    print("Model saved.")
                    return True
                except Exception:
                    print("ERROR: There was an issue saving the model!")
                    raise

        except Exception:
            print("ERROR: There was an issue during training!")
            raise
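
In start(), each network is run once on a random input before any weights are loaded. A short illustration of that build-by-calling pattern; the Sequential model and NUM_STATE shape below are placeholders, not the classes from the example:

import numpy as np
import tensorflow as tf

NUM_STATE = (84, 84, 4)  # placeholder observation shape

# A Keras model built without an input shape only creates its variables on the
# first call, so running it on one dummy batch gives load_model_weights()
# variables to restore into.
model = tf.keras.Sequential([tf.keras.layers.Flatten(),
                             tf.keras.layers.Dense(4)])
model(tf.convert_to_tensor(np.random.random((1, *NUM_STATE)), dtype='float64'))
print(len(model.trainable_variables))  # kernel and bias now exist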
Example #3
def writeToFile(data, path, action):  # signature assumed from the calls below
    with open(path, action, encoding="utf-8") as f:  # open the file to write
        f.write(data)


# function to convert a dictionary to readable text
def dictToStr(d):
    strDict = ''
    for k, v in d.items():
        strDict += f'{k} -> {v}\n'
    return strDict


# Join dataSets and normalize
coord = Coordinator()
# joinedDS = coord.join(pathA, pathB)
# normalize joined Dataset
initPf = pd.read_csv(initPath, delimiter=';', encoding="ISO-8859-1")
initPf[initPf < 0] = 0
# initPf[(np.abs(stats.zscore(initPf)) < 3).all(axis=1)]
normPf = coord.normalize(initPf)
print('normPf ', str(normPf))
writeToFile(str(normPf), initNormed, 'w')

# Run spectral clustering for k = 2 to k = 10
labelsList = coord.runConfig(normPf.head(100))

# Run best k spectral clustering
resultDict = coord.run(normPf.head(100), 3, labelsList)
strResDict = dictToStr(resultDict)
writeToFile(strResDict, resultsPath, 'w')
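
Coordinator.normalize(), runConfig() and run() are not shown here; a hedged sketch of what the normalization and a single spectral-clustering run might look like, assuming column-wise min-max scaling and scikit-learn's SpectralClustering:

import pandas as pd
from sklearn.cluster import SpectralClustering

def normalize_sketch(df: pd.DataFrame) -> pd.DataFrame:
    # column-wise min-max scaling to [0, 1]; constant columns would need
    # special handling to avoid division by zero
    return (df - df.min()) / (df.max() - df.min())

def spectral_labels_sketch(df: pd.DataFrame, k: int):
    # one clustering run for a single k; runConfig() presumably sweeps k = 2..10
    model = SpectralClustering(n_clusters=k, affinity='nearest_neighbors',
                               random_state=0)
    return model.fit_predict(df.values)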