Example #1
    def __init__(self):
        super().__init__(self.get_prefix, case_insensitive=True)
        logging.init_logging()

        self.db_client = init_client(self.loop)
        if self.db_client:
            logger.info("Connected to Database.")
        self.config = get_config()
        if self.config["trello"]["enabled"]:
            self.trello_client, self.trello_board = trelloinit(self.config)
        self.admin_db = self.db_client["management"]
        self.users_db = self.db_client["users"]
        if self.config["slothpixel_key"]:
            self.slothpixel_key_string = f'?key={self.config["slothpixel_key"]}'
        else:
            self.slothpixel_key_string = ''
        self.guilds_db = self.db_client["guilds"]
        self.scammer_db = self.db_client["scammer"]
        self.status_list = cycle(self.config["status_list"])
        self.remove_command("help")

        self.api_keys = self.config["api_keys"]

        if not self.api_keys:
            logger.warning(
                "PLEASE SET AT LEAST ONE API KEY, ELSE THE BOT WON'T WORK.")

        self.events = []

        self.load_cogs()
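
Note: the `get_prefix` callable handed to `super().__init__` is not shown in this example. Below is a minimal sketch of what such a resolver could look like, assuming per-guild prefixes live in the `guilds_db` Mongo database; the "config" collection, "prefix" field, and "!" fallback are hypothetical, not taken from the original project. Because it is passed as a bound method, discord.py invokes it with the bot and the incoming message, and it may be a coroutine.

    async def get_prefix(self, bot, message):
        # Sketch only: resolve the command prefix per guild.
        default_prefix = "!"  # assumed fallback prefix
        if message.guild is None:
            return default_prefix
        # The "config" collection and "prefix" field are assumptions.
        doc = await self.guilds_db["config"].find_one({"_id": message.guild.id})
        return doc["prefix"] if doc and doc.get("prefix") else default_prefix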
Example #2
    def __init__(self):
        super().__init__(self.get_prefix,
                         case_insensitive=True,
                         intents=intents)

        logging.init_logging()

        logger.info([z for z in self.intents])
        self.db_client = init_client(self.loop)
        if self.db_client:
            logger.info("Connected to Database.")
        self.config = get_config()
        self.custom_emojis = get_config("emojis")
        if self.config["trello"]["enabled"]:
            self.trello_client, self.trello_board = trelloinit(self.config)
        self.admin_db = self.db_client["management"]
        self.users_db = self.db_client["users"]
        if self.config["slothpixel_key"]:
            self.slothpixel_key_string = f'?key={self.config["slothpixel_key"]}'
        else:
            self.slothpixel_key_string = ''
        if self.config["stats_api"] == "default":
            self.stats_api = "http://hypixel-skybot.ddns.net:3000/stats"
        else:
            self.stats_api = self.config["stats_api"]
        self.guilds_db = self.db_client["guilds"]
        self.scammer_db = self.db_client["scammer"]
        self.status_list = cycle(self.config["status_list"])
        self.remove_command("help")

        self.api_keys = self.config["api_keys"]

        if not self.api_keys:
            logger.warning(
                "PLEASE SET AT LEAST ONE API KEY, ELSE THE BOT WON'T WORK.")

        self.events = []

        self.slash = SlashCommand(self, sync_commands=True)

        self.load_cogs()
        self.start_time = time()
Example #3
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import os
sys.path.insert(0, os.path.abspath('.'))

import paddle
from configs import argparser as parser
from utils.logging import init_logging

if __name__ == '__main__':
    args = parser.parse_args()
    if args.is_static:
        from static.train import train
        paddle.enable_static()
    else:
        from dynamic.train import train

    rank = int(os.getenv("PADDLE_TRAINER_ID", 0))
    os.makedirs(args.output, exist_ok=True)
    init_logging(rank, args.output)
    parser.print_args(args)
    train(args)
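
The `init_logging(rank, args.output)` helper imported from `utils.logging` is not shown here. A minimal sketch of what it might do in a multi-trainer run, assuming each rank logs both to stderr and to its own file under the output directory (the handler layout and file naming are assumptions):

import logging
import os

def init_logging(rank, output_dir):
    # Assumed behavior: stream to stderr and write a per-rank log file.
    handlers = [
        logging.StreamHandler(),
        logging.FileHandler(os.path.join(output_dir, f"worker_{rank}.log")),
    ]
    logging.basicConfig(level=logging.INFO,
                        format="%(asctime)s %(levelname)s %(message)s",
                        handlers=handlers)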
Example #4
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)
        if torch.cuda.is_available():
            self.device = 'cuda:0'
            print(self.device)
        else:
            self.device = 'cpu'
        
        self.loss_weight = torch.tensor([float(w) for w in self.class_weights.split(',')]).to(self.device)
        if self.exp_name == 'crim13':
            # load input data
            self.train_data = np.load(self.train_data)
            self.test_data = np.load(self.test_data)
            self.valid_data = None
            self.train_labels = np.load(self.train_labels)
            self.test_labels = np.load(self.test_labels)
            self.valid_labels = None
            assert self.train_data.shape[-1] == self.test_data.shape[-1] == self.input_size
            if self.valid_data is not None and self.valid_labels is not None:
                self.valid_data = np.load(self.valid_data)
                self.valid_labels = np.load(self.valid_labels)
                assert self.valid_data.shape[-1] == self.input_size

            self.batched_trainset, self.validset, self.testset = prepare_datasets(
                self.train_data, self.valid_data, self.test_data,
                self.train_labels, self.valid_labels, self.test_labels,
                normalize=self.normalize, train_valid_split=self.train_valid_split,
                batch_size=self.batch_size)
        elif self.exp_name == 'mars_an':
            # start MARS
            train_datasets = self.train_data.split(",")
            train_raw_features = []
            train_raw_annotations = []
            for fname in train_datasets:
                data = np.load(fname, allow_pickle=True)
                train_raw_features.extend(data["features"])
                train_raw_annotations.extend(data["annotations"])
            test_data = np.load(self.test_data, allow_pickle=True)

            test_raw_features = test_data["features"]
            test_raw_annotations = test_data["annotations"]
            valid_raw_features = None
            valid_raw_annotations = None
            valid_features = None
            valid_labels = None
            # Check the # of features of the first frame of the first video
            assert len(train_raw_features[0][0]) == len(test_raw_features[0][0]) == self.input_size

            if self.valid_data is not None:
                valid_data = np.load(self.valid_data, allow_pickle=True)
                valid_raw_features = valid_data["features"]
                valid_raw_annotations = valid_data["annotations"]
                assert len(valid_raw_features[0][0]) == self.input_size

            behave_dict = read_into_dict('../near_code_7keypoints/data/MARS_data/behavior_assignments_3class.txt')
            # Reshape the data to trajectories of length 100
            train_features, train_labels = preprocess(train_raw_features, train_raw_annotations, self.train_labels, behave_dict)
            test_features, test_labels = preprocess(test_raw_features, test_raw_annotations, self.train_labels, behave_dict)
            if valid_raw_features is not None and valid_raw_annotations is not None:
                valid_features, valid_labels = preprocess(valid_raw_features, valid_raw_annotations, self.train_labels, behave_dict)
            self.batched_trainset, self.validset, self.testset = prepare_datasets(
                train_features, valid_features, test_features,
                train_labels, valid_labels, test_labels,
                normalize=self.normalize, train_valid_split=self.train_valid_split,
                batch_size=self.batch_size)
            # end MARS
        else:
            log_and_print('bad experiment name')
            return
        
        
        # self.fix()

        # add subprogram in
        # if self.device == 'cpu':
        #     self.base_program = CPU_Unpickler(open("%s/subprogram.p" % self.base_program_name, "rb")).load()
        # else:
        #     self.base_program = pickle.load(open("%s/subprogram.p" % self.base_program_name, "rb"))
        if self.device == 'cpu':
            self.base_program = CPU_Unpickler(open("%s.p" % self.base_program_name, "rb")).load()
        else:
            self.base_program = pickle.load(open("%s.p" % self.base_program_name, "rb"))
        
        base_folder = os.path.dirname(self.base_program_name)
        # self.weights_dict = np.load(os.path.join(base_folder,'weights.npy'), allow_pickle=True).item()
        
        
        data = self.base_program.submodules
        l = []
        traverse(data,l)
        log_and_print(l)
        # if self.hole_node_ind < 0:
        #     self.hole_node_ind = len(l) + self.hole_node_ind
        # wrap a negative index into range
        self.hole_node_ind %= len(l)

        self.hole_node = l[self.hole_node_ind]
        

        # for NEAR on a subtree
        self.curr_iter = 0
        self.program_path = None


        if self.exp_id is not None:
            self.trial = self.exp_id
        if self.eval:
            self.evaluate()
        else:
            now = datetime.now()
            self.timestamp = str(datetime.timestamp(now)).split('.')[0][4:]
            log_and_print(self.timestamp)
            full_exp_name = "{}_{}_{}_{}".format(
                self.exp_name, self.algorithm, self.trial, self.timestamp)  # unique timestamp for each NEAR run
            self.save_path = os.path.join(self.save_dir, full_exp_name)
            if not os.path.exists(self.save_path):
                os.makedirs(self.save_path)
            init_logging(self.save_path)
            if self.neurh:
                log_and_print(self.base_program_name)
                self.neural_h()
            else:
                self.run_near()
                self.evaluate_final()
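
`CPU_Unpickler` is used above to load a pickled program on a machine without a GPU, but its definition is not included in the snippet. A common way to implement it is the pattern below, which remaps torch storages to the CPU during unpickling; this is a sketch of the widely used recipe, not necessarily this project's exact version:

import io
import pickle

import torch

class CPU_Unpickler(pickle.Unpickler):
    # Redirect CUDA tensor storage loading to the CPU while unpickling.
    def find_class(self, module, name):
        if module == 'torch.storage' and name == '_load_from_bytes':
            return lambda b: torch.load(io.BytesIO(b), map_location='cpu')
        return super().find_class(module, name)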
Example #5
    if device != 'cpu':
        lossfxn = lossfxn.cuda()

    train_config = {
        'lr': args.learning_rate,
        'neural_epochs': args.neural_epochs,
        'symbolic_epochs': args.symbolic_epochs,
        'optimizer': optim.Adam,
        'lossfxn': lossfxn,
        'evalfxn': label_correctness,
        'num_labels': args.num_labels
    }

    # Initialize logging
    init_logging(save_path)
    log_and_print("Starting experiment {}\n".format(full_exp_name))

    # Initialize program graph
    program_graph = ProgramGraph(DSL_DICT,
                                 CUSTOM_EDGE_COSTS,
                                 args.input_type,
                                 args.output_type,
                                 args.input_size,
                                 args.output_size,
                                 args.max_num_units,
                                 args.min_num_units,
                                 args.max_num_children,
                                 args.max_depth,
                                 args.penalty,
                                 ite_beta=args.ite_beta)
Example #6

@logging_function_decorator(level=DEBUG)
def main3():
    """タイムアウトした場合 (sleepを分割)"""
    with time_limit_with_thread(3):
        try:
            for _ in range(10):
                time.sleep(1)
        except TimeoutException:
            logput('timeout')

@logging_function_decorator(level=DEBUG)
def main4():
    """タイムアウトが後処理中に割り込む危険性がある"""
    with time_limit_with_thread(3):
        try:
            time.sleep(1)
        except TimeoutException:
            logput('timeout.')
        finally:
            # post-processing
            logput('post process start.')
            time.sleep(5)
            logput('post process end.')

if __name__ == '__main__':
    init_logging()
    main1()
    main2()
    main3()
    main4()
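
`time_limit_with_thread` is not defined in this snippet. One plausible sketch, assuming it raises `TimeoutException` asynchronously in the main thread from a timer thread (which is exactly why `main4` warns that the timeout can interrupt post-processing):

import contextlib
import ctypes
import threading

class TimeoutException(Exception):
    pass

@contextlib.contextmanager
def time_limit_with_thread(seconds):
    main_thread_id = threading.get_ident()

    def interrupt():
        # Asynchronously raise TimeoutException in the main thread.
        ctypes.pythonapi.PyThreadState_SetAsyncExc(
            ctypes.c_long(main_thread_id), ctypes.py_object(TimeoutException))

    timer = threading.Timer(seconds, interrupt)
    timer.start()
    try:
        yield
    finally:
        timer.cancel()

The async exception only lands at the next bytecode boundary, so it can surface inside a `finally` block, as `main4` demonstrates.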
Example #7
            'propagate': False
        },
        'celery': {
            'handlers': ['console', 'sentry'],
            'level': 'DEBUG',
            'propagate': True
        },
        'app': {
            'handlers': ['console', 'sentry'],
            'level': 'DEBUG',
            'propagate': False
        },
    }
}

logging.init_logging()

REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': [],
    'DEFAULT_PERMISSION_CLASSES': [],
}

# CELERY namespace
# todo: investigate why tasks are not loaded automatically
CELERY_IMPORTS = ('app.channels.at.tasks', 'app.channels.firebase.tasks',
                  'app.channels.smpp.tasks')

BROKER_URL = os.environ['BROKER_URL']

BROKER_USE_SSL = False
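
Here `logging` is a project-local module, not the standard library, so `logging.init_logging()` presumably wires the LOGGING dict above into Python's logging system. A minimal sketch under that assumption (not the project's confirmed implementation):

from logging.config import dictConfig

from django.conf import settings

def init_logging():
    # Assumption: LOGGING is the dict defined in the settings module above.
    dictConfig(settings.LOGGING)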
Example #8
from flask import Flask, request, Response, jsonify
import json
import os

from utils.helpers import read_file, gen_file_body, gen_error_body, gen_warn_body
from utils.logging import init_logging
from settings import ROOT_DIR

application = Flask(__name__)
init_logging(application, f'{ROOT_DIR}/logs/api.log')

not_found_msg = 'The `file=` path param was not found in the query string'
no_such_file_msg = 'No such file or directory'


@application.route('/api/logs', methods=['GET'])
def logs():
    if 'file' not in request.args:
        return not_found(not_found_msg)

    filename = request.args['file']
    if not os.path.isfile(filename):
        return not_found(no_such_file_msg)
    body = gen_file_body(filename, list(read_file(filename).splitlines()))

    response = Response(json.dumps(body, indent=4),
                        status=200,
                        mimetype='application/json')
    application.logger.info(f"{response.status}")
    return response
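
The `not_found` helper called above is not part of the snippet. A minimal sketch consistent with how it is used, assuming `gen_error_body` (imported above) wraps a message into an error payload:

def not_found(message):
    # Hypothetical helper: wrap an error message in a 404 JSON response.
    body = gen_error_body(message)
    response = Response(json.dumps(body, indent=4),
                        status=404,
                        mimetype='application/json')
    application.logger.warning(f"{response.status}")
    return response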
Example #9
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)
        if torch.cuda.is_available():
            self.device = 'cuda:0'
            print(self.device)
        else:
            self.device = 'cpu'
        
        self.loss_weight = torch.tensor([float(w) for w in self.class_weights.split(',')]).to(self.device)
        if self.exp_name == 'crim13' or 'bball' in self.exp_name:
            # load input data
            self.train_data = np.load(self.train_data)
            self.test_data = np.load(self.test_data)
            self.valid_data = None
            self.train_labels = np.load(self.train_labels)
            self.test_labels = np.load(self.test_labels)
            self.valid_labels = None
            assert self.train_data.shape[-1] == self.test_data.shape[-1] == self.input_size
            if self.valid_data is not None and self.valid_labels is not None:
                self.valid_data = np.load(self.valid_data)
                self.valid_labels = np.load(self.valid_labels)
                assert self.valid_data.shape[-1] == self.input_size

            self.batched_trainset, self.validset, self.testset = prepare_datasets(
                self.train_data, self.valid_data, self.test_data,
                self.train_labels, self.valid_labels, self.test_labels,
                normalize=self.normalize, train_valid_split=self.train_valid_split,
                batch_size=self.batch_size)
        elif self.exp_name == 'mars_an':
            # start MARS
            train_datasets = self.train_data.split(",")
            train_raw_features = []
            train_raw_annotations = []
            for fname in train_datasets:
                data = np.load(fname, allow_pickle=True)
                train_raw_features.extend(data["features"])
                train_raw_annotations.extend(data["annotations"])
            test_data = np.load(self.test_data, allow_pickle=True)

            test_raw_features = test_data["features"]
            test_raw_annotations = test_data["annotations"]
            valid_raw_features = None
            valid_raw_annotations = None
            valid_features = None
            valid_labels = None
            # Check the # of features of the first frame of the first video
            assert len(train_raw_features[0][0]) == len(test_raw_features[0][0]) == self.input_size

            if self.valid_data is not None:
                valid_data = np.load(self.valid_data, allow_pickle=True)
                valid_raw_features = valid_data["features"]
                valid_raw_annotations = valid_data["annotations"]
                assert len(valid_raw_features[0][0]) == self.input_size

            behave_dict = read_into_dict('../near_code_7keypoints/data/MARS_data/behavior_assignments_3class.txt')
            # Reshape the data to trajectories of length 100
            train_features, train_labels = preprocess(train_raw_features, train_raw_annotations, self.train_labels, behave_dict)
            test_features, test_labels = preprocess(test_raw_features, test_raw_annotations, self.train_labels, behave_dict)
            if valid_raw_features is not None and valid_raw_annotations is not None:
                valid_features, valid_labels = preprocess(valid_raw_features, valid_raw_annotations, self.train_labels, behave_dict)
            self.batched_trainset, self.validset, self.testset = prepare_datasets(
                train_features, valid_features, test_features,
                train_labels, valid_labels, test_labels,
                normalize=self.normalize, train_valid_split=self.train_valid_split,
                batch_size=self.batch_size)
            # end MARS

        else:
            log_and_print('bad experiment name')
            return
        
        
        now = datetime.now()
        self.timestamp = str(datetime.timestamp(now)).split('.')[0][4:]
        log_and_print(self.timestamp)
        full_exp_name = "{}_{}_{}_{}".format(
            self.exp_name, self.algorithm, self.trial, self.timestamp)  # unique timestamp for each NEAR run
        self.save_path = os.path.join(self.save_dir, full_exp_name)
        if not os.path.exists(self.save_path):
            os.makedirs(self.save_path)
        init_logging(self.save_path)

        visited_nodes = set()  # don't revisit nodes that have already been visited

        num_iter = 10  # TODO: make this a parameter later
        for i in range(num_iter):

            l = self.load_base_program()  # populates self.base_program
            log_and_print("Base program performance:")
            self.evaluate_final()
            self.hole_node_ind = self.neural_h()
            self.hole_node = l[self.hole_node_ind]
            log_and_print("Node selected: %d" % self.hole_node_ind)

            # set up path to save program
            # self.save_path = os.path.join(self.save_path, str(num_iter))

            # run NEAR
            self.run_near(i)

            # change the base program name
            self.base_program_name = os.path.join(self.save_path, "fullprogram_%d" % i)

            # fine-tune the result with additional epochs
            self.train_more_epochs(self.base_program_name)