Example #1
    def __init__(
        self,
        character_name,
        class_,
        race,
        STR,
        CON,
        DEX,
        INT,
        WIS,
        CHA,
        trained_skills,
        feats,
        level,
        race_spec=None,
        class_spec=None,
    ):
        self.name = character_name
        self.level = level
        self.race = util.load_module("races", race, race_spec)
        self.class_ = util.load_module("classes", class_, class_spec)
        self.initiative = initiative.Initiative()
        self.ability_scores = ability.AbilityScores(STR, CON, DEX, INT, WIS, CHA)
        self.defenses = defense.Defenses()
        self.skills = skill.Skills(trained_skills, self.race.skills)
        self.speed = speed.Speed(self.race.speed)
        self.feats = [util.load_module("feats", feat, None) for feat in feats]
        self.senses = sense.Senses()

        if self.class_.spec is not None:
            specials.specials_map[self.class_.spec](self)
        if self.race.spec is not None:
            specials.specials_map[self.race.spec](self)
        for feat in self.feats:
            feat.special(self)
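
All of these examples revolve around a dynamic `load_module` helper. For the three-argument form used above (`util.load_module("races", race, race_spec)`), a minimal sketch of what such a loader could look like, assuming a package layout like `races/elf.py` — hypothetical, not this project's actual implementation:

import importlib

def load_module(package, name, spec=None):
    # Hypothetical loader: resolve "package.name[.spec]" and import it.
    # The meaning of `spec` is guessed from the call sites above.
    path = "{}.{}".format(package, name)
    if spec is not None:
        path = "{}.{}".format(path, spec)
    return importlib.import_module(path)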
Example #2
def fit(cnf, predict, per_patient, features_file, n_iter, blend_cnf, test_dir):

    config = util.load_module(cnf).config
    image_files = data.get_image_files(config.get('train_dir'))
    names = data.get_names(image_files)
    labels = data.get_labels(names).astype(np.float32)[:, np.newaxis]

    if features_file is not None:
        runs = {'run': [features_file]}
    else:
        runs = data.parse_blend_config(yaml.load(open(blend_cnf)))

    scalers = {run: StandardScaler() for run in runs}

    tr, te = data.split_indices(image_files, labels)

    y_preds = []
    for i in range(n_iter):
        print("iteration {} / {}".format(i + 1, n_iter))
        for run, files in list(runs.items()):
            print("fitting features for run {}".format(run))
            X = data.load_features(files)
            X = scalers[run].fit_transform(X)
            X = data.per_patient_reshape(X) if per_patient else X
            est = get_estimator(X.shape[1], image_files, labels,
                                eval_size=0.0 if predict else 0.1)
            est.fit(X, labels)
            if not predict:
                y_pred = est.predict(X[te]).ravel()
                y_preds.append(y_pred)
                y_pred = np.mean(y_preds, axis=0)
                y_pred = np.clip(np.round(y_pred).astype(int),
                                 np.min(labels), np.max(labels))
                print("kappa after run {}, iter {}: {}".format(
                    run, i, util.kappa(labels[te], y_pred)))
                print("confusion matrix")
                print(confusion_matrix(labels[te], y_pred))
            else:
                X = data.load_features(files, test=True)
                X = scalers[run].transform(X)
                X = data.per_patient_reshape(X) if per_patient else X
                y_pred = est.predict(X).ravel()
                y_preds.append(y_pred)

    if predict:
        y_pred = np.mean(y_preds, axis=0)
        y_pred = np.clip(np.round(y_pred),
                         np.min(labels), np.max(labels)).astype(int)
        submission_filename = util.get_submission_filename()
        image_files = data.get_image_files(test_dir or config.get('test_dir'))
        names = data.get_names(image_files)
        image_column = pd.Series(names, name='image')
        level_column = pd.Series(y_pred, name='level')
        predictions = pd.concat([image_column, level_column], axis=1)

        print("tail of predictions file")
        print(predictions.tail())

        predictions.to_csv(submission_filename, index=False)
        print("saved predictions to {}".format(submission_filename))
Example #3
def main(cnf, weights_from):

    config = util.load_module(cnf).config
    # print(config)
    if weights_from is None:
        weights_from = config.weights_file
    else:
        weights_from = str(weights_from)
    print(config.get('train_dir'))
    files = data.get_image_files(config.get('train_dir'))
    names = data.get_names(files)
    labels = data.get_labels(names).astype(np.float32)
    print("Checkpoint 5")
    net = create_net(config)
    print("Checkpoint 6")
    print(weights_from)
    # print(net.load_params_from())
    try:
        print("Checkpoint 7")
        net.load_params_from(weights_from)
        print("Checkpoint 8")
        print("loaded weights from {}".format(weights_from))
    except IOError:
        print("couldn't load weights starting from scratch")

    print("fitting ...")
    print(files)
    print(labels)
    net.fit(files, labels)
Example #4
def main(cnf, weights_from):

    config = util.load_module(cnf).config

    if weights_from is None:
        weights_from = config.weights_file
    else:
        weights_from = str(weights_from)

    files = data.get_image_files(config.get('train_dir'))
    names = data.get_names(files)
    labels = data.get_labels(names).astype(np.float32)

    net = create_net(config)

    try:
        net.load_params_from(weights_from)
        print("loaded weights from {}".format(weights_from))
    except IOError:
        print("couldn't load weights starting from scratch")
    print("Shape of files: " + str(files.shape))
    print("Shape of labels: " + str(labels.shape))
    start = time.time()
    print("fitting ...")
    net.fit(files, labels)
    end = time.time()
    print("Time elapsed for fitting: " + str(end - start))
Example #5
    def GoOn(self, keyword):

        #######################
        #   LOADING MODULES   #
        #######################

        with load_module():

            issetPyperclip = True
            issetWebbrowser = True

            try:
                import pyperclip
            except Exception as e:
                issetPyperclip = False

            try:
                import webbrowser
            except Exception as e:
                issetWebbrowser = False

        if (issetWebbrowser):
            webbrowser.open(keyword)
        if (issetPyperclip):
            pyperclip.copy(keyword)
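
`load_module()` here is imported from the plugin's `util` module (see the import in Example #20) and wraps third-party imports in a `with` block. Its body isn't shown on this page; one plausible reading is a context manager that temporarily puts a bundled `lib` directory on `sys.path` while the imports run — the following is purely a sketch of that assumption:

import os
import sys
from contextlib import contextmanager

@contextmanager
def load_module(lib_dir="lib"):
    # Assumed behavior: expose bundled dependencies during the imports.
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), lib_dir)
    sys.path.insert(0, path)
    try:
        yield
    finally:
        sys.path.remove(path)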
Example #6
    def _install_cmds(self, srv_addr=None):
        """install different command set to nodes according to their type
        can manually set the server address
        """
        scen = load_module(self.options.scenario)
        botmaster_desc = scen.botmaster_desc
        server_desc = scen.server_desc
        client_desc = scen.client_desc
        # botmaster_desc['srv_addr'] = "10.1.1.1"
        print 'srv_addr, ', srv_addr
        if srv_addr:
            botmaster_desc['srv_addr'] = srv_addr
            server_desc['srv_addr'] = srv_addr
            client_desc['srv_addr'] = srv_addr

        for i in xrange(self.node_num):
            if i in self.botmaster_id_set:
                cmd = scen.BotMaster(botmaster_desc)
            elif i in self.server_id_set:
                cmd = scen.ServerCMD(server_desc)
            elif i in self.client_id_set:
                cmd = scen.ClientCMD(client_desc)
            else:
                continue
            cmd.install(self.get_node(i))
Example #7
def fit(cnf, predict, per_patient, features_file, n_iter, blend_cnf, test_dir):

    config = util.load_module(cnf).config
    image_files = data.get_image_files(config.get('train_dir'))
    names = data.get_names(image_files)
    labels = data.get_labels(names).astype(np.float32)[:, np.newaxis]

    if features_file is not None:
        runs = {'run': [features_file]}
    else:
        runs = data.parse_blend_config(yaml.load(open(blend_cnf)))

    scalers = {run: StandardScaler() for run in runs}

    tr, te = data.split_indices(image_files, labels)

    y_preds = []
    for i in range(n_iter):
        print("iteration {} / {}".format(i + 1, n_iter))
        for run, files in runs.items():
            print("fitting features for run {}".format(run))
            X = data.load_features(files)
            X = scalers[run].fit_transform(X)
            X = data.per_patient_reshape(X) if per_patient else X
            est = get_estimator(X.shape[1], image_files, labels,
                                eval_size=0.0 if predict else 0.1)
            est.fit(X, labels)
            if not predict:
                y_pred = est.predict(X[te]).ravel()
                y_preds.append(y_pred)
                y_pred = np.mean(y_preds, axis=0)
                y_pred = np.clip(np.round(y_pred).astype(int),
                                 np.min(labels), np.max(labels))
                print("kappa after run {}, iter {}: {}".format(
                    run, i, util.kappa(labels[te], y_pred)))
                print("confusion matrix")
                print(confusion_matrix(labels[te], y_pred))
            else:
                X = data.load_features(files, test=True)
                X = scalers[run].transform(X)
                X = data.per_patient_reshape(X) if per_patient else X
                y_pred = est.predict(X).ravel()
                y_preds.append(y_pred)

    if predict:
        y_pred = np.mean(y_preds, axis=0)
        y_pred = np.clip(np.round(y_pred),
                         np.min(labels), np.max(labels)).astype(int)
        submission_filename = util.get_submission_filename()
        image_files = data.get_image_files(test_dir or config.get('test_dir'))
        names = data.get_names(image_files)
        image_column = pd.Series(names, name='image')
        level_column = pd.Series(y_pred, name='level')
        predictions = pd.concat([image_column, level_column], axis=1)

        print("tail of predictions file")
        print(predictions.tail())

        predictions.to_csv(submission_filename, index=False)
        print("saved predictions to {}".format(submission_filename))
Example #8
def transform(cnf, n_iter, skip, test, train, weights_from, test_dir):

    config = util.load_module(cnf).config

    runs = {}
    if train:
        runs["train"] = config.get("train_dir")
    if test or test_dir:
        runs["test"] = test_dir or config.get("test_dir")

    net = nn.create_net(config)

    if weights_from is None:
        net.load_params_from(config.weights_file)
        print("loaded weights from {}".format(config.weights_file))
    else:
        weights_from = str(weights_from)
        net.load_params_from(weights_from)
        print("loaded weights from {}".format(weights_from))

    if n_iter > 1:
        tfs, color_vecs = tta.build_quasirandom_transforms(
            n_iter, skip=skip, color_sigma=config.cnf["sigma"], **config.cnf["aug_params"]
        )
    else:
        tfs, color_vecs = tta.build_quasirandom_transforms(
            n_iter, skip=skip, color_sigma=0.0, **data.no_augmentation_params
        )

    for run, directory in sorted(runs.items(), reverse=True):

        print("extracting features for files in {}".format(directory))
        tic = time.time()

        files = data.get_image_files(directory)

        Xs, Xs2 = None, None

        for i, (tf, color_vec) in enumerate(zip(tfs, color_vecs), start=1):

            print("{} transform iter {}".format(run, i))

            X = net.transform(files, transform=tf, color_vec=color_vec)
            if Xs is None:
                Xs = X
                Xs2 = X ** 2
            else:
                Xs += X
                Xs2 += X ** 2

            print("took {:6.1f} seconds".format(time.time() - tic))
            if i % 5 == 0 or n_iter < 5:
                std = np.sqrt((Xs2 - Xs ** 2 / i) / (i - 1))
                config.save_features(Xs / i, i, skip=skip, test=True if run == "test" else False)
                config.save_std(std, i, skip=skip, test=True if run == "test" else False)
                print("saved {} iterations".format(i))
Example #9
def transform(cnf=cnf,
              n_iter=n_iter,
              skip=skip,
              test=test,
              train=train,
              weights_from=weights_from,
              test_dir=test_dir):

    config = util.load_module(cnf).config

    config.cnf['batch_size_train'] = 128
    config.cnf['batch_size_test'] = 128

    runs = {}
    if train:
        runs['train'] = config.get('train_dir')
    if test or test_dir:
        runs['test'] = test_dir or config.get('test_dir')

    net = nn.create_net(config)

    if weights_from is None:
        net.load_params_from(config.weights_file)
        print("loaded weights from {}".format(config.weights_file))
    else:
        weights_from = str(weights_from)
        net.load_params_from(weights_from)
        print("loaded weights from {}".format(weights_from))

    if n_iter > 1:
        tfs, color_vecs = tta.build_quasirandom_transforms(
            n_iter,
            skip=skip,
            color_sigma=config.cnf['sigma'],
            **config.cnf['aug_params'])
    else:
        tfs, color_vecs = tta.build_quasirandom_transforms(
            n_iter, skip=skip, color_sigma=0.0, **data.no_augmentation_params)

    ret_val = []
    for run, directory in sorted(runs.items(), reverse=True):

        print("extracting features for files in {}".format(directory))
        tic = time.time()
        files = data.get_image_files(directory)

        Xs, Xs2 = None, None

        for i, (tf, color_vec) in enumerate(zip(tfs, color_vecs), start=1):

            print("{} transform iter {}".format(run, i))

            X = net.transform(files[:1000], transform=tf, color_vec=color_vec)
            ret_val.append(X)
    return ret_val, net
Example #10
    def _mob_create(self, e, mob_class):
        if mob_class not in self._cache:
            module = load_module(mob_class, f"{config.mobs}/{mob_class}.py")
            self._cache[mob_class] = getattr(module, mob_class.capitalize())

        mob = self._cache[mob_class]()

        self.emit("cmd_load_sprite", mob.entity, frames=(2, 3))

        self.emit("cmd_respawn", mob.entity)
        self.emit("cmd_obj_inited", e.source, entity=mob.entity)
Example #11
def main(cnf, classes, weights_from, predict):

    config = util.load_module(cnf).config
    files = data.get_image_files(config.get('train_dir'))
    names = data.get_names(files)
    names = [int(x) for x in names]
    data.classes = int(classes)
    labels = data.get_labels(names)
    net = create_net(config)

    print files.shape
    print labels.shape
    if predict:
        if weights_from is None:
            weights_from = config.weights_file
        else:
            weights_from = str(weights_from)
        print weights_from
        try:
            net.load_params_from(weights_from)
            print("loaded weights from {}".format(weights_from))
        except IOError:
            print("couldn't load weights starting from scratch")
    if not predict:
        print("fitting ...")
        net.fit(files, labels)
    else:
        print("predicting ...")
        test_files = data.get_image_files(config.get('test_dir'))
        y_pred = net.predict(test_files)
        y_pred = y_pred.transpose()
        print y_pred
        y_pred = np.clip(np.round(y_pred), np.min(labels),
                         np.max(labels)).astype(int)
        #print y_pred
        submission_filename = util.get_submission_filename()
        image_files = data.get_image_files(config.get('test_dir'))
        names = data.get_names(image_files)
        image_column = pd.Series(names, name='photo_id')
        level_column = pd.DataFrame(y_pred)  #name='labels')
        level_column = level_column.apply(lambda x: string_submit(x))
        predictions = pd.concat([image_column, level_column], axis=1)
        print("tail of predictions file")
        print(predictions.tail())
        predictions.columns = ['photo_id', 'labels']
        predictions.to_csv(submission_filename, index=False)
        print("saved predictions to {}".format(submission_filename))
Example #12
def main(cnf, classes, weights_from, predict):

    config = util.load_module(cnf).config
    files = data.get_image_files(config.get('train_dir'))
    names = data.get_names(files)
    names = [int(x) for x in names]
    data.classes = int(classes)
    labels = data.get_labels(names)
    net = create_net(config)

    print files.shape
    print labels.shape
    if predict:
        if weights_from is None:
            weights_from = config.weights_file
        else:
            weights_from = str(weights_from)
        print weights_from
        try:
            net.load_params_from(weights_from)
            print("loaded weights from {}".format(weights_from))
        except IOError:
            print("couldn't load weights, starting from scratch")
    if not predict:
        print("fitting ...")
        net.fit(files, labels)
    else:
        print("predicting ...")
        test_files = data.get_image_files(config.get('test_dir'))
        y_pred = net.predict(test_files)
        y_pred = y_pred.transpose()
        print y_pred
        y_pred = np.clip(np.round(y_pred),
                         np.min(labels), np.max(labels)).astype(int)
        #print y_pred
        submission_filename = util.get_submission_filename()
        image_files = data.get_image_files(config.get('test_dir'))
        names = data.get_names(image_files)
        image_column = pd.Series(names, name='photo_id')
        level_column = pd.DataFrame(y_pred)  #name='labels')
        level_column = level_column.apply(lambda x: string_submit(x))
        predictions = pd.concat([image_column, level_column], axis=1)
        print("tail of predictions file")
        print(predictions.tail())
        predictions.columns = ['photo_id', 'labels']
        predictions.to_csv(submission_filename, index=False)
        print("saved predictions to {}".format(submission_filename))
Example #13
    def setup(self, bottom, top):
        """Setup the ResamplerDataLayer."""
        # parse the layer parameter string
        layer_config = self.param_str
        self.config = util.load_module(layer_config).config
        filenames = data.get_sentence(self.config.get('datafile'))
        labels = data.get_labels(self.config.get('labelfile'))
        self.sampleIter = iterator.SharedIterator(self.config, deterministic=True, batch_size=self.config.get('batch_size'))
        self.iterator = iter(self.sampleIter(filenames, labels))

        self._name_to_top_map = {
            'data': 0,
            'labels': 1}

        top[0].reshape(self.config.get('batch_size'), 3, self.config.get('h'), self.config.get('w'))

        top[1].reshape(self.config.get('batch_size'))
Example #14
def main(cnf, weights_from, fold, exp_run_folder, train_retina):
    config = util.load_module(cnf).config
    config.cnf['fold'] = fold  # used to change the directories for weights_best, weights_epoch and weights_final
    config.cnf['exp_run_folder'] = exp_run_folder
    protocol = data.settings['protocol']

    if train_retina != 'train_retina':
        folds = yaml.load(open('folds/' + protocol + '.yml'))
        f0, f1 = fold.split('x')
        train_list = folds['Fold_' + f0][int(f1) - 1]
        files = data.get_image_files(config.get('train_dir'), train_list)
    else:
        files = data.get_image_files(config.get('train_dir'))

    if weights_from is None:
        weights_from = config.weights_file
    else:
        weights_from = str(weights_from)

    names = data.get_names(files)
    labels = data.get_labels(names, label_file='folds/' + protocol +
                             '.csv').astype(np.int32)
    net = nn.create_net(config)

    try:
        net.load_params_from(weights_from)
        print("loaded weights from {}".format(weights_from))
    except IOError:
        print("couldn't load weights, starting from scratch")

    # Print layer info
    print("## Layer information")
    import nolearn
    layer_info = nolearn.lasagne.PrintLayerInfo()
    print(layer_info._get_greeting(net))
    layer_info, legend = layer_info._get_layer_info_conv(net)
    print(layer_info)
    print(legend)
    print("fitting ...")
    net.fit(files, labels)
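
The `fold` argument is evidently of the form `'<f0>x<f1>'` (e.g. `'1x2'`): the digit before the `x` picks a `Fold_N` entry in the YAML file, and the digit after it picks one of that entry's two halves. A tiny illustration of the parsing, with a made-up `folds` dict standing in for the YAML contents:

folds = {'Fold_1': [['img_a', 'img_b'], ['img_c', 'img_d']]}  # hypothetical
fold = '1x2'
f0, f1 = fold.split('x')
train_list = folds['Fold_' + f0][int(f1) - 1]  # -> ['img_c', 'img_d']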
Example #15
def main(cnf, weights_from):

    config = util.load_module(cnf).config

    if weights_from is None:
        weights_from = config.weights_file
    else:
        weights_from = str(weights_from)

    files = data.get_image_files(config.get('train_dir'))
    names = data.get_names(files)
    labels = data.get_labels(names).astype(np.float32)

    net = create_net(config)

    try:
        net.load_params_from(weights_from)
        print("loaded weights from {}".format(weights_from))
    except IOError:
        print("couldn't load weights starting from scratch")

    print("fitting ...")
    net.fit(files, labels)
Example #16
def build(cnf, weights_from):

    config = util.load_module(cnf).config

    if weights_from is None:
        weights_from = config.weights_file
    else:
        weights_from = str(weights_from)

    files = data.get_image_files(config.get('train_dir'))
    names = data.get_names(files)
    labels = data.get_labels(names).astype(np.float32)

    net = create_net(config)

    try:
        net.load_params_from(weights_from)
        print("loaded weights from {}".format(weights_from))
    except IOError:
        print("couldn't load weights starting from scratch")

    print("fitting ...")
    # net.fit(files, labels)
    return net, files, names, labels
Example #17
#@click.option('--blend_cnf', default='blend.yml', show_default=True,
#              help="Blending configuration file.")
#@click.option('--test_dir', default=None, show_default=True,
#              help="Override directory with test set images.")
cnf = 'configs/c_512_5x5_32.py'
predict = True
per_patient = True
features_file = None
n_iter = 3
blend_cnf = 'blend.yml'
test_dir = None


#def fit(cnf, predict, per_patient, features_file, n_iter, blend_cnf, test_dir):

config = util.load_module(cnf).config
image_files = data.get_image_files(config.get('train_dir'))
names = data.get_names(image_files)
labels = data.get_labels(names).astype(np.float32)[:, np.newaxis]

if features_file is not None:
    runs = {'run': [features_file]}
else:
    runs = data.parse_blend_config(yaml.load(open(blend_cnf)))

scalers = {run: StandardScaler() for run in runs}

tr, te = data.split_indices(image_files, labels)

y_preds = []
for i in range(n_iter):
Example #18
def transform(cnf, n_iter, skip, test, train, weights_from, test_dir):

    config = util.load_module(cnf).config

    config.cnf['batch_size_train'] = 128
    config.cnf['batch_size_test'] = 128

    runs = {}
    if train:
        runs['train'] = config.get('train_dir')
    if test or test_dir:
        runs['test'] = test_dir or config.get('test_dir')

    net = nn.create_net(config)

    if weights_from is None:
        net.load_params_from(config.weights_file)
        print("loaded weights from {}".format(config.weights_file))
    else:
        weights_from = str(weights_from)
        net.load_params_from(weights_from)
        print("loaded weights from {}".format(weights_from))

    if n_iter > 1:
        tfs, color_vecs = tta.build_quasirandom_transforms(
            n_iter,
            skip=skip,
            color_sigma=config.cnf['sigma'],
            **config.cnf['aug_params'])
    else:
        tfs, color_vecs = tta.build_quasirandom_transforms(
            n_iter, skip=skip, color_sigma=0.0, **data.no_augmentation_params)

    for run, directory in sorted(runs.items(), reverse=True):

        print("extracting features for files in {}".format(directory))
        tic = time.time()

        files = data.get_image_files(directory)

        Xs, Xs2 = None, None

        for i, (tf, color_vec) in enumerate(zip(tfs, color_vecs), start=1):

            print("{} transform iter {}".format(run, i))

            X = net.transform(files, transform=tf, color_vec=color_vec)
            if Xs is None:
                Xs = X
                Xs2 = X**2
            else:
                Xs += X
                Xs2 += X**2

            print('took {:6.1f} seconds'.format(time.time() - tic))
            if i % 10 == 0 or n_iter < 5:
                std = np.sqrt((Xs2 - Xs**2 / i) / (i - 1))
                config.save_features(Xs / i,
                                     i,
                                     skip=skip,
                                     test=True if run == 'test' else False)
                config.save_std(std,
                                i,
                                skip=skip,
                                test=True if run == 'test' else False)
                print('saved {} iterations'.format(i))
Example #19
        node.start()
    
    run_map = {
        'server':run_server,
        'client':run_client,
        'botmaster':run_botmaster,
    }

def parse_arguments():
    parser = argparse.ArgumentParser(description='imalse')
    
    scenario_ops = ut.get_scenario_option()
    parser.add_argument('-s', '--scenario', default='None',
            help='specify the scenario you want to execute. Scenarios available are: %s' % scenario_ops
            )
    
    parser.add_argument('-r', '--role', default='None',
            help='specify the role you want to emulate, 1.[server], 2.[client], 3.[botmaster]'
            )
    args = parser.parse_args()
    if args.scenario not in scenario_ops:
        parser.print_help()
        exit()
    return args

args = parse_arguments()
scen = ut.load_module(args.scenario)
print scen
Emulator().run_map[args.role](scen)

Example #20
from util import WoxEx, WoxAPI, load_module, Log

with load_module():
    import sqlite3
    import configparser
    import os
    import webbrowser
    import json
    import browser
    from os import path
    from typing import List

PROFILE_INI = "profiles.ini"
PLACES_SQLITE = 'places.sqlite'
CONFIG_JSON = 'config.json'

CONFIG_JSON_PATH = os.path.abspath(os.getcwd()) + '\\' + CONFIG_JSON

DEFAULT_CONFIG = {
    "db_path": "",
    "enable_history": False
}

DEFAULT_CONTEXT = [{
    'Title': f'Open config.json',
    'SubTitle': f'',
    'IcoPath': 'img\\config.ico',
    'JsonRPCAction': {
        'method': 'open_config',
        'parameters': [CONFIG_JSON_PATH]
    }
Example #21
    def query(self, keyword):

        results = list()

        #######################
        #   LOADING MODULES   #
        #######################

        with load_module():

            issetPyperclip = True
            issetRequests = True
            issetWebbrowser = True
            issetOs = True

            try:
                import pyperclip
            except Exception as e:
                issetPyperclip = False

            try:
                import requests
            except Exception as e:
                issetRequests = False

            try:
                import webbrowser
            except Exception as e:
                issetWebbrowser = False

            try:
                from os import path
            except Exception as e:
                issetOs = False

            requirements = False

            if (issetRequests == False):
                if (requirements == False):
                    requirements = ''
                else:
                    requirements = requirements + ', '
                requirements = requirements + 'requests'

            if (issetOs == False):
                if (requirements == False):
                    requirements = ''
                else:
                    requirements = requirements + ', '
                requirements = requirements + 'os'

            suggestions = False

            if (issetPyperclip == False):
                if (suggestions == False):
                    suggestions = ''
                else:
                    suggestions = suggestions + ', '
                suggestions = suggestions + 'pyperclip'

            if (issetWebbrowser == False):
                if (suggestions == False):
                    suggestions = ''
                else:
                    suggestions = suggestions + ', '
                suggestions = suggestions + 'webbrowser'

            if requirements != False:
                results.append({
                    "Title": 'Following modules are needed to install:',
                    "SubTitle": 'Modules: ' + requirements +
                    '. (ONLY MODULES ARE INSTALLED, THE PLUGIN CAN PERFORMING SUCCESSFUALLY. )',
                    "IcoPath": "Images/ico.ico",
                    "JsonRPCAction": {
                        "method":
                        "GoOn",
                        "parameters": [
                            'https://github.com/xiawenke/WordReference_Wox_Plugin',
                            issetWebbrowser, issetPyperclip
                        ],
                        "dontHideAfterAction":
                        False
                    }
                })
                return results

            if (suggestions != False) and (len(keyword.split()) == 0):
                results.append({
                    "Title":
                    '--------- Recommended Plugin Installation ---------',
                    "SubTitle": 'Modules: ' + suggestions +
                    '. (Only once these modules are installed can the plugin offer its full feature set.)',
                    "IcoPath": "Images/ico.ico",
                    "JsonRPCAction": {
                        "method":
                        "GoOn",
                        "parameters": [
                            'https://github.com/xiawenke/WordReference_Wox_Plugin',
                            issetWebbrowser, issetPyperclip
                        ],
                        "dontHideAfterAction":
                        False
                    }
                })

        ################################
        #     PROCESS CONFIG FILES     #
        ################################

        ### config.txt ###
        try:
            configOpen = open('config.txt')
            configLines = configOpen.readline()
            while configLines:
                config = configLines.split()

                if config[0] == 'DefaultDictionary:':
                    DefaultDictionaryVar = config[1]

                if config[0] == 'APIUrl:':
                    APIUrlVar = config[1]

                configLines = configOpen.readline()

            configOpen.close()

            DefaultDictionary = DefaultDictionaryVar
            APIUrl = APIUrlVar

        except Exception as identifier:
            # If the file doesn't exist, create it.
            DefaultDictionary = 'enzh'
            APIUrl = 'http://wr.miku-miku.online/'
            open('config.txt', 'w+').write(
                "DefaultDictionary: enzh\nAPIUrl: http://wr.miku-miku.online/")

        ### LanguageList.wr ###
        try:
            lLOpen = open('LanguageList.wr')

            if ('###FILE_VALID###' in lLOpen.readline()) == 0:
                raise RuntimeError('INVALID_LL')

            lLOpen.close()

        except Exception as identifier:
            # If the file doesn't exist, download it.
            recoveryURL = APIUrl + '?getLanguageList=True'
            try:
                LanguageList = requests.get(recoveryURL).text
            except Exception as identifier:
                # Maybe A NetWork Error.
                LanguageList = 'NULL'
            open('LanguageList.wr', 'w+').write(LanguageList)

        lLOpen = open('LanguageList.wr')
        lLLines = lLOpen.readline()
        lLPrepared = list()

        while lLLines:
            thisLine = lLLines.split()
            for i in thisLine:
                lLPrepared.append(i)
            lLLines = lLOpen.readline()

        lLOpen.close()

        ############################
        #     PREPARE VARIABLES    #
        ############################

        thisWord = 'Unset'
        thisDefinition = 'Unavailable'
        gotoURL = 'http://www.wordreference.com/'
        GitHubLink = 'https://github.com/xiawenke/WordReference_Wox_Plugin'
        apiUrl = APIUrl + '?url=&URL&'
        apiUrlV2ByWord = APIUrl + '/?word=&WORD&'
        apiUrlV2ByDict = APIUrl + '/?word=&WORD&&dict=&DICT&'

        ### Split Keywords ###
        seperatedKeys = keyword.split()
        thisDefinition = 'Go on search with keywords.'

        ### Show User Manual ###
        if len(seperatedKeys) == 0:
            supportedCommands = list()
            supportedCommands.append({
                'Name':
                'Simple Search (Using Default Dictionary)',
                'Command':
                'Command: wr <Word> | Example: wr hello'
            })
            supportedCommands.append({
                'Name':
                'Simple Search (Using Specific Dictionary)',
                'Command':
                'Command: wr <Dictionary> <Word> | Example: wr enja hello'
            })
            supportedCommands.append({
                'Name':
                'Multi-Compare Search (Using Default Dictionary)',
                'Command':
                'Command: wr [<Word1>, <Word2>, <...>] | Example: wr [program, code, software]'
            })
            supportedCommands.append({
                'Name':
                'Multi-Compare Search (Using Specific Dictionary)',
                'Command':
                'Command: wr [<Word1>(<Dict1>), <Word2>(<Dict2>), <...>(...)] | Example: wr [hello(enja), hello(enzh), world(enpt)]'
            })
            supportedCommands.append({
                'Name': 'Show All Supported Dictionaries',
                'Command': 'Command: wr show dictionaries'
            })

            for thisCommand in supportedCommands:
                results.append({
                    "Title": thisCommand['Name'],
                    "SubTitle": thisCommand['Command'],
                    "IcoPath": "Images/ico.ico",
                    "JsonRPCAction": {
                        "method": "GoOn",
                        "parameters":
                        [GitHubLink, issetWebbrowser, issetPyperclip],
                        "dontHideAfterAction": False
                    }
                })

        if len(seperatedKeys) > 0:

            ###########################
            #     ESTABLISH QUERY     #
            ###########################

            specifiedLang = seperatedKeys[0]

            ### Specific language ###
            if specifiedLang in lLPrepared:
                thisDefinition = "Set specific language [" + specifiedLang + ']'
                if len(seperatedKeys) > 1:
                    thisWord = seperatedKeys[1]
                    thisDefinition = 'Click to Search ' + thisWord

                    # Establish API Connection...
                    try:
                        apiReturn = requests.get(
                            apiUrlV2ByDict.replace('&WORD&', thisWord).replace(
                                '&DICT&', specifiedLang))
                        apiReturn = apiReturn.text
                    except Exception as identifier:
                        # Maybe A NetWork Error.
                        apiReturn = '0'

            ### Word Comparing Search ###
            elif ('[' in specifiedLang) or ('【' in specifiedLang):

                multiList = keyword
                multiList = multiList.replace(',', ',').replace(
                    '【', '[').replace('】', ']').replace('(', '(').replace(
                        ')', ')')  # Chinese symbol support.

                thisTitle = 'Multi-Searching: Syntax Error'
                thisSub = 'Failed to interpret the command: ' + multiList
                thisP = 'https://wordreference.com'

                try:
                    words = multiList.replace('[',
                                              '').replace(']',
                                                          '').replace(' ', '')
                    words = words.split(',')

                    ## Search words ##
                    for thisWord in words:

                        ## Specific Dictionary ##
                        try:
                            if ('(' in thisWord) and (')' in thisWord):
                                thisDictionary = thisWord[thisWord.find('(') +
                                                          1:thisWord.find(')')]
                                thisWord = thisWord.replace('(', '').replace(
                                    ')', '').replace(thisDictionary, '')
                            else:
                                raise Exception('NO_CHOOSENED_DICT')
                        except Exception as identifier:
                            thisDictionary = DefaultDictionary

                        ## Establish Connection ##
                        try:
                            thisApiReturn = requests.get(
                                apiUrlV2ByDict.replace('&WORD&',
                                                       thisWord).replace(
                                                           '&DICT&',
                                                           thisDictionary))
                            thisApiReturn = thisApiReturn.text
                        except Exception as identifier:
                            thisTitle = 'Connection Error...'
                            thisSub = 'Check your internet connections.'

                        thisApiReturn = thisApiReturn.split('|')

                        ## Fetch Result ##
                        try:
                            thisDef = thisApiReturn[1].split('%' + '%')
                            thisLink = thisDef[2]
                            thisDef = thisDef[1]
                        except Exception as identifier:
                            thisDef = 'No Definition.'
                            thisLink = 'https://wordreference.com'

                        results.append({
                            "Title": thisWord,
                            "SubTitle": thisDef,
                            "IcoPath": "Images/ico.ico",
                            "JsonRPCAction": {
                                "method":
                                "GoOn",
                                "parameters":
                                [thisLink, issetWebbrowser, issetPyperclip],
                                "dontHideAfterAction":
                                False
                            }
                        })

                    thisTitle = 'del'

                except Exception as identifier:
                    pass

                if thisTitle != 'del':
                    results.append({
                        "Title": thisTitle,
                        "SubTitle": thisSub,
                        "IcoPath": "Images/ico.ico",
                        "JsonRPCAction": {
                            "method": "GoOn",
                            "parameters":
                            [thisP, issetWebbrowser, issetPyperclip],
                            "dontHideAfterAction": False
                        }
                    })

            ### Wrong Dictionary Name ###
            elif len(seperatedKeys) > 1:

                lLOpen = open('LanguageList.wr')
                lLLines = lLOpen.readline()
                lLLines = lLOpen.readline()
                thisLanguage = 'UNKNOWN_LANGUAGE'

                results.append({
                    "Title": 'Omm... Wrong Dictionary Name!',
                    "SubTitle": 'Check dictionaries below:',
                    "IcoPath": "Images/ico.ico",
                    "JsonRPCAction": {
                        "method":
                        "GoOn",
                        "parameters": [
                            APIUrl + '?getLanguageList=True', issetWebbrowser,
                            issetPyperclip
                        ],
                        "dontHideAfterAction":
                        False
                    }
                })

                # Show dictionaries.
                while lLLines:
                    lLLines = lLLines.replace('\n', '')
                    if ('/**' in lLLines) or (len(lLLines) < 1):
                        pass
                    else:
                        try:
                            splitedLl = lLLines.split(' - ')
                            thisLanguage = splitedLl[0]
                            thisName = splitedLl[1]
                        except Exception as identifier:
                            thisName = lLLines

                        results.append({
                            "Title":
                            thisName,
                            "SubTitle":
                            'Command: wr ' + thisLanguage + ' <YOUR WORD>',
                            "IcoPath":
                            "Images/ico.ico",
                            "JsonRPCAction": {
                                "method":
                                "GoOn",
                                "parameters": [
                                    APIUrl + '?getLanguageList=True',
                                    issetWebbrowser, issetPyperclip
                                ],
                                "dontHideAfterAction":
                                False
                            }
                        })
                    lLLines = lLOpen.readline()

                lLOpen.close()

            ### No specific language ###
            else:
                thisWord = specifiedLang
                # Establish API Connection...
                try:
                    apiReturn = requests.get(
                        apiUrlV2ByDict.replace('&WORD&', thisWord).replace(
                            '&DICT&', DefaultDictionary))
                    apiReturn = apiReturn.text
                except Exception as identifier:
                    # Maybe A NetWork Error.
                    apiReturn = '0'

            #########################
            #     FETCH RESULTS     #
            #########################

            try:
                # Make Sure if there's a result.
                if (apiReturn != 'Array') & (apiReturn != '0'):
                    apiReturn = apiReturn.split("|")
                    for thisApiReturn in apiReturn:
                        if (thisApiReturn != 'Array'):
                            thisReturn = thisApiReturn.split('%' + '%')
                            thisWord = thisReturn[0]
                            thisDefinition = thisReturn[1]
                            gotoURL = thisReturn[2]

                            results.append({
                                "Title": thisWord,
                                "SubTitle": thisDefinition,
                                "IcoPath": "Images/ico.ico",
                                "JsonRPCAction": {
                                    "method":
                                    "GoOn",
                                    "parameters":
                                    [gotoURL, issetWebbrowser, issetPyperclip],
                                    "dontHideAfterAction":
                                    False
                                }
                            })

                # NetWork Error State.
                elif apiReturn == '0':
                    thisWord = 'NetWork Error'
                    thisDefinition = 'Please check the network connection...'
                    results.append({
                        "Title": thisWord,
                        "SubTitle": thisDefinition,
                        "IcoPath": "Images/ico.ico",
                        "JsonRPCAction": {
                            "method":
                            "GoOn",
                            "parameters":
                            [gotoURL, issetWebbrowser, issetPyperclip],
                            "dontHideAfterAction":
                            False
                        }
                    })

                # Word Not Found State.
                else:
                    thisDefinition = 'Definition Not Found. (Click to go details)'
                    results.append({
                        "Title": thisWord,
                        "SubTitle": thisDefinition,
                        "IcoPath": "Images/ico.ico",
                        "JsonRPCAction": {
                            "method":
                            "GoOn",
                            "parameters":
                            [gotoURL, issetWebbrowser, issetPyperclip],
                            "dontHideAfterAction":
                            False
                        }
                    })

            except Exception as identifier:
                pass

        #################################
        #     NULL RESULTS RESPONSE     #
        #################################

        # If nothing in results, show the state.
        if len(results) == 0:
            results.append({
                "Title": thisWord,
                "SubTitle": thisDefinition,
                "IcoPath": "Images/ico.ico",
                "JsonRPCAction": {
                    "method": "GoOn",
                    "parameters": [gotoURL, issetWebbrowser, issetPyperclip],
                    "dontHideAfterAction": False
                }
            })

        return results
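
A side note on the `requirements`/`suggestions` bookkeeping in `query` above: it uses `False` as a sentinel and grows the comma-separated string branch by branch. An equivalent, shorter pattern (a sketch, not a drop-in replacement for the plugin):

def missing_names(checks):
    # checks: iterable of (module_name, imported_ok) pairs
    return ', '.join(name for name, ok in checks if not ok)

print(missing_names([('requests', False), ('os', True)]))            # -> requests
print(missing_names([('pyperclip', False), ('webbrowser', False)]))  # -> pyperclip, webbrowser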
Example #22
def fit(cnf, exp_run_folder, classifier, features_file, n_iter, blend_cnf,
        test_dir, fold):

    config = util.load_module(cnf).config
    config.cnf['fold'] = fold  # used to change the directories for weights_best, weights_epoch and weights_final
    config.cnf['exp_run_folder'] = exp_run_folder

    folds = yaml.load(open('folds/' + data.settings['protocol'] + '.yml'))
    f0, f1 = fold.split('x')
    train_list = folds['Fold_' + f0][int(f1) - 1]
    test_list = folds['Fold_' + f0][0 if f1 == '2' else 1]

    image_files = data.get_image_files(config.get('train_dir'), train_list)
    names = data.get_names(image_files)
    labels = data.get_labels(names,
                             label_file='folds/' + data.settings['protocol'] +
                             '.csv').astype(np.int32)[:, np.newaxis]

    if features_file is not None:
        runs = {'run': [features_file]}
    else:
        runs = {
            run: [
                os.path.join(exp_run_folder + '/data/features', f)
                for f in files
            ]
            for run, files in yaml.load(open(blend_cnf)).items()
        }

    scalers = {run: StandardScaler() for run in runs}

    y_preds = []
    y_preds_proba = []
    for i in range(n_iter):
        print("iteration {} / {}".format(i + 1, n_iter))
        for run, files in runs.items():
            files = [
                f.replace('f0xf1.npy', '{}.npy'.format(fold)) for f in files
            ]

            if classifier is None:
                X_test = data.load_features(files, test=True)
                if data.settings['protocol'] != 'protocol3':
                    y_pred_proba = X_test
                    y_proba = []
                    for i in range(0, len(X_test)):
                        y_proba.append(y_pred_proba[i][1])  # use the score for the positive class
                    y_pred = np.clip(np.round(y_proba), 0, 1).astype(int)
                else:
                    y_pred_proba = est.predict_proba(X)
            else:
                print("fitting features for run {}".format(run))
                X_train = data.load_features(files)
                l2Norm = np.linalg.norm(X_train, axis=1)
                X_train = np.divide(X_train.T, l2Norm).T
                est = estimator(data.settings['protocol'],
                                classifier,
                                X_train.shape[1],
                                image_files,
                                X_train,
                                labels,
                                run,
                                fold,
                                eval_size=0.1)
                open(
                    exp_run_folder +
                    "/best_estimator_fold_{}.txt".format(fold),
                    "w").write(str(est))
                X_test = data.load_features(files, test=True)
                l2Norm = np.linalg.norm(X_test, axis=1)
                X_test = np.divide(X_test.T, l2Norm).T
                if data.settings['protocol'] != 'protocol3':
                    y_pred = est.predict(X_test).ravel()
                    y_pred_proba = est.predict_proba(X_test).ravel()
                    y_proba = []
                    for i in range(0, 2 * len(X_test), 2):
                        y_proba.append(y_pred_proba[i + 1])  # use the score for the positive class
                else:
                    y_pred_binary = est.predict(X_test)
                    y_pred = preprocessing.LabelBinarizer().fit([0, 1, 2])
                    y_pred = y_pred.inverse_transform(y_pred_binary)
                    y_proba = est.predict_proba(X_test)

    image_files = data.get_image_files(test_dir or config.get('test_dir'),
                                       test_list)
    names = data.get_names(image_files)
    labels = data.get_labels(
        names, label_file='folds/' + data.settings['protocol'] +
        '.csv').astype(np.int32)[:, np.newaxis]  # , per_patient=per_patient

    image_column = pd.Series(names, name='image')
    labels_column = pd.Series(np.squeeze(labels), name='true')

    level_column = pd.Series(y_pred, name='pred')
    if data.settings['protocol'] != 'protocol3':
        proba_column = pd.Series(y_proba, name='proba')
        predictions = pd.concat(
            [image_column, labels_column, level_column, proba_column], axis=1)
    else:
        proba_label_0 = pd.Series(y_proba[:, 0], name='proba_label_0')
        proba_label_1 = pd.Series(y_proba[:, 1], name='proba_label_1')
        proba_label_2 = pd.Series(y_proba[:, 2], name='proba_label_2')
        predictions = pd.concat([
            image_column, labels_column, level_column, proba_label_0,
            proba_label_1, proba_label_2
        ],
                                axis=1)

    predictions.to_csv(exp_run_folder +
                       "/ranked_list_fold_{}.csv".format(fold),
                       sep=';')

    print("tail of predictions")
    print(predictions.tail())
    acc = len(filter(lambda
                     (l, y): l == y, zip(labels, y_pred))) / float(len(labels))
    print("accuracy: {}".format(acc))
    print("confusion matrix")
    print(confusion_matrix(labels, y_pred))

    if data.settings['protocol'] != 'protocol3':
        auc = calc_auc(y_proba, labels, exp_run_folder, classifier, fold)
        print("AUC: {}".format(auc))
        average_precision = average_precision_score(labels, y_proba)
        print("average precision: {}".format(average_precision))
        c_matrix = confusion_matrix(labels, y_pred)
        print("sensitivity: {}".format(c_matrix[1][1] /
                                       (c_matrix[1][1] + c_matrix[0][1])))
        print("specificity: {}".format(c_matrix[0][0] /
                                       (c_matrix[0][0] + c_matrix[1][0])))
    else:
        y_test = label_binarize(labels, classes=[0, 1, 2])
        auc = roc_auc_score(y_test, y_proba, average='macro')
        print("AUC: {}".format(auc))
        average_precision = average_precision_score(y_test,
                                                    y_proba,
                                                    average="macro")
        print("mean average precision: {}".format(average_precision))

    results = pd.concat([
        pd.Series(exp_run_folder, name='folder'),
        pd.Series(fold, name='fold'),
        pd.Series(auc, name='auc'),
        pd.Series(average_precision, name='ap'),
        pd.Series(acc, name='acc')
    ],
                        axis=1)
    with open('results.csv', 'a') as f:
        results.to_csv(f, header=False)
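
Note that the accuracy line above (`len(filter(lambda (l, y): l == y, ...))`) relies on Python 2-only tuple-parameter lambdas and on `filter` returning a list. A Python 3-compatible equivalent of just that computation, on toy arrays:

import numpy as np

labels = np.array([[0], [1], [1], [0]])  # column vector, as in the example
y_pred = np.array([0, 1, 0, 0])

acc = sum(int(l == y) for l, y in zip(labels.ravel(), y_pred)) / float(len(labels))
print("accuracy: {}".format(acc))  # 0.75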
Example #23
def transform(cnf, exp_run_folder, n_iter, skip, test, train, weights_from,  test_dir, fold):

    config = util.load_module(cnf).config
    config.cnf['fold'] = fold  # used to change the directories for weights_best, weights_epoch and weights_final
    config.cnf['exp_run_folder'] = exp_run_folder

    runs = {}
    if train:
        runs['train'] = config.get('train_dir')
    if test or test_dir:
        runs['test'] = test_dir or config.get('test_dir')

    folds = yaml.load(open('folds/' + data.settings['protocol'] + '.yml'))
    f0, f1 = fold.split('x')
    train_list = folds['Fold_' + f0][int(f1) - 1]
    test_list = folds['Fold_' + f0][0 if f1 == '2' else 1]

    net = nn.create_net(config)

    if weights_from is None:
        net.load_params_from(config.weights_file)
        print("loaded weights from {}".format(config.weights_file))
    else:
        weights_from = str(weights_from)
        net.load_params_from(weights_from)
        print("loaded weights from {}".format(weights_from))

    if n_iter > 1:
        tfs, color_vecs = tta.build_quasirandom_transforms(
            n_iter, skip=skip, color_sigma=config.cnf['sigma'],
            **config.cnf['aug_params'])
    else:
        tfs, color_vecs = tta.build_quasirandom_transforms(
            n_iter, skip=skip, color_sigma=0.0,
            **data.no_augmentation_params)

    for run, directory in sorted(runs.items(), reverse=True):

        print("extracting features for files in {}".format(directory))
        tic = time.time()

        if run == 'train':
            files = data.get_image_files(directory, train_list)
        else:
            files = data.get_image_files(directory, test_list)

        Xs, Xs2 = None, None

        for i, (tf, color_vec) in enumerate(zip(tfs, color_vecs), start=1):

            print("{} transform iter {}".format(run, i))

            X = net.transform(files, transform=tf, color_vec=color_vec)
            if Xs is None:
                Xs = X
                Xs2 = X**2
            else:
                Xs += X
                Xs2 += X**2

            print('took {:6.1f} seconds'.format(time.time() - tic))
            if i % 5 == 0 or n_iter < 5:
                std = np.sqrt((Xs2 - Xs**2 / i) / (i - 1))
                config.save_features_fold(Xs / i, i, skip=skip, fold=fold,
                                     test=True if run == 'test' else False)
                #config.save_std_fold(std, i, skip=skip, fold=fold,
                #               test=True if run == 'test' else False)
                print('saved {} iterations'.format(i))