Example #1
	def __init__(self, keyword):
		self.URL = URLer.URLer()
		self.load = Loader.Loader()
		self.Parse = Parser.Parser()
		keyword = self.Parse.quote_(keyword)
		self.Out = Outer.Outer()
		self.root = self._url + keyword
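		# self.root now holds the search URL: the base self._url (assumed to be
		# defined elsewhere in the class) plus the percent-encoded keyword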
Example #2
    def occurence_counter(
            self,
            key):  # emoticon/hashtag/etc. counter - returns a descending sorted
        # array and a pandas DataFrame which stores the hash count for every tweet
        key_list = []
        occurence_counter = []

        loader = ld.Loader(self.size)

        for tweet in self.tweets:
            matches = re.findall(patterns[key], tweet)

            if matches:
                matches = [
                    match.lower() for match in matches
                ]  # lowercase the matched text to avoid case sensitivity
                occurence_counter.append(len(matches))
                for ele in matches:
                    counter = 0
                    if len(ele) != 1:
                        for tag in key_list:
                            if ele == tag[0]:
                                tag[1] += 1
                                counter = 1
                        if counter == 0:
                            key_list.append([ele, 1])
            else:
                occurence_counter.append(0)

            loader.loading()

        sorted_key_list = sorted(key_list, key=lambda x: x[1], reverse=True)
        sorted_key_list = pd.DataFrame(sorted_key_list)

        return sorted_key_list, occurence_counter
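
    # Hypothetical usage sketch: assuming `patterns` maps keys such as
    # 'hashtag' to regexes and `self.tweets` holds the tweet texts,
    #     counts_df, per_tweet = self.occurence_counter('hashtag')
    # returns the unique matches sorted by frequency plus per-tweet counts.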
Example #3
    def check_sentiment_vader(self):
        loader = ld.Loader(self.size)

        sent_analyzer = SentimentIntensityAnalyzer()
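        # the ±0.05 compound cutoffs used below are the conventional VADER thresholds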

        def apply_score(tweet):
            loader.loading()
            return sent_analyzer.polarity_scores(tweet)

        def sentiment_value(compound):
            if compound < -0.05:
                return 'negative'
            elif compound > 0.05:
                return 'positive'
            return 'neutral'

        sentiment_scores = self.tweets.apply(
            apply_score)  # returns a Series of score dictionaries
        sentiment_scores = sentiment_scores.apply(
            pd.Series)  # splits the dictionaries into one column per key
        sentiment_compound = sentiment_scores['compound']
        sentiment_values = sentiment_compound.apply(sentiment_value)

        return sentiment_values, sentiment_compound
Example #4
    def __init__(self):

        self.memory = Memory()
        self.loader = Loader(self.memory)
        self.assembler = Assembler()
        self.CI = 0  # 12 bits
        self.ACC = 0  # 8 bits
        self.output = []
Example #5
    def __init__(self, input):
        if isinstance(input, int):
            if 3 <= input <= 10:
                self.newGame(input)
            else:
                raise Exception("Error: invalid board size")
        else:
            self.puzzles = ld.Loader(input)
            self.createPuzzle(0)
Example #6
    def construct(self):
        Membre.membres[self.membre] = {}
        # only a single object is retrieved here (a Blender object, not a real one)
        nao = Loader().load(self.membre)[0]

        self.multipleColors = nao.multipleColors()
        self.changeColor = nao.material  #getFaceNbColors()

        Membre.membres[self.membre]["boolText"] = nao.hasTexture
        Membre.membres[self.membre]["len"] = len(nao.tabCoordInd)
        self.boolText = nao.hasTexture

        # the index tables appear to be 1-based (OBJ-style), hence the "- 1"
        V = []
        for a in range(len(nao.tabCoordInd)):
            V.append(nao.tabCoord[nao.tabCoordInd[a] - 1])

        VN = []
        for a in range(len(nao.tabNormInd)):
            VN.append(nao.tabNorm[nao.tabNormInd[a] - 1])

        #Create the VBO
        v = numpy.array([V], dtype=numpy.float32)
        Membre.membres[self.membre]["vVBO"] = vbo.VBO(v)

        #Create the VBO
        vn = numpy.array([VN], dtype=numpy.float32)
        Membre.membres[self.membre]["vnVBO"] = vbo.VBO(vn)

        if self.boolText:
            VT = []
            for a in range(len(nao.tabTextInd)):
                VT.append(nao.tabText[nao.tabTextInd[a] - 1])

            #Create the VBO
            vt = numpy.array([VT], dtype=numpy.float32)
            Membre.membres[self.membre]["vtVBO"] = vbo.VBO(vt)
Example #7
index_total = 1
veces_mejorado = 0

load_from_numpy = False
genetic = False
while True:
	index_file = 0
	for file, score in zip(files, best_scores):
		print(file)
		solution = None

		if load_from_numpy and os.path.exists(file + '.obj'):
			with open(file + '.obj', 'rb') as filehandler:  # pickle requires binary mode
				solution = pickle.load(filehandler)
		else:
			loader = Loader.Loader(file+'.in')
			# Init variables and rides
			[rides, rows, cols, carsN, ridesN, bonus, steps] = loader.readfile()
			# Init cars
			cars = []
			for i in range(carsN):
				car = Car.Car()
				cars = cars + [car]

			rule_out_rides = []

			next_index_ride = 0
			next_index_car = 0
			fin = False
			while len(rides) > 0 and not fin:
				# pick the next car
Example #8
def main(model_name, spec_version=1):
    #data = [[1, 2, 3], [4, 5, 6]]

    #x_header = data[0][1:]
    #y_header = [i for i in range(1, 13)]
    #data=data[1:]
    #for i in range(len(data)):
    #data[i] = data[i][1:]
    #arr = np.array(data)
    #fig, ax = plt.subplots()
    #norm = MidpointNormalize(midpoint=0)
    #im = ax.imshow(data, norm=norm, cmap=plt.cm.seismic, interpolation='none')

    #ax.set_xticks(np.arange(arr.shape[1]), minor=False)
    #ax.set_yticks(np.arange(arr.shape[0]), minor=False)
    #ax.xaxis.tick_top()
    #ax.set_xticklabels(x_header, rotation=90)
    #ax.set_yticklabels(y_header)

    #fig.colorbar(im)
    #plt.show()

    loader = Loader()
    loader.load_files_labels('./dataset/train.csv')
    res = Loader.get_general_statistics(loader)
    #return self.classes_frequency, classes_percent, verif_num, verif_num / tot, self.classes_verified, classes_percent_verified
    res_one = res[0]  #res1

    #k=list(res_one.keys())
    v = list(res_one.values())
    k = list(range(1, 42))

    x = np.random.normal(size=3000)
    plt.hist(x, density=True, bins=30)
    x = np.arange(len(k))
    plt.bar(x, height=v)
    plt.xticks(x, k, rotation='vertical')
    plt.show()

    res_two = res[1]  #res2
    labels = list(res_two.keys())
    sizes = list(res_two.values())
    fig1, ax1 = plt.subplots()
    ax1.pie(sizes, labels=k, shadow=True)
    ax1.axis('equal')
    plt.show()

    res_three = res[2]  #res3   (number)
    print("\n")
    print("////////////////////////////////////")
    print("verified->number: ")
    print(res_three)
    print("////////////////////////////////////")

    res_four = res[3]  #res4   (number)
    print("\n")
    print("////////////////////////////////////")
    print("% verified->number: ")
    print(res_four)
    print("////////////////////////////////////")

    res_five = res[4]  #res5

    #k1=list(res_five.keys())
    k1 = list(range(1, 42))
    v1 = list(res_five.values())
    x = np.random.normal(size=3000)
    plt.hist(x, density=True, bins=30)
    x = np.arange(len(k1))
    plt.bar(x, height=v1)
    plt.xticks(x, k1, rotation='vertical')
    plt.show()

    res_six = res[5]
    #print(res_six)

    labels1 = list(res_six.keys())
    #print("?????????")
    #print(labels1)
    #print("?????????")
    sizes1 = list(res_six.values())
    fig1, ax1 = plt.subplots()
    ax1.pie(sizes1, labels=k1, shadow=True)
    ax1.axis('equal')
    plt.show()
Example #9
# NOTE: the head of this Compose was truncated in the excerpt; ToTensor() is
# assumed here as the usual step before Normalize.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

#transform = transforms.Compose([
#    transforms.RandomResizedCrop(224, scale=(0.9, 1.0), ratio=(1.0, 1.0)),
#    transforms.ToTensor()
#])
#test_transform = transforms.Compose([
#    transforms.CenterCrop(224),
#    transforms.ToTensor()
#])

batch_size, seq_len = 256, 50
loader = Loader.Loader(name=name,
                       transform=transform,
                       seq_len=seq_len,
                       batch_size=batch_size,
                       shuffle=True)
tester = Loader.Loader(name=name + '.test',
                       transform=test_transform,
                       seq_len=seq_len,
                       batch_size=batch_size,
                       shuffle=True)

import utils

epoch_num = 0
print('Train:')
for epoch in range(epoch_num):
    for i, (xs, ys) in enumerate(loader):
        xs, ys = xs.cuda(), ys.cuda()
Example #10
import Loader
loader = Loader.Loader()
loader.launch()
Example #11
import tflearn
import os
import SimpleNetwork as sn
import keras
import tensorflow as tf
import Loader as ld  # assumed import: ld.Loader is used below

# CREATE A DIRECTORY FOR THE MODEL
MODEL_PATH = "/home/javier/DevData/model"
MODEL_NAME = "classificator.model"
if not os.path.exists(MODEL_PATH):
    os.makedirs(MODEL_PATH, mode=0o755)

if __name__ == '__main__':

    # LOAD THE IMAGE COLLECTIONS
    loader = ld.Loader("/home/javier/DevData/")
    (X, Y), (X_test, Y_test) = loader.loadData()

    # TODO: identify what these functions do
    Y = tflearn.data_utils.to_categorical(Y, 2)
    Y_test = tflearn.data_utils.to_categorical(Y_test, 2)
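    # (to_categorical one-hot encodes the integer labels into 2-class vectors)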

    # TRAINING A REGULAR NEURAL NETWORK
    print("[INFO] CREATING MODEL WITH CLASSICAL NEURAL NETWORK")
    model_name_NN = 'nn_' + MODEL_NAME
    model_NN = tflearn.DNN(sn.nn(), checkpoint_path='model_NN',
                           max_checkpoints=10, tensorboard_verbose=3)

    print("[INFO] TRAINING CLASSICAL NEURAL NETWORK")
    model = keras.Sequential([
        keras.layers.Flatten(input_shape=(128, 128)),
Example #12
log = logging.getLogger('')
log.setLevel(logging.DEBUG)
formatter = logging.Formatter(
    "%(asctime)s - %(name)s - %(levelname)s - %(message)s")

ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(formatter)
log.addHandler(ch)

# with maxBytes=0, RotatingFileHandler never rolls over (it acts as a plain FileHandler)
fh = logging.handlers.RotatingFileHandler(logFile, maxBytes=0, backupCount=0)
fh.setFormatter(formatter)
log.addHandler(fh)

log.info('loader: START')

ld = Loader()
ld.start()

if ld.checkPsqlCanExecuteCommand():
    log.info('loader: psql available.')
else:
    log.error('psql command is NOT AVAILABLE.')
    sys.exit()

if ld.checkYouHaveTheRightTables():
    log.info('loader: your tables are correct.')
else:
    log.error('your database tables are wrong.')
    sys.exit()

log.info('loader: all checks are ok.')
Example #13
def main():
    ''' LOADING ANY DATASET '''
    dataset_dir = '/dataset'
    dataset_type = '/BIOLOGICAL'
    dataset_name = '/WISCONSIN'

    # this variable decides whether or not to balance the dataset
    resample = True
    p_step = 1

    # defining directory paths for saving partial and complete results
    path_data_folder = dataset_dir + dataset_type + dataset_name
    path_data_file = path_data_folder + dataset_name
    variables = ['X', 'Y']

    print('%d.Loading and pre-processing the data...\n' % p_step)
    p_step += 1
    # NB: if you get an error such as 'Please use HDF reader for matlab v7.3 files', change the format argument to 'matlab_v73'
    D = lr.Loader(file_path=path_data_file,
                  format='matlab',
                  variables=variables,
                  name=dataset_name[1:]).getVariables(variables=variables)

    dataset = ds.Dataset(D['X'], D['Y'])

    n_classes = dataset.classes.shape[0]
    cls = np.unique(dataset.classes)

    # check if the data are already standardized, if not standardize it
    dataset.standardizeDataset()

    # re-sampling dataset
    num_min_cls = 9999999
    print('%d.Class-sample separation...\n' % p_step)
    p_step += 1
    if resample == True:

        print(
            '\tDataset %s before resampling w/ size: %s and number of classes: %s---> %s'
            % (dataset_name[1:], dataset.data.shape, n_classes, cls))

        # discriminating classes of the whole dataset
        dataset_train = ds.Dataset(dataset.data, dataset.target)
        dataset_train.separateSampleClass()
        data, target = dataset_train.getSampleClass()

        for i in xrange(0, n_classes):
            print('\t\t#sample for class C%s: %s' % (i + 1, data[i].shape))
            if data[i].shape[0] < num_min_cls:
                num_min_cls = data[i].shape[0]

        resample = '/BALANCED'
        print('%d.Class balancing...' % p_step)
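        # SMOTE requires k_neighbors to be smaller than the smallest class, hence num_min_cls - 1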
        dataset.data, dataset.target = SMOTE(
            kind='regular',
            k_neighbors=num_min_cls - 1).fit_sample(dataset.data,
                                                    dataset.target)
        p_step += 1
    else:
        resample = '/UNBALANCED'

    # shuffling data
    print('\tShuffling data...')
    dataset.shufflingDataset()

    print('\tDataset %s w/ size: %s and number of classes: %s---> %s' %
          (dataset_name[1:], dataset.data.shape, n_classes, cls))

    # discriminating classes the whole dataset
    dataset_train = ds.Dataset(dataset.data, dataset.target)
    dataset_train.separateSampleClass()
    data, target = dataset_train.getSampleClass()

    for i in xrange(0, n_classes):
        print('\t\t#sample for class C%s: %s' % (i + 1, data[i].shape))

    # Max number of features to use
    max_num_feat = 300
    step = 1
    # max_num_feat = dataset.data.shape[1]

    if max_num_feat > dataset.data.shape[1]:
        max_num_feat = dataset.data.shape[1]

    alpha = 10  # regularization parameter (typically alpha in [2, 50])

    params = {
        'SMBA':
        # the smaller is alpha the sparser is the C matrix (fewer representatives)
        {
            'alpha': alpha,
            'norm_type': 1,
            'max_iter': 3000,
            'thr': [10**-8],
            'type_indices': 'nrmInd',
            'normalize': False,
            'GPU': False,
            'device': 0,
            'PCA': False,
            'verbose': False,
            'step': 1,
            'affine': False,
        }
        # it's possible to add other FS methods by modifying the correct file
    }

    fs_model = fs.FeatureSelector(name='SMBA', tp='SLB', params=params['SMBA'])
    fs_name = 'SMBA'

    # CLASSIFIERS (it's possible to add other classifier methods by adding entries into this list)
    clf_name = [
        "SVM"
        # "Decision Tree",
        # "KNN"
    ]
    model = [
        SVC(kernel="linear")
        # DecisionTreeClassifier(max_depth=5),
        # KNeighborsClassifier(n_neighbors=1)
    ]
    '''Perform K-fold Cross Validation...'''
    k_fold = 10

    #defining result folders
    fs_path_output = '/CSFS/FS/K_FOLD'
    checkFolder(path_data_folder, fs_path_output)

    res_path_output = '/CSFS/RESULTS/K_FOLD'
    checkFolder(path_data_folder, res_path_output)

    all_scores = {}
    all_scores.update({fs_name: []})

    cc_fold = 0
    conf_dataset = {}

    X = dataset.data
    y = dataset.target
    kf = KFold(n_splits=k_fold)
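    # KFold does not shuffle by default; the data was already shuffled above
    # by dataset.shufflingDataset()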

    print(
        '%d.Running the Intra-Class-Specific Feature Selection and building the ensemble classifier...\n'
        % p_step)
    p_step += 1
    for train_index, test_index in kf.split(X):

        X_train_kth, X_test_kth = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]

        print('\tDOING %s-CROSS VALIDATION W/ TRAINING SET SIZE: %s' %
              (cc_fold + 1, X_train_kth.shape))
        ''' For the training data in each class we find the representative features and use them as a best subset feature
            (in representing each class sample) to perform classification
        '''

        csfs_res = {}

        for i in xrange(0, n_classes):
            cls_res = {'C' + str(cls[i]): {}}
            csfs_res.update(cls_res)

        kth_scores = {}
        for i in xrange(0, len(clf_name)):
            kth_scores.update({clf_name[i]: []})

        # check whether the 'curr_res_fs_fold' directory exists, otherwise create it
        curr_res_fs_fold = path_data_folder + '/' + fs_path_output + '/' + fs_name + resample
        checkFolder(path_data_folder,
                    fs_path_output + '/' + fs_name + resample)

        # discriminating classes for the k-th fold of the training set
        data_train = ds.Dataset(X_train_kth, y_train)
        data_train.separateSampleClass()
        ktrain_data, ktrain_target = data_train.getSampleClass()
        K_cls_ind_train = data_train.ind_class

        for i in xrange(0, n_classes):
            # print ('Train set size C' + str(i + 1) + ':', ktrain_data[i].shape)

            print('\tPerforming feature selection on class %d with shape %s' %
                  (cls[i] + 1, ktrain_data[i].shape))

            start_time = time.time()
            idx = fs_model.fit(ktrain_data[i], ktrain_target[i])

            # print idx

            print('\tTotal Time = %s seconds\n' % (time.time() - start_time))

            csfs_res['C' + str(cls[i])]['idx'] = idx
            csfs_res['C' + str(cls[i])]['params'] = params[fs_name]

            # with open(curr_res_fs_fold + '/' + str(cc_fold + 1) + '-fold' + '.pickle', 'wb') as handle:
            #     pickle.dump(csfs_res, handle, protocol=pickle.HIGHEST_PROTOCOL)

        ens_class = {}
        # learning a classifier (ccn) for each subset of 'n_rep' feature
        for j in xrange(0, max_num_feat):
            n_rep = j + 1  # first n_rep indices

            for i in xrange(0, n_classes):
                # get subset of feature from the i-th class
                idx = csfs_res['C' + str(cls[i])]['idx']

                # print idx[0:n_rep]

                X_train_fs = X_train_kth[:, idx[0:n_rep]]

                _clf = i_clf.Classifier(names=clf_name, classifiers=model)
                _clf.train(X_train_fs, y_train)

                csfs_res['C' + str(cls[i])]['accuracy'] = _clf.classify(
                    X_test_kth[:, idx[0:n_rep]], y_test)

            DTS = classificationDecisionRule(csfs_res, cls, clf_name, y_test)

            for i in xrange(0, len(clf_name)):
                _score = DTS[clf_name[i]]
                # print ('Accuracy w/ %d feature: %f' % (n_rep, _score))
                kth_scores[clf_name[i]].append(_score)

        x = np.arange(1, max_num_feat + 1)

        kth_results = {
            'clf_name': clf_name,
            'x': x,
            'scores': kth_scores,
        }

        all_scores[fs_name].append(kth_results)

        # saving k-th dataset configuration
        # with open(path_data_folder + fs_path_output + '/' + str(cc_fold + 1) + '-fold_conf_dataset.pickle',
        #           'wb') as handle:  # TODO: customize output name for recognizing FS parameters' method
        #     pickle.dump(conf_dataset, handle, protocol=pickle.HIGHEST_PROTOCOL)

        cc_fold += 1

    # print all_scores

    print('%s.Averaging results...\n' % p_step)
    p_step += 1
    # Averaging results on k-fold

    # check whether the 'curr_res_fs_fold' directory exists, otherwise create it
    curr_res_output_fold = path_data_folder + '/' + res_path_output + '/' + fs_name + resample
    checkFolder(path_data_folder, res_path_output + '/' + fs_name + resample)

    M = {}
    for i in xrange(0, len(clf_name)):
        M.update({clf_name[i]: np.zeros([k_fold, max_num_feat])})

    avg_scores = {}
    std_scores = {}
    for i in xrange(0, len(clf_name)):
        avg_scores.update({clf_name[i]: []})
        std_scores.update({clf_name[i]: []})

    # k-fold results for each classifier
    for k in xrange(0, k_fold):
        for clf in clf_name:
            M[clf][k, :] = all_scores[fs_name][k]['scores'][clf][:max_num_feat]

    for clf in clf_name:
        avg_scores[clf] = np.mean(M[clf], axis=0)
        std_scores[clf] = np.std(M[clf], axis=0)

    x = np.arange(1, max_num_feat + 1)
    results = {
        'clf_name': clf_name,
        'x': x,
        'M': M,
        'scores': avg_scores,
        'std': std_scores
    }

    # print avg_scores

    with open(curr_res_output_fold + '/clf_results.pickle', 'wb') as handle:
        pickle.dump(results, handle, protocol=pickle.HIGHEST_PROTOCOL)
    print('Done with %s, [%d-cross validation] ' % (dataset_name[1:], k_fold))
Example #14
from argparse import Namespace
from tools.Review import Review, rates_table_names
import Loader
import Recommender

user_test_ids = [
    'A1GJGQAEQCFZB0', 'A15UAAF89CZCTJ', 'A3S1HDQLL3BLLP', 'AKBVYIIHWI04B',
    'A2HTWILZUNAWJ0', 'A0266076X6KPZ6CCHGVS', 'A3OVAZG3IK32WC',
    'A2RLPCNSPSAQ0', 'A32U48NPM01NIU', 'A2VKBENU2H4VN'
]
# Connect to test_database and orig_database
test = Loader.Loader(new_bd=False, db_name='recommender.db')
results = {}
for user_test_id in user_test_ids:
    s_recomendations = []
    d_recomendations = []
    s_num_products = 0
    d_num_products = 0

    # Get the first bought product of each category by user in test_database
    first_bought = []
    for table_name in rates_table_names:
        first_from_table = Review.get_first_review_from_user(
            test.cursor, user_test_id, table_name)
        if first_from_table:
            first_bought.append(first_from_table.product_id)

    # Simulate buying the first_bought products and get static and dynamic recommendations
    for product_id in first_bought:
        static_args = Namespace(database='recommender.db',
                                user=user_test_id,
Example #15
from PandaModules import *
import Loader
outputFile = 'MazeData.py'
mazeNames = [
    ['phase_4/models/minigames/maze_1player'],
    ['phase_4/models/minigames/maze_2player'],
    ['phase_4/models/minigames/maze_3player'],
    ['phase_4/models/minigames/maze_4player'],
]
CELL_WIDTH = 2
loader = Loader.Loader(None)
root = NodePath('root')

def fwrite(f, s):
    f.write(s)
    f.write('\n')


def fwritelines(f, lines):
    for line in lines:
        fwrite(f, line)


def calcMazeTopology(mazeNode):
    cRay = CollisionRay(0, 0, 50, 0, 0, -1)
    cNode = CollisionNode('cNode')
    cNode.addSolid(cRay)
    cNodePath = root.attachNewNode(cNode)
    cNode.setCollideMask(BitMask32.allOff())
    cNode.setCollideGeom(1)
    cQueue = CollisionHandlerQueue()
    cTrav = CollisionTraverser()
    cTrav.addCollider(cNode, cQueue)
Example #16
    def setUp(self):
        self.imp = Loader()
Example #17
# import BFS as bfs
import BestFirst as bfs
import AStar as ass
# import DFS as dfs
import DepthFirst as dfs
import Loader as ld
import SolutionWriter as wr
import time

# loads the test.txt input file.
loader = ld.Loader("test.txt")

# get solution for dfs
for i in range(loader.getMyPuzzleSize()):
    # calculate the time
    start = time.time()
    # load the i-th puzzle
    initSet = loader.getMyPuzzleAt(i)
    dfSearcher = dfs.DepthFirst(initSet.getMaxDeepth(), False)
    dfSearcher.addRoot(initSet.get1DState())
    # runs search using DFS method
    dfSearcher.doSearch()
    print("FINAL SOLUTION")
    print(dfSearcher.solution)
    print("SOL FOUND?")
    print(dfSearcher.isSolFound)
    solution = dfSearcher.getFinalSolution()
    search = dfSearcher.getSearchPath()
    print(solution)
    # prints output
    writer = wr.SolutionWriter(i, "dfs")
Example #18
if __name__ == '__main__':
    ''' LOADING ANY DATASET '''
    dataset_dir = '/dataset'
    dataset_type = '/BIOLOGICAL'
    dataset_name = '/LUNG_DISCRETE'

    resample = True

    path_data_folder = dataset_dir + dataset_type + dataset_name
    path_data_file = path_data_folder + dataset_name

    variables = ['X', 'Y']
    # NB: if you get an error such as 'Please use HDF reader for matlab v7.3 files', change the format argument to 'matlab_v73'
    D = lr.Loader(file_path=path_data_file,
                  format='matlab',
                  variables=variables,
                  name=dataset_name[1:]).getVariables(variables=variables)

    dataset = lr.Dataset(D['X'], D['Y'])

    # check if the data are already standardized, if not standardize it
    dataset.standardizeDataset()

    n_classes = dataset.classes.shape[0]
    cls = np.unique(dataset.classes)

    num_min_cls = 9999999
    if resample == True:

        print(
            'Dataset before resampling %s w/ size: %s and number of classes: %s---> %s'
Example #19
    def __init__(self):
        self.config = ConfigConfigureGetConfigConfigShowbase
        if self.config.GetBool('use-vfs', 1):
            vfs = VirtualFileSystem.getGlobalPtr()
        else:
            vfs = None
        self.sfxActive = self.config.GetBool('audio-sfx-active', 1)
        self.musicActive = self.config.GetBool('audio-music-active', 1)
        self.wantFog = self.config.GetBool('want-fog', 1)
        self.screenshotExtension = self.config.GetString(
            'screenshot-extension', 'jpg')
        self.musicManager = None
        self.musicManagerIsValid = None
        self.sfxManagerList = []
        self.sfxManagerIsValidList = []
        self.wantStats = self.config.GetBool('want-stats', 0)
        self.exitFunc = None
        taskMgr.taskTimerVerbose = self.config.GetBool('task-timer-verbose', 0)
        taskMgr.extendedExceptions = self.config.GetBool(
            'extended-exceptions', 0)
        taskMgr.pStatsTasks = self.config.GetBool('pstats-tasks', 0)
        taskMgr.resumeFunc = PStatClient.resumeAfterPause
        fsmRedefine = self.config.GetBool('fsm-redefine', 0)
        State.FsmRedefine = fsmRedefine
        try:
            self.clusterSyncFlag = clusterSyncFlag
        except NameError:
            self.clusterSyncFlag = self.config.GetBool('cluster-sync', 0)
        else:
            self.hidden = NodePath('hidden')
            self.graphicsEngine = GraphicsEngine()
            self.setupRender()
            self.setupRender2d()
            self.setupDataGraph()
            self.cTrav = 0
            self.appTrav = 0
            self.dgTrav = DataGraphTraverser()
            self.win = None
            self.winList = []
            self.mainWinMinimized = 0
            self.pipe = None
            self.pipeList = []
            self.mak = None
            self.cam = None
            self.camList = []
            self.camNode = None
            self.camLens = None
            self.camera = None
            self.cameraList = []
            self.camera2d = self.render2d.attachNewNode('camera2d')
            self.oldexitfunc = getattr(sys, 'exitfunc', None)
            sys.exitfunc = self.exitfunc
            if self.config.GetBool('open-default-window', 1):
                self.openMainWindow()
                self.graphicsEngine.renderFrame()
                self.graphicsEngine.renderFrame()
                if self.win.isClosed():
                    self.notify.info('Window did not open, removing.')
                    self.closeWindow(self.win)
                if self.win == None:
                    self.makeAllPipes()
                    while self.win == None and len(self.pipeList) > 1:
                        self.pipeList.remove(self.pipe)
                        self.pipe = self.pipeList[0]
                        self.openMainWindow()
                        self.graphicsEngine.renderFrame()
                        self.graphicsEngine.renderFrame()
                        if self.win.isClosed():
                            self.notify.info('Window did not open, removing.')
                            self.closeWindow(self.win)

        self.loader = Loader.Loader(self)
        self.eventMgr = eventMgr
        self.messenger = messenger
        self.taskMgr = taskMgr
        self.particleMgr = particleMgr
        self.particleMgr.setFrameStepping(1)
        self.particleMgrEnabled = 0
        self.physicsMgr = physicsMgr
        integrator = LinearEulerIntegrator()
        self.physicsMgr.attachLinearIntegrator(integrator)
        self.physicsMgrEnabled = 0
        self.physicsMgrAngular = 0
        self.createBaseAudioManagers()
        self.createStats()
        self.AppHasAudioFocus = 1
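        # Panda3D convention: expose the common globals as builtins so game
        # code can use base, render, taskMgr, etc. without explicit imports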
        __builtins__['base'] = self
        __builtins__['render2d'] = self.render2d
        __builtins__['aspect2d'] = self.aspect2d
        __builtins__['render'] = self.render
        __builtins__['hidden'] = self.hidden
        __builtins__['camera'] = self.camera
        __builtins__['loader'] = self.loader
        __builtins__['taskMgr'] = self.taskMgr
        __builtins__['eventMgr'] = self.eventMgr
        __builtins__['messenger'] = self.messenger
        __builtins__['config'] = self.config
        __builtins__['run'] = self.run
        __builtins__['ostream'] = Notify.out()
        __builtins__['directNotify'] = directNotify
        __builtins__['globalClock'] = ClockObject.getGlobalClock()
        __builtins__['vfs'] = vfs
        self.accept('window-event', self.__windowEvent)
        import Transitions
        self.transitions = Transitions.Transitions(self.loader)
        self.startTk(self.config.GetBool('want-tk', 0))
        self.startDirect(self.config.GetBool('want-directtools', 0))
        self.restart()
        return
Example #20
    def __init__(self, root):
        global_config = Config.Config(Util.get_path(root,
                                                    'config.json')).load()
        self.loader = Loader.Loader(Util.get_path(root, 'src'), global_config)
        self.processor = Processor.Processor(root, global_config)
        self.generator = Generator.Generator(root, global_config)