Example #1
def main():
    if checkpoint.ok:
        loader = data.Data(args)
        if args.model.lower() == 'edsr':
            t_model = EDSR(args, is_teacher=True).to(device)
            s_model = PAMS_EDSR(args, bias=True).to(device)
        elif args.model.lower() == 'rdn':
            t_model = RDN(args, is_teacher=True).to(device)
            s_model = PAMS_RDN(args).to(device)
        else:
            raise ValueError('unexpected model = {}'.format(args.model))

        if args.pre_train is not None:
            t_checkpoint = torch.load(args.pre_train) 
            t_model.load_state_dict(t_checkpoint)
        
        if args.test_only:
            if args.refine is None:
                ckpt = torch.load(f'{args.save}/model/model_best.pth.tar')
                refine_path = f'{args.save}/model/model_best.pth.tar'
            else:
                ckpt = torch.load(f'{args.refine}')
                refine_path = args.refine

            s_checkpoint = ckpt['state_dict']
            s_model.load_state_dict(s_checkpoint)
            print(f"Load model from {refine_path}")

        t = Trainer(args, loader, t_model, s_model, checkpoint)
        
        print(f'{args.save} start!')
        while not t.terminate():
            t.train()
            t.test()

        checkpoint.done()
        print(f'{args.save} done!')
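
In Examples #1, #6, and #29 the parsed args go straight to data.Data(args) and the result is handed to a Trainer, so the class is evidently a thin wrapper that builds the training and test loaders. A minimal sketch of such a wrapper, assuming EDSR-style attribute names (loader_train, loader_test) and an args object carrying batch_size; the random tensors merely stand in for real datasets:

import torch
from torch.utils.data import DataLoader, TensorDataset

class Data:
    """Hypothetical stand-in for the loader wrapper used above."""
    def __init__(self, args):
        # Synthetic low-/high-resolution pairs in place of the real SR datasets.
        train_set = TensorDataset(torch.randn(32, 3, 48, 48), torch.randn(32, 3, 96, 96))
        test_set = TensorDataset(torch.randn(4, 3, 48, 48), torch.randn(4, 3, 96, 96))
        self.loader_train = DataLoader(train_set, batch_size=args.batch_size, shuffle=True)
        self.loader_test = DataLoader(test_set, batch_size=1, shuffle=False)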
Example #2
File: main.py Project: DM865/CVRP
def main(argv):

    parser = argparse.ArgumentParser()

    parser.add_argument('-o', action='store',
                        dest='output_file',
                        help='The file where to save the solution and, in case, plots')

    parser.add_argument('-t', action='store',
                        dest='time_limit',
                        type=int,
                        required=True,
                        help='The time limit')

    parser.add_argument('instance_file', action='store',
                        help='The path to the file of the instance to solve')

    config = parser.parse_args()

    print('instance_file    = {!r}'.format(config.instance_file))
    print('output_file      = {!r}'.format(config.output_file))
    print('time_limit       = {!r}'.format(config.time_limit))

    instance = data.Data(config.instance_file)
    instance.short_info()
    # if config.output_file is not None:
    #    instance.plot_points(config.output_file+'.png');
    # instance.show()

    sol = solve(instance, config)

    assert sol.valid_solution()
    if config.output_file is not None:
        sol.plot_routes(config.output_file+'_sol'+'.png')
        sol.write_to_file(config.output_file+'.sol')
    print("{} routes with total cost {:.1f}"
          .format(len(sol.routes), sol.cost()))
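
Since argparse's parse_args also accepts an explicit argument list, the parser built above can be exercised without the shell; a small illustrative call (the instance path, output name, and time limit are made up):

# e.g. while debugging inside main(): parse a hypothetical instance with a 60 s limit
config = parser.parse_args(['-t', '60', '-o', 'run1', 'instances/example.vrp'])
assert config.time_limit == 60 and config.instance_file == 'instances/example.vrp'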
Example #3
    def listings(self):

        req = requests.get(
            self.url, headers={"user-agent": spoof().browser("Chrome", 0)})
        soup = bsoup(req.text, "html5lib")
        titles = soup.findAll("a", {"class": "visual-title dark"})

        ttls = np.unique([title.get_text().strip() for title in titles])

        dicts = []
        patt = re.compile(r"..\dth\s(Anniversary)")
        for t in ttls:

            if t.find("3D") > -1:
                pass
            elif t.find("Anniversary") > -1:
                pass
            elif t.find("(") > -1:
                ttl, yr = t.split(" (")
                dicts.append({
                    "title": ttl,
                    "year": yr.replace(")", ""),
                    "rank": "",
                    "watched": None
                })
            else:
                ttl, yr = t, ""
                dicts.append({
                    "title": ttl,
                    "year": yr,
                    "rank": "",
                    "watched": None
                })

        m = data.Data(mediatype="movie")
        m.collect(args=dicts)
        return m.movies
Example #4
def test2():
    data_obj = dt.Data("african_hiv_econ.csv")
    headers = data_obj.get_headers()
    print(data_obj.get_headers())
    print(
        linear_regression(data_obj,
                          [headers[1], headers[6], headers[9], headers[3]],
                          headers[0])[0])
    print(
        linear_regression(data_obj,
                          [headers[1], headers[6], headers[9], headers[3]],
                          headers[0])[1])
    print(
        linear_regression(data_obj,
                          [headers[1], headers[6], headers[9], headers[3]],
                          headers[0])[2])
    print(
        linear_regression(data_obj,
                          [headers[1], headers[6], headers[9], headers[3]],
                          headers[0])[3])
    print(
        linear_regression(data_obj,
                          [headers[1], headers[6], headers[9], headers[3]],
                          headers[0])[4])
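
Each call above recomputes the same regression just to print a different element of the result. Assuming linear_regression returns an indexable sequence (as the [0]..[4] indexing implies), a tighter variant computes it once:

def test2_compact():
    data_obj = dt.Data("african_hiv_econ.csv")
    headers = data_obj.get_headers()
    ind_vars = [headers[1], headers[6], headers[9], headers[3]]
    results = linear_regression(data_obj, ind_vars, headers[0])
    # Print the first five returned components without refitting each time.
    for value in results[:5]:
        print(value)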
Example #5
def main(argv):

    if len(argv) < 4:
        print(
            "Usage: python %s <data file> <independent header> <dependent header>"
            % argv[0]
        )
        exit(-1)

    # read some data
    data_obj = data.Data(argv[1])
    ind_header = argv[2]
    dep_header = argv[3]

    # call the analysis function
    results = analysis.single_linear_regression(data_obj, ind_header,
                                                dep_header)

    # print out the results
    print("Model:    y = %.4fx + %.4f" % (results[0], results[1]))
    print("R-value:  %.3f" % (results[2]))
    print("P-value:  %.3f" % (results[3]))
    print("Stderr:   %.3f" % (results[4]))

    return
Example #6
def main():
    global model
    if args.data_test == ['video']:
        from videotester import VideoTester
        model = model.Model(args, checkpoint)
        t = VideoTester(args, model, checkpoint)
        t.test()
    else:
        if checkpoint.ok:

            args.test_only = True 
            args.resume = -2 
            args.batch_size  = 16
            #args.is_fcSim = True 

            loader = data.Data(args)
            _model = model.Model(args, checkpoint)
            _loss = loss.Loss(args, checkpoint) if not args.test_only else None
            t = Trainer(args, loader, _model, _loss, checkpoint)
            while not t.terminate():
                t.train()
                t.test()

            checkpoint.done()
Example #7
    def process_gen(self, window, filename1):
        self.inc = random.randint(0, pow(16, 4) - 1)
        self.inc = f"{self.inc:x}"
        self.inc = f"{self.inc:0>4}"
        obj = data.Data(self.inc)
        list1 = obj.generation()
        if '.txt' in filename1:
            with open(filename1, 'w') as f:
                for packets in list1:
                    f.write("%s\n" % packets)
            msg = "One file is generated with 50 packets of data \nand the filename as " + filename1
            self.filename = filename1
        else:
            with open(filename1 + '.txt', 'w') as f:
                for packets in list1:
                    f.write("%s\n" % packets)
            msg = "One file is generated with 50 packets of data \nand the filename as " + filename1 + '.txt'
            self.filename = filename1 + '.txt'
        process_gen = Tk()
        process_gen.title("msg box")
        process_gen.configure(background='grey')
        label_0 = Label(process_gen, text=msg, font="Times 12", bg='white', fg='black')
        label_0.grid(row=0, column=0, padx=10, pady=10)
        process_gen.mainloop()
Example #8
    def __init__(self, task, inference=False):
        dt_ner = dt.Data(task=task, inference=inference)
        # Load default model
        self.nlp = he.load_spacy_model(language=cu.params.get('language'),
                                       disable=['ner', 'parser', 'tagger'])

        # Add flair pipeline  # TODO: excluding Flair for now, to be compared with Text Analytics
        # flair_matcher = FlairMatcher(dt_ner.get_path('fn_ner_flair'))
        # self.nlp.add_pipe(flair_matcher)

        # Text Analytics
        ta_matcher = TextAnalyticsMatcher()
        self.nlp.add_pipe(ta_matcher)

        # Load phrase matcher
        self.matcher = PhraseMatcher(self.nlp.vocab, attr="LOWER")
        matcher_items = pd.read_csv(dt_ner.get_path('fn_ner_list',
                                                    dir='asset_dir'),
                                    encoding='utf-8',
                                    sep='\t')
        for product in matcher_items['key'].drop_duplicates():
            _values = matcher_items[matcher_items['key'] == product]
            patterns = [self.nlp.make_doc(v) for v in _values.value]
            self.matcher.add(product, None, *patterns)
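
Example #8 only constructs the PhraseMatcher (note that matcher.add(key, None, *patterns) is the spaCy 2.x signature). A hedged sketch of how the matcher would typically be applied to incoming text, using standard spaCy calls; the helper name is hypothetical:

    def match_products(self, text):
        """Hypothetical helper: return (product key, matched span text) pairs."""
        doc = self.nlp(text)
        matches = self.matcher(doc)  # list of (match_id, start, end) tuples
        return [(self.nlp.vocab.strings[match_id], doc[start:end].text)
                for match_id, start, end in matches]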
Example #9
def main(argv):
    if len(argv) < 2:
        print 'Usage: python %s <all numeric CSV file>' % (argv[0])
        exit(-1)

    try:
        d = data.Data(argv[1])
    except:
        print 'Unable to open %s' % (argv[1])
        exit(-1)

    print "D :", d

    an = analysis.Analysis()

    codebook, codes, errors = an.kmeans_numpy(d, d.get_headers(), 2)

    an.kmeans_init(d, 2, categories=np.matrix([0, 1, 2, 2123, 141, 2]))

    print "\nCodebook\n"
    print codebook

    print "\nCodes\n"
    print codes
Example #10
    def build_by_pickle(self,
                        test_data_file,
                        model_dir,
                        data_filename,
                        pickle_file,
                        loadComplEx=False):
        """
        :param pickle_file: pickle embedding
        :return:
        """
        # load the saved Data()
        self.this_data = data.Data()
        data_save_path = join(model_dir, data_filename)
        self.this_data.load(data_save_path)

        # load testing data
        self.load_test_data(test_data_file)

        self.model_dir = model_dir  # used for saving

        with open(pickle_file, 'rb') as f:
            ht, r = pickle.load(f)  # unpickle
        self.vec_c = ht
        self.vec_r = r
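
build_by_pickle expects the pickle to contain an (entity embeddings, relation embeddings) pair, unpacked as ht, r above. A sketch of producing such a file; the array shapes and file name are illustrative assumptions, not taken from the project:

import pickle
import numpy as np

ht = np.random.rand(1000, 100)  # entity ("head/tail") embedding vectors, illustrative shape
r = np.random.rand(50, 100)     # relation embedding vectors, illustrative shape
with open('embeddings.pkl', 'wb') as f:
    pickle.dump((ht, r), f)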
Example #11
	def __init__(self):
		super().__init__()
		self.DATA_MODULE   = dt.Data()
		self.NUM_SERVER    = self.DATA_MODULE.getIpListLength()
		self.IP_LIST       = self.DATA_MODULE.getIpList()
		self.PORT          = self.DATA_MODULE.getPort()
		self.BUFFER_LENGHT = 128

		#Available Threads
		self.threads = []

		#Destroying Windows Flags
		self.setWindowFlags(
						QtCore.Qt.Window |
						QtCore.Qt.CustomizeWindowHint |
						QtCore.Qt.WindowTitleHint |
						QtCore.Qt.WindowCloseButtonHint |
						QtCore.Qt.WindowStaysOnTopHint
						)

		#PyQT sets
		self.pixmap = QPixmap('images/background.png')
		#self.setWindowIcon(QtGui.QIcon('images/icon.png'))
		self.initUI()
Example #12
    def exec(self,
             sym,
             data5min,
             ex1: Btexchange,
             ord_log=None,
             strat_log=None):
        self.symbol = sym
        self.ex1 = ex1
        # print(data5min)
        self.ts = data5min.iloc[19]['timespan1']
        self.lasto = data5min.iloc[19]['open']
        self.lasth = data5min.iloc[19]['high']
        self.lastl = data5min.iloc[19]['low']
        self.lastc = data5min.iloc[19]['close']
        # self.candles = "["+str(self.ts)+","+str(self.lasto)+","+str(self.lasth)+","+str(self.lastl)+","+str(self.lastc)+"]"
        self.candle = []
        self.candle.append(self.ts)
        self.candle.append(self.lasto)
        self.candle.append(self.lasth)
        self.candle.append(self.lastl)
        self.candle.append(self.lastc)

        self.symbol = sym.upper()
        datac = data.Data()
        self.data = data5min
        # self.datah = datac.geth(self.data)
        self.cch = 0
        self.cc5l = 0
        self.cc5n = 0
        self.cc5h = 0
        # self.ex = exch

        self.ord_log = ord_log
        self.strat_log = strat_log
        self.ta()
        self.decide()
Example #13
    def __init__(self,
                 task,
                 inference=False,
                 models=['Flair', 'TextAnalytics', 'nerList', 'regEx']):
        if len(models) < 1:
            log.warning('Specify at least one model to use for NER.')
            return None

        self.models = models

        dt_ner = dt.Data(task=task, inference=inference)
        # Load default model
        self.nlp = he.load_spacy_model(language=cu.params.get('language'),
                                       disable=['ner', 'parser', 'tagger'])

        # Add flairNER to pipeline
        if 'Flair' in self.models:
            flair_matcher = FlairMatcher(dt_ner.get_path('fn_ner_flair'))
            self.nlp.add_pipe(flair_matcher)

        # Add Azure Text Analytics to pipeline
        if 'TextAnalytics' in self.models:
            ta_matcher = TextAnalyticsMatcher()
            self.nlp.add_pipe(ta_matcher)

        # Add phrase matcher based on nerList
        if 'nerList' in self.models:
            self.matcher = PhraseMatcher(self.nlp.vocab, attr="LOWER")
            matcher_items = pd.read_csv(dt_ner.get_path('fn_ner_list',
                                                        dir='asset_dir'),
                                        encoding='utf-8',
                                        sep='\t')
            for product in matcher_items['key'].drop_duplicates():
                _values = matcher_items[matcher_items['key'] == product]
                patterns = [self.nlp.make_doc(v) for v in _values.value]
                self.matcher.add(product, None, *patterns)
Example #14
def main():
    path = sys.argv[1]
    total_files = len(sys.argv) - 2
    test_names = []

    for i in range(total_files):
        test_names.append(sys.argv[i + 2])

    datasets = data.Data(path, test_names)
    model_type = MODEL_TYPES[
        "CONVOLUTIONAL"]  # HERE GOES THE MODEL WE CONFIGURED IN CROSS VALIDATION
    epochs = 10  # HERE GOES THE EPOCHS WE CONFIGURED IN CROSS VALIDATION
    neurons = 64  # HERE GOES THE NEURONS WE CONFIGURED IN CROSS VALIDATION
    dropout = 0.1  # HERE GOES THE DROPOUT WE CONFIGURED IN CROSS VALIDATION
    batches_size = 64  # HERE GOES THE BATCH_SIZE WE CONFIGURED IN CROSS VALIDATION
    m = model.Model(model_type=model_type,
                    train_dataset=datasets.train,
                    neurons=neurons,
                    dropout=dropout,
                    val_dataset=datasets.test,
                    test_files=datasets.test_files,
                    path=path)
    m.train(epochs, batches_size)
    m.eval()
Example #15
    def translate(self, headers, magnitudes):
        '''Translates the variables `headers` in projected dataset in corresponding amounts specified
        by `magnitudes`.

        Parameters:
        -----------
        headers: Python list of str.
            Specifies the variables along which the projected dataset should be translated.
        magnitudes: Python list of float.
            Translate corresponding variables in `headers` (in the projected dataset) by these amounts.

        Returns:
        -----------
        ndarray. shape=(N, num_proj_vars). The translated data (with all variables in the projected
            dataset). NOTE: There should be NO homogeneous coordinate!
        '''
        
        newDict = self.data.get_mappings()
        
        translation = self.translation_matrix(headers, magnitudes)
        newData = translation @ self.get_data_homogeneous().transpose()
        
        self.data = data.Data(headers=headers, data=newData.transpose()[:, :-1], header2col=newDict)
        return newData.transpose()[:,:-1]
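
translate relies on a translation_matrix helper that is not shown. In homogeneous coordinates such a helper is an identity matrix with the requested offsets placed in the last column; a sketch, assuming numpy is imported as np and that get_mappings() returns the header-to-column dictionary used above:

    def translation_matrix(self, headers, magnitudes):
        """Hypothetical sketch of the helper used by translate()."""
        header2col = self.data.get_mappings()
        num_vars = len(header2col)
        t = np.eye(num_vars + 1)  # (M+1) x (M+1) homogeneous transform
        for header, magnitude in zip(headers, magnitudes):
            t[header2col[header], -1] = magnitude  # shift this variable by its magnitude
        return t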
Example #16
def main():
	dclean = data.Data('data-clean.csv')
	dgood = data.Data('data-good.csv')
	dnoisy = data.Data('data-noisy.csv')
	daussie = data.Data('aussiecoast.csv')
	drb = data.Data('RB 2.csv')
	dsat = data.Data('SATdata.csv')

	headersclean=dclean.get_headers()
	headersgood = dgood.get_headers()
	headersnoisy = dnoisy.get_headers()
	headersaussie= daussie.get_headers()
	headersrb=drb.get_headers()
	headersat=dsat.get_headers()

	xheadrb=headersrb[1:3]
	yheadrb=headersrb[0]

	xheadaussie=headersaussie[1:3]
	yheadaussie=headersaussie[0]

	xheadclean=headersclean[1:3]
	yheadclean=headersclean[0]

	xheadgood=headersgood[1:3]
	yheadgood=headersgood[0]

	xheadnoisy = headersnoisy[1:3]
	yheadnoisy = headersnoisy[0]

	xheadsat=headersat[1:3]
	yheadsat=headersat[0]


	print("Multiple Linear Regression Australia Coast Data")
	print("independent variables", xheadaussie)
	print("dependent variable", yheadaussie)
	print("b, sse, r2, t, p")
	print(linearRegression(daussie, xheadaussie, yheadaussie))
Example #17
    "../../../enron/gender_identified_enron_corpus/graph/partitions/structure_employee_wemail_random_test0.2.csv"
]
filename = [
    "../../../enron/gender_identified_enron_corpus/graph/partitions/structure_employee_wemail_random_train0.8.csv",
    "../../../enron/gender_identified_enron_corpus/graph/structure_wemail.csv",
    "../../../enron/gender_identified_enron_corpus/graph/structure_wemail.csv"
]

if len(sys.argv) > 1:
    this_dim = int(sys.argv[1])
    model_path = sys.argv[2]
    data_path = sys.argv[3]
    more_filt = [sys.argv[4]]
    filename = sys.argv[5:]

this_data = data.Data()
this_data.load_data_enron_csv(filenames=filename)
for f in more_filt:
    this_data.record_more_enron_csv(f)

# In[ ]:

m_train = Trainer()
m_train.build(this_data,
              dim=this_dim,
              batch_size=64,
              num_neg=10,
              save_path=model_path,
              data_save_path=data_path,
              L1=False)
Example #18
    logger = logging.getLogger()
    if opt.verbose:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    logging.info(opt)

    if opt.random_seed != 0:
        random.seed(opt.random_seed)
        np.random.seed(opt.random_seed)
        torch.manual_seed(opt.random_seed)
        torch.cuda.manual_seed_all(opt.random_seed)

    d = data.Data(opt)



    logging.info(d.config)

    makedir_and_clear(opt.output)

    logging.info("load data ...")
    train_data = data.loadData(opt.train_file, True, opt.types, opt.type_filter)
    dev_data = data.loadData(opt.dev_file, True, opt.types, opt.type_filter)
    if opt.test_file:
        test_data = data.loadData(opt.test_file, False, opt.types, opt.type_filter)
    else:
        test_data = None

Example #19
def kmeans2(d, headers, K, whiten=True):
    '''Takes in a Data object, a set of headers, and the number of clusters to create
	Computes and returns the codebook, codes, and representation error.
	'''
    if isinstance(d, np.ndarray):
        A = d

    # assign to A the result getting the data given the headers
    else:
        A = d.get_data(headers).T

    # assign to W the whitened data; respect the whiten flag (previously ignored)
    W = vq.whiten(A) if whiten else A

    # assign to codebook, bookerror the result of calling vq.kmeans2 with W and K
    codebook, bookerror = vq.kmeans2(W, K)

    # assign to codes, error the result of calling vq.vq with W and the codebook
    codes, error = vq.vq(W, codebook)

    # return codebook, codes, and error
    return codebook, codes, error


if __name__ == '__main__':
    d = data.Data(sys.argv[1])
    headers = d.get_headers()
    print(linear_regression(d, [headers[0], headers[1]], headers[10]))
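
Because of the isinstance branch at the top, kmeans2 also accepts a plain ndarray, so it can be sanity-checked without a Data object. A quick synthetic check, assuming numpy is imported as np alongside scipy.cluster.vq:

rng = np.random.default_rng(0)
sample = np.vstack([rng.normal(0, 1, size=(50, 2)),
                    rng.normal(5, 1, size=(50, 2))])
codebook, codes, error = kmeans2(sample, headers=None, K=2)
print(codebook.shape, codes.shape, error.shape)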
Example #20
import bilstm_att_model
import DGCNN_model
import cnn
import cnn_att
import biattention
from sklearn.model_selection import train_test_split
import gensim
import numpy as np
from sklearn.metrics import classification_report
import pickle
import pandas as pd

if __name__ == '__main__':
    path_train = '../data/train.csv'
    path_test = '../data/test_stage1.csv'
    data = data.Data(path_train, path_test)
    seq_len = data.seq_len[0]
    seq_len_test = data.seq_len[1]
    train_id = data.train_id
    label = data.label
    test_id = data.test_id
    word2index = data.word2index
    index2word = data.index2word

    x_train, x_dev, y_train, y_dev = train_test_split(train_id,
                                                      label,
                                                      random_state=42,
                                                      test_size=0.2,
                                                      stratify=label)
    seqlen_train, seqlen_dev, _, _ = train_test_split(seq_len,
                                                      label,
Example #21
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 19 08:43:41 2019

@author: Ashima
"""

import data
import config
import random
import numpy as np
import neural_network

if __name__ == "__main__":
    #print("Start")
    data = data.Data()
    data.read_full(config.TRAIN_INPUT, config.TRAIN_LABELS, True)
    print("Train data read successfully")

    X, Y = data.get_data()

    network = neural_network.Network(config.NUM_TRAIN,
                                     config.IMAGE_SIZE * config.IMAGE_SIZE,
                                     config.CLASSES)
    network.train(X, Y)
    #   print("Complete model training")
    print("Accuracy Plot for Train")
    network.plot_accuracy('Train')
    print("Error Plot for Train")
    network.plot_cost('Train')
    print("Accuracy Plot for Validation")
Example #22
def main(argv):

    #Handle OPTIONS
    try:
        opts, args = getopt.getopt(argv[1:], "ht:i:e:")
    except getopt.GetoptError:
        print "Invalid option!"
        usage()
    #Default args
    tag = None
    id = None
    notificationEmail = None

    for opt, arg, in opts:
        if (opt == "-h"):
            usage()
        elif (opt == "-t"):
            tag = arg
        elif (opt == "-i"):
            id = arg
        elif (opt == "-e"):
            notificationEmail = arg

    if tag == None:
        print "A TAG must be specified to run!"
        usage()
    if id == None:
        print "An ID must be specified to run!"
        usage()

    #handle VARIABLES
    variables, job_dir = parseArgs(args)
    if job_dir == None:
        print "No JOB_DIR given!"
        usage()
    if variables == None:
        print "Invalid option or variable declaration!"
        usage()

    #Finish setting up framework
    import config, error

    if not config.init(job_dir):
        error.warning("No Config file found. Using defaults")
    error.init()

    import data, parser, ruleParser, teu

    #Register our signalHandler now
    #We will need to start some kinda signal handler at this point so that we can have a graceful exit
    signal.signal(signal.SIGTERM, signalHandler)

    #include our command line options
    if notificationEmail != None:
        error.addEmailAddr(notificationEmail)

    variables.append(('TAG', tag))
    variables.append(('ID', id))

    #Parse the task lists
    error.message("Parsing the task lists...")
    taskList = parser.parseTaskList()
    if error.isError():
        error.fatalError("Error parsing the task lists!")

    error.message("task lists parsed OK!")

    #Parse Rules
    error.message("Parsing the rule lists...")
    ruleList = ruleParser.getRules()
    if error.isError():
        error.fatalError("Error parsing the rules list!")

    error.message("rule lists parsed OK!")

    #enforce the rules
    rulesObj = data.Data()
    rulesObj.shared.putVar("USER_LIB_PATH", config.getUserLibPath())
    rulesObj.shared.putVar("COMMON_LIB_PATH", config.getCommonLibPath())
    rulesObj.shared.putVar("TASK_LIST", taskList)

    error.message("Enforcing the Rules...")
    teu.runTaskList(ruleList, rulesObj)
    if error.isError():
        error.fatalError("Your job did not pass a FATAL rule")

    error.message("All FATAL rules passed!")

    #Start the job (run the tasks)
    error.message("Running the tasks...")
    dataObj = data.Data()

    for var in variables:
        name = var[0]
        value = var[1]
        dataObj.shared.putVar(name, value)

    teu.runTaskList(taskList, dataObj)
    if error.isError():
        error.message("Some tasks failed during execution!")
        error.message("Your job may not have completed successfully!")
        error.message("Trying to exit gracefully...")
        error.gracefulExit(
            "Framework Job: " + job_dir + "-" + tag +
            "  completed with errors",
            "Framework Job: " + job_dir + "-" + tag +
            " completed, however some tasks failed during execution. Please check logs as your job may not have been successful."
        )

    error.message("All tasks completed successfully!")
    error.gracefulExit(
        "Job: " + job_dir + "-" + tag + " completed successfully",
        "The Task Execution Framework has completed Job: " + job_dir + "-" +
        tag)
Example #23
    def __init__(self, title='nucl.ai Motion Matching'):
        self.canvas = vispy.scene.SceneCanvas(title=title,
                                              size=(1280, 720),
                                              bgcolor='black',
                                              show=False,
                                              keys='interactive')

        self.params = params.Params()
        self.widget = self.canvas.central_widget
        self.view = self.canvas.central_widget.add_view()
        self.marker = vispy.scene.Markers(pos=numpy.asarray([[0, 0]]),
                                          face_color='red',
                                          size=0,
                                          parent=self.view.scene)
        self.select = True  # identifies whether the current tick selects or advances the path
        # prepare display
        self.lines = []
        self.colors = []
        self.history = []
        self.history_pointer = 0
        for i in range(self.params.HISTORY_SIZE):
            line = vispy.scene.Line(parent=self.view.scene,
                                    color=COLOR_NEUTRAL,
                                    connect='strip',
                                    method='agg',
                                    width=HISTORY_PATH_WIDTH)
            line.transform = vispy.visuals.transforms.MatrixTransform()
            self.history.append(line)

        for i in range(self.params.TOP_PATHS_NUMBER):
            path_width = SELECTED_PATH_WIDTH if i == 0 else PATH_WIDTH
            arrow_size = SELECTED_ARROW_SIZE if i == 0 else ARROW_SIZE
            color = COLOR_SELECTED if i == 0 else COLOR_NEUTRAL
            # color = numpy.random.rand(3) # using fixed colors now
            self.colors.append(color)
            line = vispy.scene.Line(parent=self.view.scene,
                                    color=color,
                                    connect='strip',
                                    method='agg',
                                    width=path_width)
            line.transform = vispy.visuals.transforms.MatrixTransform()
            self.lines.append(line)

        self.timer_toggle = True
        self.player_position = numpy.asarray([0, 0])
        if not os.path.exists('dota2.csv'):
            print(
                "ERROR: Please download and extract this file...\nhttps://github.com/aigamedev/nuclai16/releases/download/0.0/dota2.csv.bz2\n"
            )
            sys.exit(-1)
        self.data = data.Data('dota2.csv', self.params)
        # init the searched point with some random value; after the first mouse move it is overwritten by the cursor position
        self.data.mouse_xy = (
            (numpy.random.rand(2) * 10 - 5) -
            numpy.asarray(self.canvas.size) / 2) * self.params.SCALE_FACTOR

        self.grid = vispy.scene.visuals.GridLines(parent=self.view.scene,
                                                  color=(1, 1, 1, 1))
        self.grid.transform = vispy.visuals.transforms.MatrixTransform()
        self.grid.transform.translate(numpy.asarray(self.canvas.size) / 2)
        self.canvas.show(visible=True)
        # HACK: Bug in VisPy 0.5.0-dev requires a click for layout to occur.
        self.canvas.events.mouse_press()

        @self.canvas.events.key_press.connect
        def on_key_press(event):
            if event.key.name == ' ':
                if self.timer_toggle: self.timer.stop()
                else: self.timer.start()
                self.timer_toggle = not self.timer_toggle

        @self.canvas.events.resize.connect
        def on_resize(event):
            self.grid.transform.reset()
            self.grid.transform.translate(numpy.asarray(self.canvas.size) / 2)
            # @TODO: translate paths

        @self.canvas.events.mouse_move.connect
        def on_mouse_move(event):
            self.data.mouse_xy = (
                numpy.asarray(self.view.camera.transform.imap(event.pos)) -
                numpy.asarray(self.canvas.size) / 2) * self.params.SCALE_FACTOR

        @self.canvas.events.draw.connect
        def on_draw(event):
            pass
Example #24
import tensorflow as tf
import model
import data

# Object responsible for generating the training data
g = data.Data()
# Option so TensorFlow does not grab all of the GPU memory
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
tf.keras.backend.set_session(sess)
# Build the model
model = model.make(tflite=False)
# Define the optimizer
optimizer = tf.keras.optimizers.Adam(lr=0.001)
model.compile(optimizer=optimizer,
              loss="categorical_crossentropy",
              metrics=["categorical_accuracy"])


# Callback
class Callback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        "Save the weights at the end of each epoch."
        model.save("weight.hdf5")


cb = Callback()
# To resume training from a checkpoint
initial_epoch = 0
if initial_epoch >= 1:
Example #25
def main(argv):
    
    if len(argv) < 3:
        print 'usage: python %s <Train CSV file> <Train categories CSV file>' % (argv[0])
        exit(-1)

    # read the features and categories data sets
    print 'Reading %s and %s' % (argv[1], argv[2])
    try:
        d = data.Data(argv[1])
    except:
        print 'Unable to open %s' % (argv[1])
        exit(-1)

        
    try:
        catdata = data.Data(argv[2])
    except:
        print 'Unable to open %s' % (argv[2])
        exit(-1)

    
    # execute PCA analysis
    print 'Executing PCA'
    pcadata = an.pca( d, d.get_headers(), False )

    print 'Evaluating eigenvalues'
    # identify how many dimensions it takes to represent 99.9% of the variation
    evals = pcadata.get_eigenvalues()
    #print "type:",type(evals)
    evals=np.asmatrix(evals)
    #print "type2:",type(evals)
    #print "shape:  ",evals.shape
    esum = np.sum(evals)
    
    cum = evals[0,0]
    cumper = cum / esum
    i = 1
    while cumper < 0.999:
        cum += evals[0,i]
        cumper = cum/esum
        i += 1

    print 'Dimensions to reach 99.9% of variation:', i

    cheaders = pcadata.get_headers()[:i]

    # cluster the data
    K = 6

    # Use the average of each category as the initial means
    truecats = catdata.get_data(catdata.get_headers()[0:1])
    #tmpcats = truecats - 1 
    tmpcats = truecats # Don't adjust if we're using corrected labels
    
    print 'Clustering to %d clusters' % (K)
    codebook, codes, errors = an.kmeans(pcadata, cheaders, K, categories = tmpcats)
        
    # build a confusion matrix
    confmtx = [[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0]]
    for i in range(codes.shape[0]):
        #confmtx[codes[i,0]][int(truecats[i,0])-1] += 1
        confmtx[codes[i,0]][int(truecats[i,0])] += 1 # don't adjust

    print "\nConfusion Matrix:\n"
    print 'Actual->     Walking   Walk-up   Walk-dwn  Sitting   Standing   Laying'
    for i in range(len(confmtx)):
        s = 'Cluster %d' % (i)
        for val in confmtx[i]:
            s += "%10d" % (val)
        print s
    print
Example #26
    def setUp(self):
        self.data = data.Data()
        self.proximity = s1_proximity.ProximityState()
Example #27
def main(argv):
    '''Reads in a training set and a test set and builds two KNN
    classifiers.  One uses all of the data, one uses 10
    exemplars. Then it classifies the test data and prints out the
    results.
    '''

    # usage
    if len(argv) < 3:
        print 'Usage: python %s <training data file> <test data file> <optional training category file> <optional test category file>' % (
            argv[0])
        exit(-1)

    # read in the training set
    data_train = data.Data(argv[1])
    # read in the test set
    data_test = data.Data(argv[2])

    # compatibility check: length of argv
    if len(argv) > 4:
        # get the categories of the training data
        train_cat_data = data.Data(argv[3])
        train_cats = train_cat_data.get_data([train_cat_data.get_headers()[0]])
        # get the categories of the test data
        test_cat_data = data.Data(argv[4])
        test_cats = test_cat_data.get_data([test_cat_data.get_headers()[0]])
        # get the training data A and the test data B
        A = data_train.get_data(data_train.get_headers())
        B = data_test.get_data(data_test.get_headers())
    else:
        # just assume the categories are the last column
        train_cats = data_train.get_data([data_train.get_headers()[-1]])
        test_cats = data_test.get_data([data_test.get_headers()[-1]])
        A = data_train.get_data(data_train.get_headers()[:-1])
        B = data_test.get_data(data_test.get_headers()[:-1])


#-----------------------------------------------------------------------
# create two classifiers
    knnClass = classifier.KNN()
    print "Created Classifier, Building Now."
    # build the classifiers
    knnClass.build(A, train_cats)
    print "Built! Now classifying."

    #-----------------------------------------------------------------------
    #-Classifies the training set data and prints out a confusion matrix.
    acats, alabels = knnClass.classify(A)
    print "Done Classifying."

    unique, mapping = np.unique(np.array(train_cats.T), return_inverse=True)
    unique2, mapping2 = np.unique(np.array(alabels.T), return_inverse=True)

    mtx = knnClass.confusion_matrix(
        np.matrix(mapping).T,
        np.matrix(mapping2).T)
    print "Training Confusion Matrix:"
    print knnClass.confusion_matrix_str(mtx)

    #-----------------------------------------------------------------------
    #-----------------------------------------------------------------------
    #-Classifies the test set data and prints out a confusion matrix.
    bcats, blabels = knnClass.classify(B)
    print "Done Classifying."

    unique, mapping = np.unique(np.array(test_cats.T), return_inverse=True)
    unique2, mapping2 = np.unique(np.array(blabels.T), return_inverse=True)

    mtx1 = knnClass.confusion_matrix(
        np.matrix(mapping).T,
        np.matrix(mapping2).T)
    print "Test Confusion Matrix:"
    print knnClass.confusion_matrix_str(mtx1)

    #-----------------------------------------------------------------------
    #Writes out a new CSV data file with the test set data
    # and the categories as an extra column
    data_test.addColumn("KNN Classification", bcats)
    data_test.toFile(filename="knnClass.csv")

    return
Example #28
	def __init__(self, width, height):

		# create a tk object, which is the root window
		self.root = tk.Tk()


		# width and height of the window
		self.initDx = width
		self.initDy = height

		# set up the geometry for the window
		self.root.geometry( "%dx%d+50+30" % (self.initDx, self.initDy) )

		# set the title of the window
		self.root.title("Random Datapoints")

		# set the maximum size of the window for resizing
		self.root.maxsize( 1600, 900 )

		# setup the menus
		self.buildMenus()

		# build the controls
		self.buildControls()

		# build the Canvas
		self.buildCanvas()

		# bring the window to the front
		self.root.lift()

		# - do idle events here to get actual canvas size
		self.root.update_idletasks()

		# now we can ask the size of the canvas
		print(self.canvas.winfo_geometry())

		# set up the key bindings
		self.setBindings()

		# set up the application state
		self.objects = [] # list of data objects that will be drawn in the canvas

		self.data = None # will hold the raw data someday.
		
		self.baseClick = None # used to keep track of mouse movement
		
		#create new view object
		self.v=view.View()
		
		#create new data object
		self.data=data.Data()

		self.linRegLines = []

		self.pcaList = []
		self.num_PCA=0
		self.num_cluster=0

		self.clusterList=[]


		self.linRegEndpoints = None
		self.codebook=None
		self.codes=None
		#created matrix of endpoints of x,y, and z axes
		self.axes=np.matrix([[0,0,0,1],[1,0,0,1],[0,0,0,1],[0,1,0,1],[0,0,0,1],[0,0,1,1]])

		#list to store the lines for the x, y, and z axes
		self.lines=[0,0,0]

		self.colorlist=[]
		
		#calls buildAxes function
		self.buildAxes()
		
		#create matrix for data points
		self.dataPointMatrix=np.zeros((4,4))
		
		#updates the axes
		self.updateAxes()

		self.updateFits()
Example #29
import torch
import os

import utility
import data
import model
import loss
from option import args
from torch.nn import DataParallel

torch.manual_seed(args.seed)
checkpoint = utility.checkpoint(args)

print(os.getcwd())

if checkpoint.ok:
    loader = data.Data(args)
    model = model.Model(args, checkpoint)
    model = DataParallel(model)
    loss = loss.Loss(args, checkpoint) if not args.test_only else None
    t = Trainer(args, loader, model, loss, checkpoint)
    while not t.terminate():
        t.train()
    #     t.test()

    checkpoint.done()

Example #30
def main(argv):
    '''Reads in a training set and a test set and builds two KNN
    classifiers.  One uses all of the data, one uses 10
    exemplars. Then it classifies the test data and prints out the
    results.
    '''

    # usage
    if len(argv) < 3:
        print(
            'Usage: python %s <training data file> <test data file> <optional training category file> <optional test category file>'
            % (argv[0]))
        exit(-1)

    # read the training and test sets
    dtrain = data.Data(argv[1])
    dtest = data.Data(argv[2])

    # get the categories and the training data A and the test data B
    if len(argv) > 4:
        traincatdata = data.Data(argv[3])
        testcatdata = data.Data(argv[4])
        traincats = traincatdata.get_data([traincatdata.get_headers()[0]])
        testcats = testcatdata.get_data([testcatdata.get_headers()[0]])
        A = dtrain.get_data(dtrain.get_headers())
        B = dtest.get_data(dtest.get_headers())
    else:
        # assume the categories are the last column
        traincats = dtrain.get_data([dtrain.get_headers()[-1]])
        testcats = dtest.get_data([dtest.get_headers()[-1]])
        A = dtrain.get_data(dtrain.get_headers()[:-1])
        B = dtest.get_data(dtest.get_headers()[:-1])

    # create two classifiers, one using 10 exemplars per class
    knncall = classifiers.KNN()
    knnc10 = classifiers.KNN()

    # build the classifiers
    knncall.build(A, traincats)
    knnc10.build(A, traincats, 10)

    # use the classifiers on the test data
    allcats, alllabels = knncall.classify(B)

    tencats, tenlabels = knnc10.classify(B)

    # print the results
    print('Results using All Exemplars:')
    print('     True  Est')
    for i in range(allcats.shape[0]):
        if int(testcats[i, 0]) == int(alllabels[i, 0]):
            print("%03d: %4d %4d" %
                  (i, int(testcats[i, 0]), int(alllabels[i, 0])))
        else:
            print("%03d: %4d %4d **" %
                  (i, int(testcats[i, 0]), int(alllabels[i, 0])))

    print(knnc10)

    print('Results using 10 Exemplars:')
    print('     True  Est')
    for i in range(tencats.shape[0]):
        if int(testcats[i, 0]) == int(tenlabels[i, 0]):
            print("%03d: %4d %4d" %
                  (i, int(testcats[i, 0]), int(tenlabels[i, 0])))
        else:
            print("%03d: %4d %4d **" %
                  (i, int(testcats[i, 0]), int(tenlabels[i, 0])))

    return