Example #1
def load_data():
    clean_wavs, noisy_wavs, clean_fps, noisy_fps, transcripts = zip(*loader())
    clean_wavs = [wav.astype('float32') for wav in clean_wavs]


    # pad_or_trim and normalize  ### MUST SHUFFLE AND SPLIT!

    clean_wavs_padded = \
        np.array([pad(x) for x in clean_wavs]).astype('float32')
    clean_wavs_padded = clean_wavs_padded / clean_wavs_padded.max()

    noisy_wavs_padded = \
        np.array([pad(x) for x in noisy_wavs]).astype('float32')
    noisy_wavs_padded = noisy_wavs_padded / noisy_wavs_padded.max()

    # pretrained_pipeline = asr.load('deepspeech2', lang='en')
    enc = asr.text.Alphabet(lang='en')._str_to_label

    # enc = pretrained_pipeline._alphabet._str_to_label
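    # Encode each transcript as a sequence of integer character labels, then pad each one to length 91.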
    encoded_transcripts = \
        [[enc[char] for char in label] for label in transcripts]
    encoded_transcripts_padded = \
        np.array([pad(x, 91) for x in encoded_transcripts], dtype='float32')

    # return clean_wavs_padded, encoded_transcripts_padded
    return noisy_wavs_padded, clean_wavs_padded
def train(model, optimizer, num_epochs, batch_size, learning_rate):
    model.load_state_dict(
        torch.load('./300_60_teachers6.pth',
                   map_location=lambda storage, loc: storage))
    train_loader, _ = loader()
    valid_loader = ValidLoader()

    for epoch in range(num_epochs):
        model.train()
        for index, (data, label) in enumerate(train_loader):
            data = Variable(data)
            if use_cuda:
                data = data.cuda()

            # ===================forward=====================
            encoder_outputs, encoder_hidden = model.encode(data, max_batch_len)
            #print(encoder_outputs.data.shape, encoder_hidden.data.shape)

            decoder_hidden = encoder_hidden
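            # the encoder's final hidden state initializes the decoder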
            #print('decoder input', decoder_input.shape, 'decoder hidden', decoder_hidden.data.shape)

            loss = model.decode(label,
                                decoder_hidden,
                                encoder_outputs,
                                i=False)

            # ===================backward====================
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # ===================log========================
        torch.save(model.state_dict(), './300_60_teachers5.pth')
        print('epoch [{}/{}], loss:{:.4f}'.format(epoch + 1, num_epochs,
                                                  loss.data[0]))
    def test_loader(self):
        filename = "./Challenge/input.txt"
        loader(filename)
        with open('./Challenge/output.txt') as output:
            expected = output.readlines()
        with open('result.txt') as f:
            result = f.readlines()

        for i in range(len(expected)):
            expected_data = json.loads(expected[i])
            result_data = json.loads(result[i])
            self.assertEqual(expected_data["id"], result_data["id"])
            self.assertEqual(expected_data["accepted"],
                             result_data["accepted"])
            self.assertEqual(expected_data["customer_id"],
                             result_data["customer_id"])
Example #4
def spike(x):

    time_range = 2247
    truth, shape = loader.loader(
        'D:\\document\\体外网络发放数据\\a195e390b707a65cf3f319dbadbbc75f_6b245c0909b1a21072dd559d4203e15b_8.txt')
    ture = truth[200:, :]

    start = torch.tensor(truth[0:200, :], dtype=torch.float32)
    ture = torch.tensor(ture, dtype=torch.float32)
    spike = torch.zeros(size=(shape[0] - 200, shape[1]), dtype=torch.float32)
    spike = torch.cat([start, spike], dim=0)
    result = torch.empty((0, 69))

    for time in range(200, time_range):
        spike_F = spike[time - 1, :]
        #spike_F = truth[time-1, :]
        spike_F = torch.tensor(spike_F, dtype=torch.float32)
        spike_t = -spike_F + spike_F @ x.t() + 0.01 * torch.randn(69)
        spike_t = spike_t.unsqueeze(0)
        spike_t = torch.clamp(spike_t, min=0.0)

        result = torch.cat([result, spike_t], dim=0)
        #spike[time, :] = -spike[time-1, :] + spike[time-1, :] @ x.t()

    return torch.sum((result - ture)**2), result
Example #5
    def __init__(self, data, type = 'raw'):
        self.__data = loader(data, type).load()
        self.score = Score()
        self.interpret = Interpret()

        prerequisites = self.__data['prerequisites']
        self.__prerequisites = {}
        for prerequisite in prerequisites:
            self.__prerequisites[prerequisite.get('label')] = prerequisite
Example #6
File: vm.py Project: abael/pyvm
    def __init__(self, filename):
        self.memory = loader(filename)
        self.instruction_set = {v['opcode']: k 
                for k,v in instruction_set.iteritems()}
        self.registers = {v: k.lower() for k, v in registers.iteritems()}
        [setattr(self, k, 0) for k in self.registers.values()]

        while (self.do_instruction()):
            pass

        from pprint import pprint
        pprint({reg: getattr(self, reg) for reg in self.registers.values()})
Example #7
File: train.py Project: sedab/ge
def train(model, optimizer, num_epochs, batch_size, learning_rate):
    #model.load_state_dict(torch.load('./autoencoder.pth'))
    train_loader, valid_loader = loader()

    latent_representation = []
    representation_indices = []
    for epoch in range(num_epochs):
        model.train()
        for index, (data, label) in enumerate(train_loader):
            #batch_onehot = _one_hot(data, max_batch_len)
            label_onehot = _one_hot(label, max_batch_len, use_cuda)

            if use_cuda:
                data = Variable(data).cuda()
            else:
                data = Variable(data)

            # ===================forward=====================
            if epoch != num_epochs - 1:
                encoder_outputs, encoder_hidden = model.encode(
                    data, max_batch_len)
                #print(encoder_outputs.data.shape, encoder_hidden.data.shape)
            else:
                encoder_outputs, encoder_hidden = model.encode(
                    data, max_batch_len)  #, collect_filters = True)
                rep = encoder_outputs.data.view(64, -1).numpy()
                if index == 100:
                    print(
                        "good job, everything looks good: a batch's latent rep has shape",
                        rep.shape)
                #latent_representation.append(rep)
                #representation_indices.append(label.numpy())

            decoder_hidden = encoder_hidden
            #print('decoder input', decoder_input.shape, 'decoder hidden', decoder_hidden.data.shape)

            #send in corrupted data to recover clean data
            loss = model.decode(data, label_onehot, decoder_hidden,
                                encoder_outputs, max_batch_len)

            if index % 1000 == 0:
                print(epoch, index, loss.data[0])
                print(evaluate(model, valid_loader))
                model.train()
            # ===================backward====================
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # ===================log========================
        torch.save(model.state_dict(), './autoencoder.pth')
        print('epoch [{}/{}], loss:{:.4f}'.format(epoch + 1, num_epochs,
                                                  loss.data[0]))
Example #8
def check_imgs():
	root_path = "/media/nvme0n1/DATA/TRAININGSETS/"

	savePath = root_path+"mnist/"
	ld = loader(savePath+"X.pa", savePath+"Y.pa")
	data_checker(ld, "./data/mnist.png")

	savePath = root_path+"cifar/"
	ld = loader(savePath+"X_10.pa", savePath+"Y_10.pa")
	data_checker(ld, "./data/cifar_10.png")

	savePath = root_path+"cifar/"
	ld = loader(savePath+"X_100.pa", savePath+"Y_100.pa")
	data_checker(ld, "./data/cifar_100.png")

	savePath = root_path+"coil20/"
	ld = loader(savePath+"X_unprocessed.pa", savePath+"Y_unprocessed.pa")
	data_checker(ld, "./data/coil20-unprocessed.png")

	savePath = root_path+"coil20/"
	ld = loader(savePath+"X_processed.pa", savePath+"Y_processed.pa")
	data_checker(ld, "./data/coil20-processed.png")

	savePath = root_path+"fer2013/"
	ld = loader(savePath+"X.pa", savePath+"Y.pa")
	data_checker(ld, "./data/fer2013.png")
Example #9
    def do_POST(self):

        print("incomming http: ", self.path)

        content_length = int(
            self.headers['Content-Length'])  # <--- Gets the size of data
        post_data = self.rfile.read(
            content_length)  # <--- Gets the data itself
        post_data = json.loads(post_data)
        print("this is the object in post data:")

        # 		print(post_data['digits'])

        l = loader()
        l.load_libsvm_data_array(post_data['digits'],
                                 num_features=784,
                                 one_hot=0,
                                 classes=None)
        MINIBATCH_SIZE = len(post_data['digits'])
        images = np.zeros((MINIBATCH_SIZE, 28, 28))
        for i in range(0, MINIBATCH_SIZE):
            images[i, :, :] = (l.a[i].reshape((28, 28))).astype('int')
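        # allocate physically contiguous buffers for DMA to and from the accelerator (xlnk here is presumably a PYNQ Xlnk instance)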
        in_buffer = xlnk.cma_array(shape=(MINIBATCH_SIZE * num_lines, 64),
                                   dtype=np.uint8)
        out_buffer = xlnk.cma_array(shape=(MINIBATCH_SIZE, 16), dtype=np.int32)
        print('allocated buffers')

        for i in range(0, MINIBATCH_SIZE):
            in_buffer[i * num_lines:(i + 1) * num_lines,
                      0:56] = np.reshape(images[i, :, :], (num_lines, 56))

        start = time.time()
        nn_ctrl.write(0x0, 0)  # Reset
        nn_ctrl.write(0x10, MINIBATCH_SIZE)
        nn_ctrl.write(0x0, 1)  # Deassert reset
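        # start the DMA transfers: results stream back into out_buffer while the input images stream from in_buffer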
        dma.recvchannel.transfer(out_buffer)
        dma.sendchannel.transfer(in_buffer)
        end = time.time()

        time_per_image = (end - start) / MINIBATCH_SIZE
        print("Time per image: " + str(time_per_image) + " s")
        print("Images per second: " + str(1.0 / time_per_image))

        time.sleep(1)
        outp = ""
        for i in range(0, MINIBATCH_SIZE):
            print(str(np.argmax(out_buffer[i, :])))
            outp = outp + str(np.argmax(out_buffer[i, :]))
        print(outp)
        self._set_headers()
        self.wfile.write(outp.encode("utf-8"))
Example #10
def spike(x):
    time_range = 2247
    truth, shape = loader.loader(
        'D:\\document\\体外网络发放数据\\a195e390b707a65cf3f319dbadbbc75f_6b245c0909b1a21072dd559d4203e15b_8.txt')
    ture = truth[200::, :]

    start = tf.convert_to_tensor(truth[0:200, :], dtype=tf.float32)
    ture = tf.convert_to_tensor(ture, dtype=tf.float32)

    spike = tf.zeros(shape=(shape[0] - 200, shape[1]), dtype=tf.float32)
    spike = tf.concat([start, spike], axis=0)
    print(spike.shape)

    def condition(time):
        # loop condition (e.g. for tf.while_loop): keep stepping until time_range is reached
        return time < time_range
Example #11
File: vm.py Project: tongzx/pyvm
    def __init__(self, filename):
        self.memory = loader(filename)
        self.instruction_set = {
            v['opcode']: k
            for k, v in instruction_set.iteritems()
        }
        self.registers = {v: k.lower() for k, v in registers.iteritems()}
        [setattr(self, k, 0) for k in self.registers.values()]

        while (self.do_instruction()):
            pass

        from pprint import pprint
        pprint({reg: getattr(self, reg) for reg in self.registers.values()})
Example #12
def main():
    parser = argparse.ArgumentParser(description="-----[#]-----")

    # Model
    parser.add_argument("--learning_rate",
                        default=1e-4,
                        type=float,
                        help="learning rate")
    parser.add_argument("--epoch",
                        default=100,
                        type=int,
                        help="number of max epoch")
    parser.add_argument('--input_dimension',
                        type=int,
                        default=1,
                        help='image width dimension')
    parser.add_argument('--latent_dimension',
                        type=int,
                        default=25,
                        help='latent variable dimension')

    # Data and train
    parser.add_argument('--batch_size',
                        type=int,
                        default=256,
                        help='batch size for training [default: 256]')
    parser.add_argument("--gpu_device",
                        default=0,
                        type=int,
                        help="the number of gpu to be used")
    parser.add_argument('--printevery',
                        default=100,
                        type=int,
                        help='log/print every N iterations')
    parser.add_argument('--experiment',
                        type=str,
                        default='Abnormal_class_0_ae',
                        help='experiment name')
    parser.add_argument('--abnormal_class',
                        type=int,
                        default=0,
                        help='abnormal class')

    args = parser.parse_args()
    args.loader = loader(args)
    args.model = AE(input_size=28 * 28).cuda(args.gpu_device)

    gc.collect()
    train(args)
Example #13
def algorithm_advanced(blue_winrates):
    l = loader()
    weights = l.getLaneWeights()
    power_base = l.getAlgorithmConstants()['basic power_base']
    divisor = power_base**50
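    # log-scale each lane's winrate relative to 50, re-centre it at 50, then apply the lane weight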
    for i in range(len(weights)):
        blue_winrates[i] = math.log(
            (power_base**blue_winrates[i]) / divisor) + 50
        blue_winrates[i] *= weights[i]
    print(blue_winrates)
    blue_tot = 0
    for item in blue_winrates:
        blue_tot += item

    return blue_tot / 5
Example #14
def getData(folders):
    data = []
    for i in range(len(folders)):
        patient = []
        dataset = loader(path=folders[i],
                         stride=512,
                         patch_size=512,
                         augment=False)
        data_loader = DataLoader(dataset=dataset, batch_size=1, shuffle=False)
        print('loading ' + str(i))
        for index, (image, file_name) in enumerate(data_loader):
            image = image.squeeze()
            patient.extend(image.numpy())
        data.append(patient)
    return data
Example #15
    def __init__(self, env_list=default_envs, algos_list=default_algos):
        self.env_list = env_list
        self.algos_list = algos_list
        self.n_algos = len(self.algos_list)
        self.envs = dict()
        self.rewards = defaultdict(dict)
        self.models = defaultdict(dict)  # THE MODELS MUST BE SAVED FOR THE ENSEMBLE

        for env_name in self.env_list:
            new_env = make_atari_env(env_name, num_env=1, seed=0)
            new_env = VecFrameStack(new_env, n_stack=4)
            self.envs[env_name] = new_env

        for algo in self.algos_list:
            for env_name, env in self.envs.items():
                self.models[env_name][algo] = loader(algo, env_name)
Example #16
File: run_lstm.py Project: kishkash555/biu
def train_lstm():
    ld = loader.loader(None, base_len=20)
    lstm = lfi.ind_lstm(20, 200)
    optimizer = optim.SGD(lstm.parameters(), lr=0.01, momentum=0.1)

    for epoch in range(2000):
        total_mse = total_l1reg = 0.
        for data in ld.generator(standardize=True):
            blocks = [block for (_, block, _) in data]
            optimizer.zero_grad()
            loss, mse_loss, l1reg_loss = lstm(blocks)
            loss.backward()
            optimizer.step()
            total_mse += mse_loss
            total_l1reg += l1reg_loss
        print(epoch, total_mse, total_l1reg)
        if epoch % 50 == 0:
            print("saving")
            torch.save(lstm, 'lstm_model.trch')
Example #17
def train(model, epochs):
  criterion = nn.MSELoss()
  losslist = []
  #running_loss = 
  epochloss = 0.
  optimizer = optim.Adam(model.parameters(),lr=0.1,weight_decay=0.0001)
  data = loader.loader(None, 'disk')

  for epoch in range(epochs):
    l = 0
    print("Entering Epoch: ",epoch)
    for id, x, y in data.generator(training=True):
      optimizer.zero_grad()
      encoded, decoded = model(x)
      loss = criterion(decoded,x) # + 0.01*torch.abs(torch.norm(decoded)-torch.norm(x))
      if torch.isnan(loss) or loss.item()>1:
          print('a')
      loss.backward()

      #if l%50==0:
      #  print(y, y_hat.item(), loss.item(), model.linear1.weight.grad)
      optimizer.step()
    
#      running_loss += loss.item()
      epochloss += loss.item()
      l += 1
    losslist.append(epochloss/l)
    print("\n======> epoch: {}/{}, Loss:{:.4f}, l: {}".format(epoch,epochs,100*epochloss/l, l))
    #print("\tlast loss: {:.4f} + {:.4f}".format(criterion(decoded,x),  0.01*torch.abs(torch.norm(decoded)-torch.norm(x))))
    epochloss=0

    if 0:
      validation_loss=0.
      m = 0
      for id, x, y in data.generator(training=False):
        m += 1
        with torch.no_grad():
          y_hat = model(x)
          loss = criterion(y_hat.view(1), torch.Tensor([y]))
          validation_loss += loss.item()
      print("-------> Validation Loss: {:.6f}, l: {}".format(100*validation_loss/m, m))
Example #18
def train(model, epochs):
    criterion = nn.MSELoss()
    losslist = []
    #running_loss =
    epochloss = 0.
    optimizer = optim.SGD(model.parameters(), lr=0.1, weight_decay=1e-5)
    data = loader.loader(None, 'disk', True)

    for epoch in range(epochs):
        l = 0
        print("Entering Epoch: ", epoch)
        for id, x, y in data.generator(training=True):
            optimizer.zero_grad()
            y_hat = model(x)
            loss = criterion(y_hat.view(1), torch.Tensor([y]))

            loss.backward()

            #if l%50==0:
            #  print(y, y_hat.item(), loss.item(), model.linear1.weight.grad)
            optimizer.step()

            #      running_loss += loss.item()
            epochloss += loss.item()
            l += 1
        losslist.append(epochloss / l)
        print("\n======> epoch: {}/{}, Loss:{:.6f}, l: {}".format(
            epoch, epochs, 100 * epochloss / l, l))
        epochloss = 0

        if not (epoch % 5):
            validation_loss = 0.
            m = 0
            for id, x, y in data.generator(training=False):
                m += 1
                with torch.no_grad():
                    y_hat = model(x)
                    loss = criterion(y_hat.view(1), torch.Tensor([y]))
                    validation_loss += loss.item()
            print("-------> Validation Loss: {:.6f}, l: {}".format(
                100 * validation_loss / m, m))
Example #19
def main():
    loader = ld.loader()
    # crimeData = loader.loadCrime("communities.data.csv")
    # irisData = loader.loadIris("Iris.csv")
    cancerData = loader.pdLoadCancer("data.csv")

    # shortLength = 20
    # T = math.ceil(math.sqrt(shortLength))
    shortCancerData = cancerData.ix[:, 1:]

    # gaModel = ga.GeneticAlgorithm(shortCancerData, popSize=200, maxGeneration=100, limit=5, silent=False)
    # gaModel.startSearch()
    # print gaModel.result

    # saModel = sa.SimAnnealing(shortCancerData, limit=7, silent=False)
    # saModel.startSearch()
    # print saModel.result

    sawacoModel = sawaco.SaWAco(shortCancerData, limit=7, silent=False)
    sawacoModel.startSearch()
    print sawacoModel.result
Example #20
def train(model, myloss,  epoch):
    model.train()
    train_data = np.random.random(size=(69,69))
    train_data = torch.tensor(train_data,requires_grad=True)
    truth,shape = loader.loader('D:\\document\\体外网络发放数据\\a195e390b707a65cf3f319dbadbbc75f_6b245c0909b1a21072dd559d4203e15b_8.txt')
    truth = truth[200::,:]
    truth = torch.tensor(truth, dtype=torch.float32)
    truth = torch.unsqueeze(truth, dim=0)
    optimizer = optim.SGD([train_data], lr=0.001)
    for epoch_id in range(epoch):
        # note: the original train_data.cuda() call returned a new tensor that was
        # discarded, so everything is kept on the CPU here
        optimizer.zero_grad()
        output = model(train_data)
        output = torch.unsqueeze(output, dim=0)

        loss = myloss(output, truth)
        loss.backward()
        optimizer.step()
        if epoch_id % 1 == 0:
            print('Train Epoch: {} \tloss: {:.6f}'.format(
                epoch_id, loss.item()))
Example #21
def my_fit_and_score(train_test_parameters,
                     estimator=None,
                     X=None,
                     y=None,
                     verbose=False,
                     fit_params=None,
                     return_parameters=True,
                     scorer=None,
                     x_is_index=True,
                     names=('X', 'y')):
    from runner import bac_scorer, bac_error, confusion_matrix, process_cm

    train, test, parameters = train_test_parameters

    if x_is_index:
        index = X
        X = None
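    # cache the full X and y arrays in globals() so repeated calls only load them once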
    if X is None:
        if 'X' in globals():
            X = globals()[names[0]]
            y = globals()[names[1]]
        else:
            X, y = loader(names[0], names[1])()
            globals()[names[0]] = X
            globals()[names[1]] = y

    if x_is_index:
        X = X[index]
        y = y[index]

    return _fit_and_score(estimator=estimator,
                          X=X,
                          y=y,
                          verbose=verbose,
                          parameters=parameters,
                          fit_params=fit_params,
                          return_parameters=return_parameters,
                          train=train,
                          test=test,
                          scorer=bac_scorer)
Example #22
 def run(self):
     status = loader("The DNS service start")
     status.start()
     host = '127.0.0.1'
     port = int(config.readConfFile("VPNPort"))
     logs.addLogs(
         "INFO : The VPN service has started, it is now listening on port "
         + str(port))
     try:
         try:
             tcpsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
             tcpsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
             #tcpsock = ssl.wrap_socket(tcpsock, ssl_version=ssl.PROTOCOL_TLSv1, ciphers="ADH-AES256-SHA")
             tcpsock.bind((host, port))
             tcpsock.settimeout(5)
         except OSError as e:
             logs.addLogs("ERROR : In vpn.py : " + str(e))
             logs.addLogs("FATAL : Shutting down...")
             print(
                 c("red") + "An error occured. Please restart WTP." + c(""))
         else:
             logs.addLogs(
                 "INFO : The VPN service has started, it is now listening on port "
                 + str(port))
             status.stop()
             status.join()
             while self.serveur_lance:
                 try:
                     tcpsock.listen(10)
                     (clientsocket, (ip, port)) = tcpsock.accept()
                 except socket.timeout:
                     pass
                 else:
                     newthread = ClientThread(ip, port, clientsocket)
                     newthread.start()
     except KeyboardInterrupt:
         print(" pressed: Shutting down ...")
         print("")
     logs.addLogs("INFO : WTP service has been stoped successfully.")
Example #23
    def setup(self, jdb_data):
        global g_falcon_api
        # g_falcon_api = falcon.API(middleware=[HandleException(jdb_data)])
        g_falcon_api = falcon.API()
        app_log.debug("falcon:api_router:setup: start")
        self.loader = loader(g_falcon_api, jdb_data)
        self.loader.setup(JDBE_API_VERSION)

        jdbe_gunicorn_options = {
            "bind": "{}:{}".format(JDBE_API_SERVER, JDBE_API_PORT),
            "workers": utils.number_of_workers(),
            "timeout": 180,
            "worker_class": "gevent",
            "worker_connections": 1000,
            "threads": 3,
        }
        print "!!!!!!!!!!!@@@@@@@@@@", utils.number_of_workers()
        app_log.debug("api_router:falcon:opts:{}".format(jdbe_gunicorn_options))

        jdbe_gunicorn_glue(jdbe_gunicorn_cb, jdbe_gunicorn_options).run()

        app_log.debug("falcon:api_router:setup: complete")
Example #24
def algorithm_basic(blue_winrates, red_winrates):
    l = loader()
    weights = l.getLaneWeights()
    power_base = l.getAlgorithmConstants()['basic power_base']
    divisor = power_base**50
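    # put both teams' lane winrates on a log scale relative to 50, weight them, then shift back up by 50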
    for i in range(len(weights)):
        blue_winrates[i] = math.log((power_base**blue_winrates[i]) / divisor)
        red_winrates[i] = math.log((power_base**red_winrates[i]) / divisor)
        blue_winrates[i] *= weights[i]
        red_winrates[i] *= weights[i]
        blue_winrates[i] += 50
        red_winrates[i] += 50
    blue_tot = 0
    for item in blue_winrates:
        blue_tot += item

    red_tot = 0
    for item in red_winrates:
        red_tot += item

    total = blue_tot + red_tot

    return 100 * blue_tot / total
Example #25
    def set_up_editor(self):
        self.back_win = Tk()
        self.back_win.title("Project")
        self.back_win.geometry("+900+10")
        self.back_win.geometry("900x900")

        if self.is_old_project:
            self.set_up_scene_win()
            self.saver = saver(self.gameObjects_lst, self.old_project_file,
                               self.objs_table_name, self.is_old_project,
                               self.compiled)
            self.set_up_labels()
            self.loader = loader(self.old_project_file, self)
            self.file_name = self.old_project_file
            self.loader.load()
        else:
            self.set_up_scene_win()
            self.file_name = self.generate_file_name()
            self.saver = saver(self.gameObjects_lst, self.file_name,
                               self.objs_table_name, self.is_old_project,
                               self.compiled)
            self.set_up_labels()

        self.back_win.mainloop()
Example #26
def check_shape():
	root_path = "/media/nvme0n1/DATA/TRAININGSETS/"

	savePath = root_path+"mnist/"
	ld = loader(savePath+"X.pa", savePath+"Y.pa")
	X, Y = next(ld.get())
	print("[mnist]:", X.shape, X.dtype, X.max(), Y.shape, Y.dtype, Y.max())
		

	savePath = root_path+"cifar/"
	ld = loader(savePath+"X_10.pa", savePath+"Y_10.pa")
	X, Y = next(ld.get())
	print("[cifar10]:", X.shape, X.dtype, X.max(), Y.shape, Y.dtype, Y.max())
		

	savePath = root_path+"cifar/"
	ld = loader(savePath+"X_100.pa", savePath+"Y_100.pa")
	X, Y = next(ld.get())
	print("[cifar100]:", X.shape, X.dtype, X.max(), Y.shape, Y.dtype, Y.max())
		

	savePath = root_path+"coil20/"
	ld = loader(savePath+"X_unprocessed.pa", savePath+"Y_unprocessed.pa")
	X, Y = next(ld.get())
	print("[coil20-unprocessed]:", X.shape, X.dtype, X.max(), Y.shape, Y.dtype, Y.max())
		

	savePath = root_path+"coil20/"
	ld = loader(savePath+"X_processed.pa", savePath+"Y_processed.pa")
	X, Y = next(ld.get())
	print("[coil20-processed]:", X.shape, X.dtype, X.max(), Y.shape, Y.dtype, Y.max())
		

	savePath = root_path+"fer2013/"
	ld = loader(savePath+"X.pa", savePath+"Y.pa")
	X, Y = next(ld.get())
	print("[fer2013]:", X.shape, X.dtype, X.max(), Y.shape, Y.dtype, Y.max())
Example #27
File: launcher.py Project: anon1892/WTP
from serveurDNS import ServDNS
from vpn import ServVPN
#from bridge import Bridge
import config
from thrdLnch import ThreadLauncher
import cmdLauncher
from color import c
#import ssl

logs.addLogs("\n\n\n")
# Check that the sources are correct and have not been modified
#maj.verifSources()
config.verifConfig()
host = '127.0.0.1'
port = int(config.readConfFile("defaultPort"))
status = loader("Start Up")  # Ici et pas avant car si le fichier n'existe pas
status.start()  # L'assistant est invisible.


class ServeurThread(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.serveur_lance = True
        self.ip = ""
        self.port = 0
        self.clientsocket = ""

    def run(self):
        while self.serveur_lance:
            tcpsock.listen(10)
            tcpsock.settimeout(5)
Example #28
import torch
from torch import nn
import numpy as np
import matplotlib.pyplot as plt
import torch.nn.functional as F
import torch.utils.data as data
import torch.optim as optim
from torch.autograd import Variable
import loader
neuron_filter = np.loadtxt('neuron_graph.txt')
print(neuron_filter)
#neuron_filter = torch.tensor(neuron_filter,dtype=torch.float32)
PATH = 'D:\\Renyi\\culture_network\\stimulation_data_liner_regression_v.pkl'
truth, shape = loader.loader('spike_matrix.txt')
print(shape)
x1 = truth[0:-2, :]
x = truth[1:-1, :]
x = torch.tensor(x, dtype=torch.float32)
x1 = torch.tensor(x1, dtype=torch.float32)
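# x1 and x are the spike matrix shifted by one time step (times t and t+1); y below is the step after (t+2)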
x_1, xishape = loader.loader('voltage.txt')
#x1 = torch.tensor(x_1[0:-1,:],dtype=torch.float32)

xtotal = torch.cat([x, x1], dim=1)
print(xtotal.size())
y = truth[2::, :]
print(y.shape)
y = torch.tensor(y, dtype=torch.float32)


class fit(nn.Module):
Example #29
import loader

loader.loader()
Example #30
File: testHMM.py Project: JGLee6/kddc2010
def processor(data):
    """
    This is a functional form of data_processor sans some features.
    
    Inputs
    ------
    data : string
        path and file name of data (e.g. ld.trainDat)
        
    Returns
    -------
    xy_keys : list
        list of keys for data dictionary
    dat_array : ndarray
        parsed data dictionary now stored in numpy array
    tag_master : list
        list of tags from Knowledge Component data
    tag_array : ndarray
        array of knowledge component presence in each question
    opp_array : ndarray
        array of opportunity count for each component in each question
    """
    xy_keys,xy_train = ld.loader(data)
    
    # Process time strings to seconds
    for i in range(4):
        print 'Processing ' + time_strings[i]
        xy_train[time_strings[i]] = ld.convert_times(xy_train[time_strings[i]])
        
    # Convert Step Duration to seconds
    xy_train['Step Duration (sec)'] = (xy_train['Step End Time']-
                                        xy_train['Step Start Time'])

    # Dictionary of anonId and problem tags
    all_dicts = []

    # Process string ids
    for i in range(2):
        print 'Processing ' + id_strings[i]
        xy_train[id_strings[i]],temp = ida.ID_assigner(xy_train[id_strings[i]])
        all_dicts.append(temp)

    xy_train['Problem Hierarchy'],temp,temp2 = ida.unit_ID_assigner(
                                                xy_train['Problem Hierarchy'])
    all_dicts.append(temp)
    all_dicts.append(temp2)
    
    #These are the variables I care about at the moment, add if want more - JGL
    # 'Anon Student Id','Incorrects','Corrects','Problem View',
    #'Correct Transaction Time','Correct First Attempt','Step Start Time',
    #'First Transaction Time','Problem Hierarchy','Hints','Step End Time']
    # KC(Default) and Opportunity(Default) separate arrays.
    dat_array = np.empty([datLen,14])
    dat_array[:,0] = xy_train['Anon Student Id']
    dat_array[:,1] = xy_train['Problem Name']
    dat_array[:,2] = xy_train['Problem Hierarchy']
    dat_array[:,3] = np.array(xy_train['Incorrects'],dtype=int)
    dat_array[:,4] = np.array(xy_train['Hints'],dtype=int)
    dat_array[:,5] = np.array(xy_train['Corrects'],dtype=int)
    dat_array[:,6] = np.array(xy_train['Correct First Attempt'],dtype=int)
    dat_array[:,7] = np.array(xy_train['Problem View'],dtype=int)
    dat_array[:,8] = xy_train['Step Start Time']
    dat_array[:,9] = xy_train['First Transaction Time']
    dat_array[:,10] = xy_train['Correct Transaction Time']
    dat_array[:,11] = xy_train['Step End Time']
    dat_array[:,12] = xy_train['Step Duration (sec)']
    dat_array[:,13] = ld.check_final_answer(xy_train['Step Name'])
    
    # Process Knowledge components
    tag_master = tg.string_tags(xy_train['KC(Default)'])

    # Process opportunity
    tag_array,opp_array = tg.tags_to_array(
                                xy_train['KC(Default)'],
                                xy_train['Opportunity(Default)'],
                                tag_master)
    
    
    return xy_keys, dat_array, tag_master, tag_array, opp_array
Example #31
def processor_test(data, dicts, tags, master = False):
    """
    This is a functional form of data_processor sans some features.
    
    Inputs
    ------
    data : string
        path and file name of data (e.g. ld.testDat)
    dicts : list
        list of dictionaries from processing training data
    tags : list
        list of tags from processing training data
        
    Returns
    -------
    xy_keys : list
        list of keys for data dictionary
    dat_array : ndarray
        parsed data dictionary now stored in numpy array
    tags2 : list
        list of tags from Knowledge Component data
    tag_array : ndarray
        array of knowledge component presence in each question
    opp_array : ndarray
        array of opportunity count for each component in each question
    """
    xy_keys,xy_test = ld.loader(data)

    # Dictionary of anonId and problem tags
    all_dicts = []

    # Process string ids
    for i in range(2):
        print 'Processing ' + id_strings[i]
        xy_test[id_strings[i]],temp = ida.ID_assigner_TEST(xy_test[id_strings[i]],
                                        dicts[i])
        all_dicts.append(temp)

    xy_test['Problem Hierarchy'],temp,temp2 = ida.unit_ID_assigner_TEST(
                                                xy_test['Problem Hierarchy'],
                                                dicts[2],dicts[3])
    all_dicts.append(temp)
    all_dicts.append(temp2)
    
    #Scale
    datLen = len(xy_test[xy_keys[0]])
    
    #These are the variables I care about at the moment, add if want more - JGL
    # 'Anon Student Id','Incorrects','Corrects','Problem View',
    #'Correct Transaction Time','Correct First Attempt','Step Start Time',
    #'First Transaction Time','Problem Hierarchy','Hints','Step End Time']
    # KC(Default) and Opportunity(Default) separate arrays.
    dat_array = np.zeros([datLen,16])
    dat_array[:,0] = xy_test['Anon Student Id']
    dat_array[:,1] = xy_test['Problem Name']
    dat_array[:,2:4] = xy_test['Problem Hierarchy']
    dat_array[:,8] = np.array(xy_test['Problem View'],dtype=int)
    dat_array[:,14] = ld.check_final_answer(xy_test['Step Name'])
    dat_array[:,15] = np.array(xy_test['Row'],dtype=int)
    
    if master == True:
        nsize = len(xy_test['Anon Student Id'])
        
        # Process time strings to seconds
        for i in range(4):
            print 'Processing ' + time_strings[i]
            xy_test[time_strings[i]] = ld.convert_times(xy_test[time_strings[i]])
            
        # Convert durations to seconds, 0 if not present    
        for i in range(len(dur_strings)):
            print 'Processing '+dur_strings[i]
            for j in range(nsize):
                if xy_test[dur_strings[i]][j] == '':
                    xy_test[dur_strings[i]][j] = 0.0
                else:
                    xy_test[dur_strings[i]][j] = float(xy_test[dur_strings[i]][j])
                
        dat_array[:,4] = np.array(xy_test['Incorrects'],dtype=int)
        dat_array[:,5] = np.array(xy_test['Hints'],dtype=int)
        dat_array[:,6] = np.array(xy_test['Corrects'],dtype=int)
        dat_array[:,7] = np.array(xy_test['Correct First Attempt'],dtype=int)
        dat_array[:,8] = np.array(xy_test['Problem View'],dtype=int)
        dat_array[:,9] = xy_test['Step Start Time']
        dat_array[:,10] = xy_test['First Transaction Time']
        dat_array[:,11] = xy_test['Correct Transaction Time']
        dat_array[:,12] = xy_test['Step End Time']
        dat_array[:,13] = np.array(xy_test['Step Duration (sec)'])
        
    # Process Knowledge components
    newTags = tg.string_tags(xy_test['KC(Default)'])
    
    #Better to make a copy of tags
    tags2 = list(tags)
    tags2.extend(newTags)

    # Process opportunity
    tag_array,opp_array = tg.tags_to_array(
                                xy_test['KC(Default)'],
                                xy_test['Opportunity(Default)'],
                                tags2)
    
    
    return xy_keys, all_dicts, dat_array, tags2, tag_array, opp_array
Example #32
File: main.py Project: aneeshdash/ALL
import re,assemble,link,loader,machine
files=[]
print 'Enter file names(in descending order of execution):'
while True:
	print 'File Name:',
	fname=raw_input()
	if fname == '':
		break;
	files.append(fname)
print 'Enter Offset:',
offset=int(raw_input())
main=files[-1].split('.')[0]+'.asm'
symbols=assemble.assemble(files)
link.linker(main, symbols)
loader.loader(main, offset)
machine.machine(main)
print 'Symbol Table:'
for key in symbols:
	print 'File: '+key
	print symbols[key]
Example #33
def load_data(con):
    data_loader = loader.loader(con, source='disk', base_len=880)
    data = pd.DataFrame(data_loader.generator(group_id=True)).drop_duplicates(
        subset='sig_id', keep='first')
    data['fft'] = data.signal.map(lambda x: psd.psd(x.numpy().squeeze(), 0.25))
    return data
Example #34
import numpy as np
from SimpleLinear import simple_linear_model
from MSE import calculate_MSE
from gradient_descent import gradient_descent
from gradient_descent import regularised_gradient_descent
from loader import loader
from normalise import normalise
from visualize import visualize

#initialise variables
n_epochs = 50
np.random.seed(0)

#Generate X and y matrices
X, y = loader("ex1data2")
X = normalise(X)  #normalise features
m = y.size  #get total number of data samples
X = np.hstack(
    (np.ones(m)[:, np.newaxis], X))  #add intercept values to the feature array
n = X.shape[1]  #get total number of features
print(X)
print(y)

#Split the data set into training set, cross-validation set and test set
X = np.vsplit(X, [int(0.6 * X.shape[0]), int(0.8 * X.shape[0])])
y = np.split(y, [int(0.6 * y.size), int(0.8 * y.size)])
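#The splits above give 60% training, 20% cross-validation and 20% test data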

X_train = X[0]
X_cv = X[1]
X_test = X[2]
Example #35
def runload(offset=0):
	loader.loader(x, offset)
	sim.reg['PC'] = offset
Example #36
File: bdat.py Project: earnric/modules
        if shape == ():
            return me[0]
        return np.array(me)

    @CachedAttribute
    def ions(self):
        return IonList(list(d.ion for d in self.data))

    @CachedAttribute
    def ext_ions(self):
        addions = ('nt1', 'h1', 'he4', 'be8')
        ions = set(self.ions) | set(isotope.ion(i) for i in addions)
        return IonList(sorted(list(ions)))


load = loader(BDat, __name__ + '.load')
_load = _loader(BDat, __name__ + '.load')
loadbdat = load

class BDatRecord(object):
    """
    BDAT reaction dec record for one isotope

    TODO - add evaluation function (but needs weights)

    TODO - make individual reactions their own objects?
    """
    maxreact = 10

    def __init__(self,
                 *args,
Example #37
#!/usr/bin/python

import loader as ld
import tagger as tg
import ID_assigner as ida
import numpy as np
import dtree as dt

[xy_keys,xy_train] = ld.loader(ld.trainDat)

nsize = len(xy_train['Anon Student Id'])


flt_s = ['Step Duration (sec)','Correct Step Duration (sec)','Error Step Duration (sec)']
for i in range(len(flt_s)):
    for j in range(nsize):
        if xy_train[flt_s[i]][j] == '':
            xy_train[flt_s[i]][j] = 0.0
        else:
            xy_train[flt_s[i]][j] = float(xy_train[flt_s[i]][j])


time_strings = ['Step Start Time','First Transaction Time','Correct Transaction Time','Step End Time']

for i in range(4):
    print 'Processing ' + time_strings[i]
    xy_train[time_strings[i]] = ld.convert_times(xy_train[time_strings[i]])

#id_strings = ['Anon Student Id','Step Name','Problem Name']

id_strings = ['Anon Student Id','Problem Name']
Example #38
import sys

import loader
import data_manager
import xml_parser
import triplet
import sklearn.metrics

if len(sys.argv) != 2:
    print('Usage: python triplet_test.py <config>')
    exit(0)

hyper_params = xml_parser.parse(sys.argv[1], flat=False)

# Construct the loader
loader_params = hyper_params['loader']
my_loader = loader.loader(path=loader_params['path'],
                          train_img=loader_params['train_img'],
                          train_lbl=loader_params['train_lbl'],
                          test_img=loader_params['test_img'],
                          test_lbl=loader_params['test_lbl'])

# Construct the data manager
data_manager_params = hyper_params['data_manager']
method = data_manager_params['sampling_method']

data_manager_params['loader'] = my_loader
my_data_manager = data_manager.data_manager(data_manager_params)

# Construct the network
network_params = hyper_params['network']
my_network = triplet.triplet_net(network_params)

# Test
Example #39
                        help='results file name')

    args = parser.parse_args()

    cache = True

    if cache:
        with open(r'data/cache/filenames.json', 'r') as f:
            filenames = json.load(f, object_pairs_hook=OrderedDict)

        datasets = OrderedDict()
        for name, (X_filename, y_filename) in filenames.iteritems():
            # if 'DUD' not in name:
            #     continue

            datasets[name] = loader(X_filename, y_filename)

    else:
        datasets = get_datasets_with_dud(datafiles)
        filenames = OrderedDict()
        for name, (X, y) in datasets.iteritems():
            X_filename = r'data/cache/' + name + '_X'
            y_filename = r'data/cache/' + name + '_y'

            if issparse(X):
                X = X.toarray()
            if issparse(y):
                y = y.toarray()

            np.save(X_filename, X)
            np.save(y_filename, y)
Example #40
def run_test(weights_file, test_file, classnum):
    if MONITOR == 1:
        monitor_names = [
            'image_out', 'conv1_1_out', 'conv1_2_out', 'pool1_out',
            'conv2_1_out', 'conv2_2_out', 'pool2_out', 'conv3_1_out',
            'conv3_2_out', 'conv3_3_out', 'pool3_out', 'conv4_1_out',
            'conv4_2_out', 'conv4_3_out', 'pool4_out', 'conv5_1_out',
            'conv5_2_out', 'conv5_3_out', 'fc6_out', 'fc7_out', 'fc8_out'
        ]
    else:
        monitor_names = []
    output_names = ['prob']
    output_names.extend(monitor_names)

    param_dict = np.load(weights_file, encoding='latin1',
                         allow_pickle=True).item()
    predictor = OfflinePredictor(
        PredictConfig(model=Model(classnum),
                      session_init=DictRestore(param_dict),
                      input_names=['input'],
                      output_names=output_names))

    if MONITOR == 1:
        NUM = 1
    else:
        NUM = 10000

    l = loader()
    l.load_cifar(test_file, num_samples=NUM, classes=classnum)
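    # stack the loaded test samples into a single image batch for the predictor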
    images = np.zeros((NUM, IMAGE_SIZE, IMAGE_SIZE, 3))
    for i, ret in enumerate(l.ret):
        images[i, :, :, :] = ret[0]

    outputs = predictor([images])

    prob = outputs[0]

    correct_count = 0
    maxes = np.argmax(prob, axis=1)
    ## label and prediction
    for i, ret in enumerate(l.ret):
        print("Golden: {}, predicted: {}".format(ret[1], maxes[i]))
        if ret[1] == maxes[i]:
            correct_count += 1

    print(str(correct_count) + ' correct out of ' + str(NUM))

    index = 1
    for o in monitor_names:
        print(o + ', shape: ' + str(outputs[index].shape))

        if 'image' not in o and BITA >= 4:
            print(str(outputs[index]))

        if len(outputs[index].shape) == 4:
            file_name = o.split('/')[-1]
            print('Writing file... ' + file_name)
            if not os.path.exists('./log'):
                os.makedirs('./log')
            with open('./log/' + file_name + '.log', 'w') as f:
                for sample in range(0, outputs[index].shape[0]):
                    for h in range(0, outputs[index].shape[1]):
                        for w in range(0, outputs[index].shape[2]):
                            res = ''
                            for c in range(0, outputs[index].shape[3]):
                                if 'image' in file_name:
                                    res = hexFromInt(
                                        int(outputs[index][sample, h, w, c]),
                                        8) + '_' + res
                                else:
                                    res = hexFromInt(
                                        int(outputs[index][sample, h, w, c]),
                                        BITA) + '_' + res
                            f.write('0x' + res + '\n')
        index += 1
Example #41
File: main.py Project: kunaaljain/ALL-8085
def runload():
	loader.loader(x)