Code Example #1
 def test(self, df, sample_size, test_size, batch_size):
     for i in range(batch_size):
         TestModel = QModel(self.sess, self.theta, self.num_features,
                            self.num_outputs, self.num_layers)
         train_start = 0
         train_end = train_start + sample_size
         XTest, YTest = self.sample_points(df, sample_size + test_size)
         TestModel.fit(XTest[:train_end], YTest[:train_end])
         print(
             "Testing Now +_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_+_"
         )
         YPred = TestModel.predict(XTest[train_end:])
         YPred = [self.classify(pred) for pred in YPred]
         # pair actual and predicted labels column-wise for saving
         Y = np.column_stack((YTest[train_end:], YPred))
         correct = 0
         for index in range(0, test_size):
             if [YPred[index]] == YTest[train_end + index]:
                 correct += 1
         accuracy = (correct / test_size) * 100
         print("Predicted {}".format(YPred))
         print("Actual {}".format(YTest[train_end:]))
         print("Accuracy {}%\n".format(accuracy))
         np.savetxt(
             'Results/results{}batch{}.csv'.format(self.test_called, i), Y)
         # with open('Results/results{}batch{}.csv'.format(self.test_called, i), 'w') as csvfile:
         #     filewriter = csv.writer(csvfile, delimiter=',',
         #                     quotechar='|', quoting=csv.QUOTE_MINIMAL)
         #     filewriter.writerow(['YTest','YPred'])
         #     for i in range (len(YPred)):
         #         filewriter.writerow([YTest[train_end+i][0], YPred[i]])
         self.test_called += 1
Code Example #2
File: extract_weight.py Project: hoangt/caffe
def extract_caffe_model(model, weights, output_path, output_type):
    """extract caffe model's parameters to numpy array, and write them to files
  Args:
    model: path of '.prototxt'
    weights: path of '.caffemodel'
    output_path: output path of numpy params 
  Returns:
    None
  """
    net = caffe.Net(model, caffe.TEST)
    net.copy_from(weights)

    if not os.path.exists(output_path):
        os.makedirs(output_path)

    for item in net.params.items():
        name, layer = item
        print('convert layer: ' + name)

        num = 0
        for p in net.params[name]:
            fname = output_path + '/' + str(name) + '_' + str(num)
            if (output_type == 'a' or output_type == 'n'):
                np.save(fname, p.data)
            if (output_type == 'a' or output_type == 't'):
                np.savetxt(fname + '.txt', p.data)  # note: np.savetxt handles only 1-D/2-D arrays
            if (output_type == 'a' or output_type == 'h'):
                print('>>> RFU')
            num += 1
Code Example #3
def A_2D_mash_grid():
    max_steps = 20000
    experiment_parameters = {"max_steps": max_steps, "num_runs": 50}

    # Environment parameters
    environment_parameters = {}

    agent_parameters = {
        "num_tilings": [32],
        "num_tiles": [8],
        "actor_step_size": [2**(-2)],
        "critic_step_size": [2**1],
        "avg_reward_step_size": [2**(-5)],
        "num_actions": 3,
        "iht_size": 4096
    }
    #avg 2^-8, 2^-7, 2^-6, 2^-5, 2^-4
    max_i = 8
    max_j = 8
    actor_range = np.linspace(2**(-6), 2**1, num=max_i)
    critic_step_size_range = np.linspace(2**-4, 2**2, num=max_j)
    result_exp_error = np.empty([max_i, max_j])
    test_scope = 5000  # evaluate the exponential average reward over the last 5000 time steps
    current_env = PendulumEnvironment
    current_agent = ActorCriticSoftmaxAgent
    file_type = "exp_avg_reward"
    directory = "results_actor_critic"
    i = 0
    for actor_step_size in actor_range:
        j = 0
        for critic_step_size in critic_step_size_range:
            agent_parameters["actor_step_size"] = [actor_step_size]
            agent_parameters["critic_step_size"] = [critic_step_size]
            run_experiment(current_env, current_agent, environment_parameters,
                           agent_parameters, experiment_parameters)
            data = get_data(agent_parameters, directory, file_type)
            data_mean = np.mean(data, axis=0)
            data_mean = data_mean[-test_scope:]
            result_exp_error[i][j] = np.average(data_mean)
            j += 1
        i += 1
    print(actor_range)
    print(critic_step_size_range)
    print(result_exp_error)
    np.savetext("20000_run_avg_reward_step_size_2^-5-_policy_gradient.txt",
                result_exp_error)
    print(experiment_parameters)
    print(agent_parameters)
    fig = plt.figure()
    ax = plt.axes(projection='3d')
    # plot_surface expects 2-D coordinate grids matching result_exp_error[i][j]
    actor_grid, critic_grid = np.meshgrid(actor_range,
                                          critic_step_size_range,
                                          indexing='ij')
    ax.plot_surface(actor_grid,
                    critic_grid,
                    result_exp_error,
                    cmap='viridis',
                    edgecolor='none')
    ax.set_title('Surface plot')
    plt.show()
Code Example #4
	def extractDualRawTraces(self, lines, saveTraces=False, traceFilename = 'trace_'):
		
		channel1Data = self.extractRawTraces(lines[0])
		channel2Data = self.extractRawTraces(lines[1])
		returnData = []
		
		for i, element in enumerate(channel1Data):
			returnData.append(self.packageTrace(channel1Data[i],channel2Data[i]))
			if saveTraces == True:
				np.savetxt(traceFilename + str(i) + '.txt', returnData[i], fmt='%.6e')
		
		return returnData
Code Example #5
File: nn.py Project: dhruvkhattar/Neural-Network
    def run(self):

        ct = 0
        while ct < 100:
            test = 0
            for it in range(len(self.data)):
                self.layers[0].unit[1:] = self.data[it]
                self.forwardPropagation()
                test += np.dot(self.T[it] - self.layers[LAYERS - 1].unit,
                               self.T[it] - self.layers[LAYERS - 1].unit)
                self.backPropagation(it)
            print('Iteration: ', ct)
            print('Training Error: ', test / 2)
            self.test()
            ct += 1
        np.save('hw', [self.layers[1].w, self.layers[2].w])
        np.savetxt('hw1', self.layers[1].w)
        np.savetxt('hw2', self.layers[2].w)
Code Example #6
 def genData():
     k = 0
     for i in range(0, 1000):
         for j in range(1, 3):
             k = j - 1  # class label: 0 = star with planet, 1 = star without planet
             filename = 'data/rawData/' + str(k) + 'raw' + str(i) + '.txt'
             imgname = 'data/lightCurvePlots/' + str(k) + 'fig' + str(
                 i) + '.png'
             ##Added SVM support
             statfile = 'data/stellarProperties/' + str(k) + 'prop' + str(
                 i) + '.txt'
             if k == 0:
                 stats = pd.read_csv('planetstarstats.csv')
             else:
                 stats = pd.read_csv('noplanetstarstats.csv')
             # draw the four stellar properties from the class-specific distributions
             properties = [
                 np.random.normal(stats['means'].iloc[n], stats['sdevs'].iloc[n])
                 for n in range(4)
             ]
             np.savetxt(statfile, properties)
             np.savetxt(filename, data.results[i][j])
             plt.plot(data.results[i][j])
             plt.savefig(imgname)
             plt.close()
Code Example #7
File: MINST.py Project: gli-27/cvproj
def load_mnist_label(path, fileName, type='train'):
    filePath = os.path.join(path, fileName)
    with open(filePath, 'rb') as fp:
        buf = fp.read()
    index = 0
    magic, num = struct.unpack_from('>II', buf, index)
    index += struct.calcsize('>II')
    Labels = np.zeros(num)

    for i in range(num):
        Labels[i] = np.array(struct.unpack_from('>B', buf, index))
        index += struct.calcsize('>B')

    if (type == 'train'):
        np.savetxt('./train_labels.csv', Labels, fmt='%i', delimiter=',')
    if (type == 'test'):
        np.savetxt('./test_labels.csv', Labels, fmt='%i', delimiter=',')

    return Labels
Code Example #8
def main(train_path, valid_path, save_path):
    """Problem: Logistic regression with Newton's Method.

    Args:
        train_path: Path to CSV file containing dataset for training.
        valid_path: Path to CSV file containing dataset for validation.
        save_path: Path to save predicted probabilities using np.savetxt().
    """
    x_train, y_train = util.load_dataset(train_path, add_intercept=True)
    x_valid, y_valid = util.load_dataset(valid_path, add_intercept=True)
    # *** START CODE HERE ***
    # Train a logistic regression classifier
    logistic = LogisticRegression()
    logistic.fit(x_train, y_train)
    y_pred = logistic.predict(x_valid)

    # Plot decision boundary on top of validation set
    # Use np.savetxt to save predictions on eval set to save_path
    np.savetxt(save_path, y_pred)
Code Example #9
File: scripts.py Project: mosbasik/andykeller
def dta_top_Q_users(dta_path, Q):
    '''
    Creates a new version of the specified dta file containing only entries of
    the top Q users.
    '''

    path = os.path.dirname(dta_path)
    basename = os.path.basename(dta_path)
    plainname = '.'.join(basename.split('.')[:-1])
    
    dta = np.loadtxt(dta_path, dtype=float) # load up the file

    dta_counts = np.zeros((dta.shape[0],))  # make counts array full of zeros

    for point in dta:
        dta_counts[int(point[0])] += 1      # add 1 to bin for every occurrence

    # http://stackoverflow.com/questions/6910641/how-to-get-indices-of-n-maximum-values-in-a-numpy-array
    dta_indices = np.argpartition(dta_counts, -Q)[-Q:] # array of dta_count indices containing topQ user ids

    dta_topQusers = set(dta_indices)        # set of top Q user ids

    topQ = np.zeros(dta.shape)              # make topQ array full of zeros

    count = 0                               # count of points by topQ users

    for i, point in enumerate(dta):         # loop through all points in dta
        if point[0] in dta_topQusers:       # if the user is a top Q user
            topQ[i] = point                 # save that point into topQ
            count += 1                      # count is incremented

    topQshort = np.zeros((count, dta.shape[1]))

    j = 0                                   # next free row in topQshort
    for point in topQ:
        if point[0] == 0:                   # skip rows left empty (non-top-Q users)
            continue
        topQshort[j] = point
        j += 1


    np.savetxt(path + '/' + plainname + '_top' + str(Q) + 'users.dta', topQ)
Code Example #10
File: record_sinks.py Project: sevenreek/qtadqscope
 def on_record(self, header: ct.Structure, samples: np.ndarray):
     np.savetxt(self.output_file, samples.astype(int), fmt="%d")
Code Example #11
class ChexnetTrainer():

    #---- Train the densenet network
    #---- pathDirData - path to the directory that contains images
    #---- pathFileTrain - path to the file that contains image paths and label pairs (training set)
    #---- pathFileVal - path to the file that contains image path and label pairs (validation set)
    #---- nnArchitecture - model architecture 'DENSE-NET-121', 'DENSE-NET-169' or 'DENSE-NET-201'
    #---- nnIsTrained - if True, uses pre-trained version of the network (pre-trained on imagenet)
    #---- nnClassCount - number of output classes
    #---- trBatchSize - batch size
    #---- trMaxEpoch - number of epochs
    #---- transResize - size of the image to scale down to (not used in current implementation)
    #---- transCrop - size of the cropped image
    #---- launchTimestamp - date/time, used to assign unique name for the checkpoint file
    #---- checkpoint - if not None loads the model and continues training

    def train(pathDirData, pathFileTrain, pathFileVal, nnArchitecture,
              nnIsTrained, nnClassCount, trBatchSize, trMaxEpoch, transResize,
              transCrop, launchTimestamp, checkpoint):

        #-------------------- SETTINGS: NETWORK ARCHITECTURE
        if nnArchitecture == 'DENSE-NET-121':
            model = DenseNet121(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-169':
            model = DenseNet169(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-201':
            model = DenseNet201(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'resnet':
            model = ResNeXt(14).cuda()
        elif nnArchitecture == 'dcsnnet':
            model = DCSNNet(14).cuda()

        model = torch.nn.DataParallel(model).cuda()

        #-------------------- SETTINGS: DATA TRANSFORMS
        normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                         [0.229, 0.224, 0.225])

        transformList = []
        transformList.append(transforms.RandomResizedCrop(transCrop))
        transformList.append(transforms.RandomHorizontalFlip())
        transformList.append(transforms.ToTensor())
        transformList.append(normalize)
        transformSequence = transforms.Compose(transformList)

        #-------------------- SETTINGS: DATASET BUILDERS
        datasetTrain = DatasetGenerator(
            pathImageDirectory=pathDirData,
            pathDatasetFile=pathFileTrain,
            transform=transformSequence)
        datasetVal = DatasetGenerator(
            pathImageDirectory=pathDirData,
            pathDatasetFile=pathFileVal,
            transform=transformSequence)

        dataLoaderTrain = DataLoader(
            dataset=datasetTrain,
            batch_size=trBatchSize,
            shuffle=True,
            num_workers=24,
            pin_memory=True)
        dataLoaderVal = DataLoader(
            dataset=datasetVal,
            batch_size=trBatchSize,
            shuffle=False,
            num_workers=24,
            pin_memory=True)

        #-------------------- SETTINGS: OPTIMIZER & SCHEDULER
        optimizer = optim.Adam(
            model.parameters(),
            lr=0.0001,
            betas=(0.9, 0.999),
            eps=1e-08,
            weight_decay=1e-5)
        scheduler = ReduceLROnPlateau(
            optimizer, factor=0.1, patience=5, mode='min')

        #-------------------- SETTINGS: LOSS
        loss = torch.nn.BCELoss(size_average=True)

        #---- Load checkpoint
        if checkpoint is not None:
            modelCheckpoint = torch.load(checkpoint)
            model.load_state_dict(modelCheckpoint['state_dict'])
            optimizer.load_state_dict(modelCheckpoint['optimizer'])

        #---- TRAIN THE NETWORK

        lossMIN = 100000

        for epochID in range(0, trMaxEpoch):

            timestampTime = time.strftime("%H%M%S")
            timestampDate = time.strftime("%d%m%Y")
            timestampSTART = timestampDate + '-' + timestampTime

            ChexnetTrainer.epochTrain(model, dataLoaderTrain, optimizer,
                                      scheduler, trMaxEpoch, nnClassCount,
                                      loss)
            lossVal, losstensor = ChexnetTrainer.epochVal(
                model, dataLoaderVal, optimizer, scheduler, trMaxEpoch,
                nnClassCount, loss)

            timestampTime = time.strftime("%H%M%S")
            timestampDate = time.strftime("%d%m%Y")
            timestampEND = timestampDate + '-' + timestampTime

            scheduler.step(losstensor.data[0])

            if lossVal < lossMIN:
                lossMIN = lossVal
                torch.save({
                    'epoch': epochID + 1,
                    'state_dict': model.state_dict(),
                    'best_loss': lossMIN,
                    'optimizer': optimizer.state_dict()
                }, 'm-' + launchTimestamp + '.pth.tar')
                print('Epoch [' + str(epochID + 1) + '] [save] [' +
                      timestampEND + '] loss= ' + str(lossVal))
            else:
                print('Epoch [' + str(epochID + 1) + '] [----] [' +
                      timestampEND + '] loss= ' + str(lossVal))

    #--------------------------------------------------------------------------------

    def epochTrain(model, dataLoader, optimizer, scheduler, epochMax,
                   classCount, loss):

        model.train()

        for batchID, (input, target) in enumerate(dataLoader):

            target = target.cuda(non_blocking=True)  # 'async' is a reserved word in Python 3.7+

            varInput = torch.autograd.Variable(input)
            varTarget = torch.autograd.Variable(target)
            varOutput = model(varInput)

            lossvalue = loss(varOutput, varTarget)

            optimizer.zero_grad()
            lossvalue.backward()
            optimizer.step()

    #--------------------------------------------------------------------------------

    def epochVal(model, dataLoader, optimizer, scheduler, epochMax, classCount,
                 loss):
        model.eval()

        lossVal = 0
        lossValNorm = 0

        losstensorMean = 0

        for i, (input, target) in enumerate(dataLoader):

            target = target.cuda(non_blocking=True)  # 'async' is a reserved word in Python 3.7+

            varInput = torch.autograd.Variable(input, volatile=True)
            varTarget = torch.autograd.Variable(target, volatile=True)
            varOutput = model(varInput)

            losstensor = loss(varOutput, varTarget)
            losstensorMean += losstensor

            lossVal += losstensor.data[0]
            lossValNorm += 1

        outLoss = lossVal / lossValNorm
        losstensorMean = losstensorMean / lossValNorm

        return outLoss, losstensorMean

    #--------------------------------------------------------------------------------

    #---- Computes area under ROC curve
    #---- dataGT - ground truth data
    #---- dataPRED - predicted data
    #---- classCount - number of classes

    def computeAUROC(dataGT, dataPRED, classCount):

        outAUROC = []

        datanpGT = dataGT.cpu().numpy()
        datanpPRED = dataPRED.cpu().numpy()

        for i in range(classCount):
            outAUROC.append(roc_auc_score(datanpGT[:, i], datanpPRED[:, i]))

        return outAUROC

    #--------------------------------------------------------------------------------

    #---- Test the trained network
    #---- pathDirData - path to the directory that contains images
    #---- pathFileTrain - path to the file that contains image paths and label pairs (training set)
    #---- pathFileVal - path to the file that contains image path and label pairs (validation set)
    #---- nnArchitecture - model architecture 'DENSE-NET-121', 'DENSE-NET-169' or 'DENSE-NET-201'
    #---- nnIsTrained - if True, uses pre-trained version of the network (pre-trained on imagenet)
    #---- nnClassCount - number of output classes
    #---- trBatchSize - batch size
    #---- trMaxEpoch - number of epochs
    #---- transResize - size of the image to scale down to (not used in current implementation)
    #---- transCrop - size of the cropped image
    #---- launchTimestamp - date/time, used to assign unique name for the checkpoint file
    #---- checkpoint - if not None loads the model and continues training

    def test(pathDirData, pathFileTest, pathModel, nnArchitecture,
             nnClassCount, nnIsTrained, trBatchSize, transResize, transCrop,
             launchTimeStamp):

        CLASS_NAMES = [
            'Atelectasis', 'Cardiomegaly', 'Effusion', 'Infiltration', 'Mass',
            'Nodule', 'Pneumonia', 'Pneumothorax', 'Consolidation', 'Edema',
            'Emphysema', 'Fibrosis', 'Pleural_Thickening', 'Hernia'
        ]

        cudnn.benchmark = True

        #-------------------- SETTINGS: NETWORK ARCHITECTURE, MODEL LOAD
        if nnArchitecture == 'DENSE-NET-121':
            model = DenseNet121(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-169':
            model = DenseNet169(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-201':
            model = DenseNet201(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'resnet':
            model = ResNeXt(10).cuda()
        elif nnArchitecture == 'dcsnnet':
            model = DCSNNet(10).cuda()

        model = torch.nn.DataParallel(model).cuda()

        modelCheckpoint = torch.load(pathModel)
        model.load_state_dict(modelCheckpoint['state_dict'])

        model.load_state_dict({
            k.replace('module.', ''): v
            for k, v in modelCheckpoint['state_dict'].items()
        })

        #-------------------- SETTINGS: DATA TRANSFORMS, TEN CROPS
        normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                         [0.229, 0.224, 0.225])

        #-------------------- SETTINGS: DATASET BUILDERS
        transformList = []
        transformList.append(transforms.Resize(transResize))
        transformList.append(transforms.TenCrop(transCrop))
        transformList.append(transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])))
        transformList.append(
            transforms.Lambda(
                lambda crops: torch.stack([normalize(crop) for crop in crops]))
        )
        transformSequence = transforms.Compose(transformList)

        datasetTest = DatasetGenerator(
            pathImageDirectory=pathDirData,
            pathDatasetFile=pathFileTest,
            transform=transformSequence)
        dataLoaderTest = DataLoader(
            dataset=datasetTest,
            batch_size=trBatchSize,
            num_workers=8,
            shuffle=False,
            pin_memory=True)

        outGT = torch.FloatTensor().cuda()
        outPRED = torch.FloatTensor().cuda()

        model.eval()

        for i, (input, target) in enumerate(dataLoaderTest):

            target = target.cuda()
            outGT = torch.cat((outGT, target), 0)

            bs, n_crops, c, h, w = input.size()

            varInput = torch.autograd.Variable(
                input.view(-1, c, h, w).cuda(), volatile=True)

            out = model(varInput)
            outMean = out.view(bs, n_crops, -1).mean(1)

            outPRED = torch.cat((outPRED, outMean.data), 0)

            print(outPRED)

        np.savetext("np.txt", outGT)
        np.savetext("np2.txt", outPRED)

        aurocIndividual = ChexnetTrainer.computeAUROC(outGT, outPRED,
                                                      nnClassCount)
        aurocMean = np.array(aurocIndividual).mean()

        print('AUROC mean ', aurocMean)

        for i in range(0, len(aurocIndividual)):
            print(CLASS_NAMES[i], ' ', aurocIndividual[i])

        return
Code Example #12
File: lda.py Project: wintony/CACostalCommission
import csv

import numpy as np
from sklearn.decomposition import LatentDirichletAllocation

# inputFile and yearColNum are assumed to be defined earlier in lda.py
lda_model = LatentDirichletAllocation(n_components=20,
                                      learning_method='online',
                                      random_state=0,
                                      verbose=0)

matrixFile = 'lsa_matrix_2016.txt'
matrixNums = []
f = open(matrixFile)
for line in f:
    matrixNums.append(line)

matrixNums = [float(i) for i in matrixNums]
matrixList = []

csvFile = open(inputFile, encoding='ISO-8859-1')
reader = csv.reader(csvFile)
numDocs = 0
for row in reader:
    if row[yearColNum] == '2016':
        numDocs += 1

for i in range(len(matrixNums)):
    row = []
    for j in range(0, numDocs):
        row.append(matrixNums[j])
    matrixList.append(row)

matrix = np.array(matrixList)
np.savetxt('lsa_matrix_2016_condensed.txt', matrix)

lda_topic_matrix = lda_model.fit_transform(matrix)
Code Example #13
 def save_length(self, path):
     np.savetxt(path, self.length_t)
Code Example #14
import numpy as np
from PIL import Image
import time
import cv2 as cv

prev_frame = ''
curr_frame = ''

im1 = np.array(Image.open(prev_frame))
im1 = im1.astype(float) / 256.0

im2 = np.array(Image.open(curr_frame))
im2 = im2.astype(float) / 256.0

# u, v: horizontal and vertical flow components, assumed to be computed above
np.savetxt('', u, delimiter='\n')
np.savetxt('', v, delimiter='\n')

flow = np.concatenate((u[..., None], v[..., None]), axis=2)

#plot using OpenCV
hsv = np.zeros(im1.shape, dtype=np.uint8)
hsv[..., 1] = 255                      # full saturation
mag, ang = cv.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2    # hue encodes flow direction
hsv[..., 2] = cv.normalize(mag, None, 0, 255, cv.NORM_MINMAX)

rgb = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)
Code Example #15
 def month_to_year(self, output_address):
     annual_data = []
     for count_row in range(0, len(self.text_data)):
         if (count_row - 1) % 12 == 0:
             annual_data.append(self.text_data[count_row])
     np.savetxt(output_address, annual_data, delimiter=",")
Code Example #16
import sys
history = [add_song[0]]


def min(a):
    global history
    minimum = float(sys.float_info.max)
    mini = minimum
    ret = a
    if len(history) == len(add_song):
        return None
    for i in add_song:
        tmp = distance(a, i)
        if (a != i and not (i in history) and float(tmp) < mini):
            mini = tmp
            ret = i
    history.append(ret)
    if (ret != a):
        return min(ret)
    else:
        return None


(min(add_song[0]))

# In[ ]:

np.savetxt('Euclidean.txt', a, fmt='%.15f')  #Euclidean.txt
np.savetxt('songList.txt', add_song, fmt='%s')  #songList.txt
Code Example #17
V1 = np.linspace(V_min, V_max, 10 * N_Volts + 1)
Imeas = np.zeros(N_Volts)

rm = visa.ResourceManager()
awg = rm.open_resource(awg_address)
dmm = rm.open_resource(dmm_address)

awg.write("OUTP:LOAD INF")

count = 0
for K in V:
    print("Applying %f Volts" % K)
    awg.write("APPL:DC DEF,DEF,%f" % K)
    time.sleep(1)
    Imeas[count] = dmm.query("MEAS:CURR:DC? 1e-1,1e-5")
    count = count + 1

Rest = V.dot(V) / V.dot(Imeas)
Iest = V1 / Rest

data = np.append(np.transpose([V]), np.transpose([Imeas * 1000]), axis=1)
np.savetxt(filename, data, delimiter=',')

plt.plot(V, Imeas * 1000, 'bo', markersize=4, label='Measured')
plt.plot(V1, Iest * 1000, 'r-', linewidth=2, label='Fitted')
plt.grid()
plt.legend()
plt.xlabel("Voltage (V)")
plt.ylabel("Current (mA)")
plt.title("Estimated Resistance = " + '{:.0f}'.format(Rest) + r' $\Omega$')
plt.show()
Code Example #18
from index import loadCsv  # assuming loadCsv is provided by the local index module
import numpy as np

if __name__ == '__main__':
    fileName = 'pendigits.all'
    dataSet = loadCsv('../data/%s.csv' % (fileName))
    a = []
    sub = 10000
    total = dataSet.shape[0]
    for item in dataSet:
        if np.random.rand() > float(sub)/total:
            a.append(item)
    
    np.savetxt('../data/%s-part.csv' % (fileName), np.array(a), delimiter=',')
Code Example #19

# compare with skimage.color.rgb2gray
img_conb2 = np.concatenate((img_yiq[:, :, 0], img_gray_01), axis=1)
io.imshow(img_conb2)


# In[47]:


# also check the inverted (negative) image
img_nega = 255 - img_gray
io.imshow(img_nega)
#io.imshow(img_gray)
print(img_nega)
np.savetxt('AAA.txt', img_nega, delimiter=',')


# #### Check the statistics of the grayscaled data

# In[45]:


print('pixel sum', np.sum(img_gray[:, :]))
print('pixel mean', np.mean(img_gray[:,:]))
print('pixel variance', np.var(img_gray[:,:]))
print('pixel stddev', np.std(img_gray[:,:]))


# #### Check the histogram
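
The histogram itself is not computed in this snippet; a minimal sketch with matplotlib, assuming img_gray is the 2-D grayscale array loaded above:

import matplotlib.pyplot as plt

# flatten the grayscale image and plot the distribution of pixel values
plt.hist(img_gray.ravel(), bins=256)
plt.xlabel('pixel value')
plt.ylabel('count')
plt.show()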
Code Example #20
File: nData.py Project: GitWyd/KagglePBJ
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 22 00:37:09 2016

@author: Philippe
"""
import csv
import numpy as np
from cleandata import get_data
import pickle

X_data, X_quiz = get_data('data')
print('data loaded...')
with open('data/clean_data.pkl', 'wb') as f:  # hypothetical path; the original pickle.dump call was left incomplete
    pickle.dump(X_data, f)
np.savetxt("data/clean_data.csv", X_data, delimiter=",")
print('clean_data.csv has been saved')
np.savetext("data/clean_quiz.csv", X_quiz, delimiter=",")
print('clean_quiz.csv has been saved')
print('DONE')
Code Example #21
File: pynumber1.py Project: Pranay-Singhal/SummerML
import numpy as np
import random

rows = int(input("enter no. of rows:"))
columns = int(input("enter no. of columns:"))

x = np.random.rand(rows, columns)
np.savetext("file1.txt", x)
Code Example #22
 def month_to_year(self, output_address):
     annual_data = []
     for count_row in range(0, len(self.text_data)):
         if (count_row - 1) % 12 == 0:
             annual_data.append(self.text_data[count_row])
     np.savetxt(output_address, annual_data, delimiter=",")
Code Example #23
File: svm.py Project: verajohne/Kaggle
import numpy as np
from sklearn.svm import SVR


#tmp = 'silly/Kaggle/regression'
#X = np.load('Data/X.npy')
X = np.genfromtxt('Data/reg_train_in.csv', delimiter=',')
y = np.load('Data/reg_train_out.npy')
y = [x[0] for x in y]
y = np.array(y)
test_in = np.load('Data/reg_test_in.npy')

clf = SVR(C=1.0, epsilon=0.2)
clf.fit(X, y)
test_out = clf.predict(test_in)
np.save('svm', test_out)
np.savetxt('svm', test_out)
Code Example #24
File: svm.py Project: ambrishrawat/kaggle
import numpy as np
from sklearn.svm import SVR

#tmp = 'silly/Kaggle/regression'
#X = np.load('Data/X.npy')
X = np.genfromtxt('Data/reg_train_in.csv', delimiter=',')
y = np.load('Data/reg_train_out.npy')
y = [x[0] for x in y]
y = np.array(y)
test_in = np.load('Data/reg_test_in.npy')

clf = SVR(C=1.0, epsilon=0.2)
clf.fit(X, y)
test_out = clf.predict(test_in)
np.save('svm', test_out)
np.savetxt('svm', test_out)
Code Example #25
import numpy as np
from macros_AWS import *
from sklearn import preprocessing
orgdata = np.loadtxt("testset7_21_2018.csv", delimiter=",")  # np.load cannot read CSV text
newdata = preprocessing.robust_scale(orgdata)
np.savetxt("datasetafterrobustscaling.csv", newdata, delimiter=",")
Code Example #26
###############################################################

###############################################################
'''
print 'predicting ...'
predictData = test[0:79975, 1:406]
predict = rfr.predict(predictData)
#prediction = np.column_stack((test[0:79975, 0:1],predict))
joblib.dump(prediction, preddump)
'''
###############################################################

###############################################################
#Writing data to csv using numpy.savetxt
'''
print 'writing ...'
galIDs = test[:,0]
prediction = np.column_stack((galIDs, predict))
headerRow = 'GalaxyID,Class1.1,Class1.2,Class1.3,Class2.1,Class2.2,Class3.1,Class3.2,Class4.1,Class4.2,Class5.1,Class5.2,Class5.3,Class5.4,Class6.1,Class6.2,Class7.1,Class7.2,Class7.3,Class8.1,Class8.2,Class8.3,Class8.4,Class8.5,Class8.6,Class8.7,Class9.1,Class9.2,Class9.3,Class10.1,Class10.2,Class10.3,Class11.1,Class11.2,Class11.3,Class11.4,Class11.5,Class11.6'
formatting = '%d' + ',%f' * 37
np.savetxt(solutionfilename, prediction, fmt=formatting, delimiter=',', newline='\n', header=headerRow, footer='', comments='')
'''
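
For reference, a minimal self-contained sketch of the np.savetxt pattern this commented-out block is aiming for, with small made-up arrays standing in for galIDs and predict (the values and output path are hypothetical):

import numpy as np

gal_ids = np.array([100008, 100023, 100053])   # hypothetical GalaxyID column
pred = np.random.rand(3, 37)                   # hypothetical class probabilities

rows = np.column_stack((gal_ids, pred))
header = 'GalaxyID,' + ','.join('Class%d' % k for k in range(1, 38))  # simplified header
# one %d for the ID plus 37 %f columns; a full-row format string is applied to each row
np.savetxt('TestSubmission.csv', rows, fmt='%d' + ',%f' * 37, header=header, comments='')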
###############################################################

###############################################################
#Writing data to csv
'''
with open('Solutions/TestSubmission.csv', 'wb') as csvfile:
  writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
  for x in range(test.shape[0]):
Code Example #27
def save_all_pfy(runid, filename, mca_name):
    np.savetxt(filename, d)  # 'd' is assumed to hold the PFY data assembled elsewhere
Code Example #28
File: test.py Project: royxuhan/PyBookLib
"""

tag_ids, tag_names = np.loadtxt(open("tags.csv", "rb"),
                                delimiter=",",
                                skiprows=1,
                                usecols=(0, 1),
                                dtype='int,U100',  # 'str' has no length; use a sized Unicode dtype
                                unpack=True)
print(tag_ids)
print(tag_names)
tag_id_counts = np.vstack((tag_ids, np.zeros(34252, 'int')))
print(tag_id_counts)
print(tag_id_counts[0])

book_tags = np.loadtxt(open("book_tags.csv", "rb"),
                       delimiter=",",
                       skiprows=1,
                       usecols=(0, 1, 2),
                       dtype="int")
print(book_tags)

print(tag_id_counts[0].size)
for i in range(tag_id_counts[0].size):
    tag_id = tag_id_counts[0][i]
    for line in book_tags:
        if tag_id == line[1]:
            tag_id_counts[1][i] += line[2]
    print(tag_id_counts[1][i])
print(tag_id_counts)
np.savetxt('tag_counts.csv', tag_id_counts, delimiter=',')