def test_sub_anttena(row, colomn, length):
    # Centre of the selected length x length sub-array inside the full 8 x 8 array.
    center_x = -57.75 + (row * 2 + length - 1) / 2.0 * 16.5
    center_y = -9.5 - (colomn * 2 + length - 1) / 2.0 * 16.5
    subAntenna = SquareArray(length, center_y, center_x)
    print(subAntenna.antenna_cor)

    # Indices of the sub-array elements inside the full 8 x 8 layout
    # (integer dtype so the tensor indexing below works).
    subAntennaIndex = numpy.zeros(shape=length * length, dtype=numpy.int64)
    for i in range(length * length):
        subAntennaIndex[i] = (i // length + colomn) * 8 + i % length + row

    offset = numpy.loadtxt(r'offsetfromstatic.txt')
    offset = torch.Tensor(offset).view(1, -1)

    trainPath = r'D:\dataset\data2'
    trainDataset = FileDataSet.FileDataset(trainPath + r'\traindata.txt',
                                           trainPath + r'\trainlabel.txt')
    inputs, labels = trainDataset[:]
    inputs = inputs[:, subAntennaIndex]
    offset = offset[:, subAntennaIndex]
    inputs = inputs - offset
    labels = labels / 10
    inputs = inputs.view(-1, length * length, 1)

    # Azimuth / elevation search grid (degrees converted to radians).
    w = 360
    h = 90
    Al = torch.linspace(0, w - 1, w).view(1, w) / 180.0 * numpy.pi
    Al = torch.matmul(torch.ones(h, 1), Al).view(1, 1, w * h)
    Be = torch.linspace(0, h - 1, h).view(h, 1) / 180.0 * numpy.pi
    Be = torch.matmul(Be, torch.ones(1, w)).view(1, 1, w * h)

    # Ground-truth azimuth / elevation from the labels, relative to the sub-array centre.
    altruth = numpy.arctan2(labels[:, 1].numpy() - center_y,
                            labels[:, 0].numpy() - center_x) / numpy.pi * 180
    betruth = numpy.arcsin(
        (labels[:, 2].numpy() + 2) / numpy.sqrt(
            (labels[:, 1].numpy() - center_y) * (labels[:, 1].numpy() - center_y) +
            (labels[:, 0].numpy() - center_x) * (labels[:, 0].numpy() - center_x) +
            (labels[:, 2].numpy() + 2) * (labels[:, 2].numpy() + 2))) / numpy.pi * 180

    index = 10
    print(betruth[index])
    print(altruth[index])
    # plt.imshow(p0[index, :].view(h, w))
    for i in range(labels.shape[0]):
        index = i
        a = subAntenna.p0(Al, Be,
                          inputs[index, :, :].view(1, length * length, 1)).view(h, w).numpy()
        print(
            str(betruth[index]) + " " + str(altruth[index]) + " " +
            str(numpy.unravel_index(a.argmax(), a.shape)[0]) + " " +
            str(numpy.unravel_index(a.argmax(), a.shape)[1]))
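
# Example invocation (an illustrative assumption, not part of the original pipeline):
# evaluate the 3 x 3 sub-array whose first element is antenna (row=0, colomn=0) of the
# full 8 x 8 array. Requires offsetfromstatic.txt produced by cal_offset and the
# dataset under D:\dataset\data2.
# test_sub_anttena(0, 0, 3)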
def cal_offset(refanttena, trainPath):
    # Estimate per-antenna phase offsets relative to a reference antenna from a dataset
    # with known tag positions.
    center_x = -57.54 + (0 * 2 + 8 - 1) / 2.0 * 16.44
    center_y = 57.54 - (0 * 2 + 8 - 1) / 2.0 * 16.44
    test = SquareArray(8, center_y, center_x)
    trainDataset = FileDataSet.FileDataset(trainPath + r'\traindata.txt',
                                           trainPath + r'\trainlabel2.txt')
    inputs, labels = trainDataset[:]
    # plt.plot(numpy.mod(inputs.numpy()[:, 24] - inputs.numpy()[:, 8], numpy.pi * 2))
    # plt.show()

    # Theoretical phase at every antenna for the ground-truth tag positions (labels scaled by 100).
    d = test.theta_theory_XYZ(
        torch.Tensor(labels[:, 0] * 100).view(-1, 1),
        torch.Tensor(labels[:, 1] * 100).view(-1, 1),
        torch.Tensor(labels[:, 2] * 100).view(-1, 1))
    print(d.shape)
    print(inputs.shape)
    # plt.plot(inputs.numpy()[:, 9])
    # plt.figure()
    #
    # plt.plot(labels.cpu().numpy()[0::1, 0])
    # plt.plot(labels.cpu().numpy()[0::1, 1])
    # plt.plot(
    #     numpy.mod(inputs.numpy()[:, 1] - inputs.numpy()[:, 8], numpy.pi * 2))
    # plt.plot(
    #     numpy.mod(inputs.numpy()[:, 0] - inputs.numpy()[:, 8], numpy.pi * 2))
    # plt.plot(numpy.mod((d.numpy()[:, 0] - d.numpy()[:, 8]), numpy.pi * 2))
    # plt.show()

    # Offset = median of (measured - theoretical) phase, differenced against the
    # reference antenna and wrapped to [0, 2*pi).
    offset = numpy.zeros(shape=(64, 1))
    for i in range(64):
        offset[i, 0] = numpy.median(
            numpy.mod((inputs.numpy()[:, i] - inputs.numpy()[:, refanttena] -
                       (d.numpy()[:, i] - d.numpy()[:, refanttena])), numpy.pi * 2))
    # plt.plot(offset)
    # plt.show()
    numpy.savetxt('offsetfromstatic.txt', offset)
def cal_offset():
    # Offset-calibration variant hard-coded to the static dataset; subtracts only the
    # theoretical phase difference relative to antenna 0.
    center_x = -57.75 + (0 * 2 + 8 - 1) / 2.0 * 16.5
    center_y = -9.5 - (0 * 2 + 8 - 1) / 2.0 * 16.5
    test = SquareArray(8, center_y, center_x)
    trainPath = r'D:\dataset\datastatic'
    trainDataset = FileDataSet.FileDataset(trainPath + r'\traindata.txt',
                                           trainPath + r'\trainlabel.txt')
    inputs, labels = trainDataset[:]
    d = test.theta_theory_XYZ(
        torch.Tensor(labels[:, 0] / 10).view(-1, 1),
        torch.Tensor(labels[:, 1] / 10).view(-1, 1),
        torch.Tensor(labels[:, 2] / 10).view(-1, 1))
    print(d.shape)
    print(inputs.shape)
    offset = numpy.zeros(shape=(64, 1))
    for i in range(64):
        offset[i, 0] = numpy.median(
            numpy.mod((inputs.numpy()[:, i] - (d.numpy()[:, i] - d.numpy()[:, 0])),
                      numpy.pi * 2))
    plt.plot(offset)
    plt.show()
    numpy.savetxt('offsetfromstatic.txt', offset)
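
# Calibration note (a workflow sketch, not code from the original pipeline): either
# cal_offset variant writes offsetfromstatic.txt, which test_sub_anttena reads, so the
# calibration has to be run once, with its dataset in place, before the sub-array
# tests, e.g.:
# cal_offset()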
class MyLoss(nn.Module):
    # Sum of two cross-entropy terms: the first pm.OutputShape[0] logits predict the
    # first label, the remaining logits predict the second label.
    def __init__(self):
        super(MyLoss, self).__init__()
        self.cro = nn.CrossEntropyLoss()

    def forward(self, pred, truth):
        return self.cro(pred[:, :pm.OutputShape[0]],
                        truth[:, 0, 0].cuda()) + self.cro(
                            pred[:, pm.OutputShape[0]:], truth[:, 0, 1].cuda())


workMode = pm.learnMode
dataMode = pm.dataMode

testPath = r'D:\Documents\OptiTrack\7-2-1'
fileDataset = FileDataSet.FileDataset(testPath + r'\traindata.txt',
                                      testPath + r'\trainlabel2.txt')
# fileDataset.Uniform()
# fileDataset.make_more(2, 0.01)
trainloader = torch.utils.data.DataLoader(fileDataset,
                                          batch_size=10,
                                          shuffle=True,
                                          num_workers=0)

model = CHAModule.MyNet1(64, pm.picWidth)
# modelAE = torch.load('c.core')
# core = CHAModule.MyNet3()
# criterion = nn.MSELoss()
criterion = nn.CrossEntropyLoss()
w = 64
h = 48
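
# Quick sanity check for MyLoss (a sketch under assumptions: pred is the concatenation
# of two logit heads of widths pm.OutputShape[0] and pm.OutputShape[1], truth holds the
# two integer class indices in shape (N, 1, 2), and random dummy tensors stand in for
# real data; CUDA-only because MyLoss moves its targets to the GPU).
if torch.cuda.is_available():
    _pred = torch.randn(4, pm.OutputShape[0] + pm.OutputShape[1]).cuda()
    _truth = torch.stack((torch.randint(0, pm.OutputShape[0], (4, 1)),
                          torch.randint(0, pm.OutputShape[1], (4, 1))), dim=2)
    print('MyLoss sanity check:', MyLoss()(_pred, _truth).item())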
import torch
from src.core import CenterCamera, Parameters as pm, AntennaArray as aa
import torchvision
import math
from src.dataprocess import FileDataSet

TestMoade = pm.LearningMode.Regression

if __name__ == '__main__':
    # import TrainTestbed
    model = torch.load('a.core')
    model.eval()

    testPath = r'E:\DataTest'
    testDataset = FileDataSet.FileDataset(testPath + r'\testdata.txt',
                                          testPath + r'\testlabel.txt')
    testloader = torch.utils.data.DataLoader(testDataset,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=0)
    inputs, labels = testDataset[:]

    square_array = aa.SquareArray()
    cam = CenterCamera.Camera()
    w = int(cam.s[0, 0])
    h = int(cam.s[1, 0])

    pixalX = torch.linspace(0, w - 1, w).view(1, w)
    pixalX = torch.matmul(torch.ones(h, 1), pixalX).view(1, w * h)
    pixalY = torch.linspace(0, h - 1, h).view(h, 1)
    pixalY = torch.matmul(pixalY, torch.ones(1, w)).view(1, w * h)
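
    # The two matmul-with-ones products above simply tile the pixel coordinates into
    # flat (1, w*h) grids. A sketch of an equivalent construction with torch.meshgrid,
    # shown for clarity only (an alternative, not the code this project uses):
    # gy, gx = torch.meshgrid(torch.arange(h, dtype=torch.float32),
    #                         torch.arange(w, dtype=torch.float32))
    # pixalX_alt = gx.reshape(1, w * h)   # same values as pixalX
    # pixalY_alt = gy.reshape(1, w * h)   # same values as pixalY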
import torch
from torch.autograd import Variable
import matplotlib.pyplot as plt
from src.core import Parameters as pm
from src.dataprocess import FileDataSet

TestMoade = pm.LearningMode.Regression

if __name__ == '__main__':
    # import TrainTestbed
    model = torch.load('b.core')
    model.eval()

    testPath = r'E:\Data7'
    testDataset = FileDataSet.FileDatasetRNN(testPath + r'\traindata.txt',
                                             testPath + r'\trainlabel.txt', 10)
    testloader = torch.utils.data.DataLoader(testDataset,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=0)

    for i, data in enumerate(testloader, 0):
        if i % 100 != 0:
            continue
        inputs, labels = data
        if torch.cuda.is_available():
            inputs = inputs.cuda()
            labels = labels.cuda()
        outputs = model(Variable(inputs))
        a = outputs.view(-1, 3072).cpu().detach().numpy()
        b = labels.cpu().numpy()
        print(b)
        # plt.plot(b / 10)
def test_sub_anttena_Pixal(row, colomn, length, trainPath):
    # Centre of the selected length x length sub-array inside the full 8 x 8 array.
    center_x = -57.54 + (colomn * 2 + length - 1) / 2.0 * 16.44
    center_y = 57.54 - (row * 2 + length - 1) / 2.0 * 16.44
    subAntenna = SquareArray(length, center_y, center_x)
    print(subAntenna.antenna_cor)

    # Indices of the sub-array elements inside the full 8 x 8 layout
    # (integer dtype so the tensor indexing below works).
    subAntennaIndex = numpy.zeros(shape=length * length, dtype=numpy.int64)
    for i in range(length * length):
        subAntennaIndex[i] = (i // length + colomn) * 8 + i % length + row

    offset = numpy.loadtxt(r'offsetfromstatic.txt')
    offset = torch.Tensor(offset).view(1, -1)

    trainDataset = FileDataSet.FileDataset(trainPath + r'\traindata.txt',
                                           trainPath + r'\trainlabel2.txt')
    inputs, labels = trainDataset[:]
    inputs = inputs[:, subAntennaIndex]
    offset = offset[:, subAntennaIndex]
    inputs = inputs - offset
    labels = labels * 100
    inputs = inputs.view(-1, length * length, 1)

    # Pixel grid over the camera image, converted to azimuth / elevation.
    w = 600
    h = 600
    Al = torch.linspace(0, w - 1, w).view(1, w)
    Al = torch.matmul(torch.ones(h, 1), Al).view(1, 1, w * h)
    Be = torch.linspace(0, h - 1, h).view(h, 1)
    Be = torch.matmul(Be, torch.ones(1, w)).view(1, 1, w * h)
    camera = CenterCamera.Camera()
    Al, Be = camera.getAlBefromPixal(Al, Be)

    # Ground-truth azimuth / elevation from the labels, relative to the sub-array centre.
    altruth = numpy.arctan2(labels[:, 1].numpy() - center_y,
                            labels[:, 0].numpy() - center_x) / numpy.pi * 180
    betruth = numpy.arcsin(
        (labels[:, 2].numpy() + 2) / numpy.sqrt(
            (labels[:, 1].numpy() - center_y) * (labels[:, 1].numpy() - center_y) +
            (labels[:, 0].numpy() - center_x) * (labels[:, 0].numpy() - center_x) +
            (labels[:, 2].numpy() + 2) * (labels[:, 2].numpy() + 2))) / numpy.pi * 180

    index = 10
    print(betruth[index])
    print(altruth[index])
    xtruth, ytruth = camera.getPixalFromAlBe(altruth, betruth)

    b = list()
    # plt.imshow(p0[index, :].view(h, w))
    imagepath = trainPath + r'\AOAPix\aoamap' + str(row) + '-' + str(
        colomn) + '-' + str(length)
    if not os.path.exists(imagepath):  # create the output folder if it does not exist
        os.makedirs(imagepath)

    for i in range(0, labels.shape[0], 10):
        index = i
        a = subAntenna.p0(Al, Be,
                          inputs[i, :, :].view(1, length * length, 1)).view(h, w)
        torchvision.utils.save_image(a,
                                     imagepath + '\\' + str(int(i / 10)) + '.jpg')
        a = a.numpy()
        b.append([
            xtruth[index], ytruth[index],
            numpy.unravel_index(a.argmax(), a.shape)[1],
            numpy.unravel_index(a.argmax(), a.shape)[0]
        ])
    # plt.show()
    b = numpy.array(b)
    numpy.savetxt(imagepath + r'\b.txt', b)
    numpy.savetxt(imagepath + r'\label.txt', labels[::10, :].numpy())
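
# Example invocation (illustrative; the row/colomn/length choice and the dataset path
# are assumptions, not values from the original pipeline). Saves one AoA heat map every
# tenth sample under <trainPath>\AOAPix\aoamap0-0-8 together with b.txt and label.txt.
# test_sub_anttena_Pixal(0, 0, 8, r'D:\Documents\OptiTrack\7-2-1')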
import torch
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
from src.core import Parameters as pm
from src.dataprocess import FileDataSet

TestMoade = pm.LearningMode.Regression

if __name__ == '__main__':
    # import TrainTestbed
    # model = torch.load('c.core')
    # model.eval()

    trainPath = r'D:\dataset\data3'
    trainDataset = FileDataSet.FileDataset(trainPath + r'\traindata.txt',
                                           trainPath + r'\trainlabel.txt')
    trainloader = torch.utils.data.DataLoader(trainDataset,
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=0)

    testPath = r'D:\dataset\data1'
    testDataset = FileDataSet.FileDataset(testPath + r'\traindata.txt',
                                          testPath + r'\trainlabel.txt')
    testloader = torch.utils.data.DataLoader(testDataset,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=0)

    inputs, labels = testDataset[:]
    inputs2, labels2 = trainDataset[:]
    # inputs = model(Variable(inputs.cuda())).detach().cpu()
def TestOneTag(epc):
    # Evaluate the trained model on the per-tag test files for one EPC and save the
    # predicted positions to modelResult<epc>.txt.
    model = torch.load('a.core')
    model.eval()

    testPath = r'E:\Data20'
    testDataset = FileDataSet.FileDataset(
        testPath + r'\traindata' + str(epc) + '.txt',
        testPath + r'\trainlabel' + str(epc) + '.txt')
    testloader = torch.utils.data.DataLoader(testDataset,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=0)
    criterion = nn.MSELoss()
    modelAE = torch.load('c.core')

    # randindex = torch.linspace(1, 100, 100)  # np.random.randint(0, 80, size=[10])
    # for i in randindex:
    #     inputs, labels = testDataset[int(i)]
    #     labels = labels.view(1, 1, 2)
    #     one_hot = torch.zeros(1, 640).scatter_(1, labels.data[:, :, 0], 1)
    #     inputs, labels = Variable(inputs), Variable(one_hot.view(-1, 640))
    #     if torch.cuda.is_available():
    #         inputs = inputs.cuda()
    #         labels = labels.cuda()
    #     outputs = core(Variable(inputs))
    #     scal = (torch.Tensor([640, 480]).view(1, 2)).cuda()
    #
    #     # print(outputs.data * scal)
    #     print(outputs.data)
    #     print(labels)
    #     print('========')

    # inputs, labels = testDataset[:]
    # outputs = core(Variable(inputs.cuda()))
    # # scal = (torch.Tensor([640, 480]).view(1, 2)).cuda()
    # # np.savetxt('a.txt', (outputs.data * scal).cpu().numpy(), fmt='%.6f')
    # np.savetxt('a.txt', (outputs.data).cpu().numpy(), fmt='%.6f')

    # for epoch in range(1):  # loop over the dataset multiple times
    #     running_loss = 0.0
    #     for i, data in enumerate(testloader, 0):
    #         # get the inputs
    #         inputs, labels = data
    #         one_hot = torch.zeros(labels.size(0), 640).scatter_(1, labels.data[:, :, 0], 1)
    #         inputs, labels = Variable(inputs), Variable(one_hot.view(-1, 1, 640))
    #
    #         # wrap them in Variable
    #         # inputs, labels = Variable(inputs), Variable(labels / torch.Tensor([640, 480]).view(1, 2))
    #         if torch.cuda.is_available():
    #             inputs = inputs.cuda()
    #             labels = labels.cuda()
    #
    #         outputs = core(inputs)
    #         loss = criterion(outputs, labels)
    #
    #         # print statistics
    #         running_loss += loss.data
    #         if i % 20 == 19:  # print every 20 mini-batches
    #             print('[%d, %5d] loss: %.5f' %
    #                   (epoch + 1, i + 1, running_loss / 20))
    #             running_loss = 0.0
    #     break
    # print('Finished Training')

    if TestMoade == pm.LearningMode.Classification1LabelHeatMap:
        inputs, labels = testDataset[:]
        if torch.cuda.is_available():
            inputs = inputs.cuda()
            labels = labels.cuda()
        # outputs = core(Variable(modelAE.encoder(inputs)))
        outputs = model(Variable(inputs))
        a = outputs.cpu().detach().numpy().transpose()
        # x = F.softmax(torch.Tensor(a[:pm.OutputShape[0], :]), 0)
        # values, indices = torch.max(x, 0)
        # y = F.softmax(torch.Tensor(a[pm.OutputShape[0]:, :]), 0)
        # values2, indices2 = torch.max(y, 0)
        # index = torch.cat((indices.view(-1, 1), indices2.view(-1, 1)), 1)
        # np.savetxt('a.txt', index.numpy().astype(int))
        # np.savetxt('b.txt', x.numpy())

        b = labels.cpu().numpy()[0::10, 0]
        plt.plot(b / 10)
        # plt.figure()
        # for i in range(100):
        #     plt.imshow(a[:, i * 10].reshape(48, 64))
        #     plt.show()
        # plt.imshow(a[:, 1].reshape(48, 64))
        plt.imshow(np.sum(a[:, 0::10].reshape(48, 64, -1), 0))
        temp = np.unravel_index(np.argmax(a[:, 0::10], 0), (48, 64))
        plt.plot(temp[1])

        plt.figure()
        b = labels.cpu().numpy()[0::10, 1]
        plt.plot(b / 10)
        # plt.figure()
        # for i in range(100):
        #     plt.imshow(a[:, i * 10].reshape(48, 64))
        #     plt.show()
        # plt.imshow(a[:, 1].reshape(48, 64))
        plt.imshow(np.sum(a[:, 0::10].reshape(48, 64, -1), 1))
        temp = np.unravel_index(np.argmax(a[:, 0::10], 0), (48, 64))
        plt.plot(temp[0])

        # Per-axis and Euclidean errors between the heat-map peak and the ground truth.
        plt.figure()
        r1 = labels.cpu().numpy()[0::10, 1] / 10 - temp[0]
        r2 = labels.cpu().numpy()[0::10, 0] / 10 - temp[1]
        plt.hist(np.abs(r1), 100)
        plt.figure()
        plt.hist(np.abs(r2), 100)
        plt.figure()
        plt.hist(np.abs(np.sqrt(r1 * r1 + r2 * r2)), 100)
        plt.figure()
        plt.plot(np.sqrt(r1 * r1 + r2 * r2))
        print(np.mean(np.abs(r1)))
        print(np.mean(np.abs(r2)))

        temp = np.unravel_index(np.argmax(a[:, :], 0), (48, 64))
        temp2 = np.hstack(
            (temp[1].reshape(-1, 1) * 10, temp[0].reshape(-1, 1) * 10))
        np.savetxt(testPath + r'\modelResult' + str(epc) + '.txt', temp2)

        # for i, data in enumerate(testloader, 0):
        #     if i % 100 != 0:
        #         continue
        #     inputs, labels = data
        #     if torch.cuda.is_available():
        #         inputs = inputs.cuda()
        #         labels = labels.cuda()
        #     outputs = core(Variable(inputs))
        #     a = outputs.view(-1, 3072).cpu().detach().numpy()
        #     b = labels.cpu().numpy()
        #     print(b)
        #     plt.imshow(a[:, :].reshape(48, 64))
        #     plt.show()

    elif TestMoade == pm.LearningMode.Regression:
        inputs, labels = testDataset[:]
        if torch.cuda.is_available():
            inputs = inputs.cuda()
            labels = labels.cuda()
        outputs = model(Variable(inputs))

        b = labels.cpu().numpy()[:, 0]
        plt.figure()
        plt.plot(b)
        plt.plot(outputs.detach().cpu().numpy()[:, 0] * 640)

        plt.figure()
        b = labels.cpu().numpy()[:, 1]
        plt.plot(b)
        plt.plot((outputs.detach().cpu().numpy()[:, 1]) * 240 + 240)

        temp2 = np.hstack(
            (outputs.detach().cpu().numpy()[:, 0].reshape(-1, 1) * 640,
             outputs.detach().cpu().numpy()[:, 1].reshape(-1, 1) * 240 + 240))
        np.savetxt(testPath + r'\modelResult' + str(epc) + '.txt', temp2)

    elif TestMoade == pm.LearningMode.Classification2LabelsOneHot:
        inputs, labels = testDataset[:]
        if torch.cuda.is_available():
            inputs = inputs.cuda()
            labels = labels.cuda()
        outputs = model(Variable(inputs))
        a = outputs.cpu().detach().numpy().transpose()
        x = F.softmax(torch.Tensor(a[pm.OutputShape[0]:, :]), 0)
        # values, indices = torch.max(x, 0)
        # y = F.softmax(torch.Tensor(a[pm.OutputShape[0]:, :]), 0)
        plt.imshow(x * x)
        b = labels.cpu().numpy()[:, 1]
        plt.plot(b)
        aa = torch.argmax(x[:, 0::1], 0)
        plt.plot(aa.numpy())
        plt.show()
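
# Driver sketch (an assumption for illustration: the per-tag files
# traindata<epc>.txt / trainlabel<epc>.txt in E:\Data20 are numbered 0, 1, 2, ...;
# adjust the range to the tags actually recorded).
# for epc in range(3):
#     TestOneTag(epc)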