Code example #1
File: finderFunction.py  Project: RedThorium/mysite
def finderFunction(boroughChoice):
    # Collect every animal name recorded in the chosen borough,
    # then return the most frequent one.
    theData = MyData()

    boroughNameList = []
    for record in theData:
        if record['borough'] == boroughChoice:
            boroughNameList.append(record['animalname'])

    return most_common(boroughNameList)
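The function relies on a most_common helper that is not shown in this snippet. A minimal sketch, assuming it simply returns the most frequent element of a list (the implementation below is an assumption, not the project's actual helper):

from collections import Counter

def most_common(items):
    # Return the single most frequent element; Counter.most_common(1)
    # yields a list like [(element, count)].
    return Counter(items).most_common(1)[0][0]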
Code example #2
# RGB values used to colour the mask classes
back = [0, 0, 0]          # black (background)
stalk = [0, 92, 230]      # blue
twig = [0, 255, 0]        # green
grain = [128, 128, 0]     # yellow-brown
grain2 = [0, 128, 0]
whit = [255, 255, 255]    # white
zi = [128, 0, 255]        # purple
COLOR_DICT = np.array([back, zi, twig, stalk, grain, grain2, whit])
num_class = 5

# data paths
images = r'E:\RandomTasks\Dlinknet\dataset\train\images'
mask = r'E:\RandomTasks\Dlinknet\dataset\train\labels'
voc_val = MyData(images, mask)

batchsize = 16  # batch size
train_load = Data.DataLoader(
    voc_val,
    batch_size=batchsize,
    shuffle=True)

NAME = 'DinkNet34_class8_xiaomai'  # model name
modefiles = 'weights/' + NAME + '.th'
write = SummaryWriter('weights')  # TensorBoard visualisation
loss = nn.NLLLoss()
#loss = nn.CrossEntropyLoss()
solver = MyFrame(DinkNet34, loss, 0.0003)  # network, loss function, and learning rate
if os.path.exists(modefiles):
    solver.load(modefiles)
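COLOR_DICT suggests the labels are stored as RGB masks that have to be mapped to class indices before training. A minimal sketch of that mapping, assuming MyData does something along these lines (the function name and exact behaviour are assumptions, not the project's code):

import numpy as np

def rgb_mask_to_classes(mask_rgb, color_dict):
    # mask_rgb: (H, W, 3) uint8 label image; color_dict: (C, 3) class colours.
    # Compare each pixel against each class colour; pixels matching no
    # colour fall back to index 0 (the background).
    matches = (mask_rgb[:, :, None, :] == color_dict[None, None, :, :]).all(axis=-1)
    return matches.argmax(axis=-1)  # (H, W) class-index map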
Code example #3
File: myset.py  Project: gligorevic/OISISI-Python
def __addToSet__(self, filepath, occ):
    # If the file is already in the set, just increase its occurrence count.
    for item in self.list:
        if item.abspath == filepath:
            item.brPonavljanja += occ
            return
    # Otherwise record the file as a new entry.
    self.list.append(MyData(filepath, occ))
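__addToSet__ treats MyData as a plain record holding a file path and an occurrence counter (brPonavljanja is Serbian for "number of repetitions"). A minimal sketch of such a record, inferred from the usage above rather than taken from the project:

class MyData:
    # Hypothetical record type inferred from __addToSet__.
    def __init__(self, abspath, brPonavljanja):
        self.abspath = abspath              # absolute file path
        self.brPonavljanja = brPonavljanja  # occurrence count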
Code example #4
args = parse.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
    torch.cuda.manual_seed(1)
base_lr = args.lr

model = MobileNetV1(2, 0.75)
if args.cuda:
    model.cuda()
    cudnn.benchmark = True
transform = transforms.Compose([
    transforms.Resize((160, 160)),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
train_dataset = MyData(args.train_data, transforms=transform)
print(args.batch, len(train_dataset))
batch = args.batch
trainloader = DataLoader(train_dataset,
                         batch_size=batch,
                         shuffle=True,
                         num_workers=2,
                         collate_fn=collate_fn)
#test_dataset=MyData(args.test_data)
#testloader=DataLoader(test_dataset,batch_size=args.batch,shuffle=False,num_workers=4,collate_fn=collate_fn)
optimize = torch.optim.Adam([{
    'params': model.parameters()
}],
                            lr=args.lr,
                            betas=(args.beta1, args.beta2))
logs = open('logs.txt', 'a+')
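This loader, like the ones in the following examples, passes a custom collate_fn. A minimal sketch of what such a function typically does for (image, label) samples, assuming the dataset yields equally sized image tensors (the project's real version may differ):

import torch

def collate_fn(batch):
    # Hypothetical collate: stack images into one tensor, labels into another.
    images = torch.stack([sample[0] for sample in batch], dim=0)
    labels = torch.tensor([sample[1] for sample in batch])
    return images, labels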
Code example #5
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
])
## check whether a GPU is available and set the random seeds
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)
model = Classification(2)
if args.cuda:
    model.cuda()
    cudnn.benchmark = True

batch_size = args.batch
## build the data loaders: training and validation
dataset = MyData(args.datas, transforms=normalize)
valid = MyData(args.test, transforms=normalize)
dataloader = DataLoader(dataset,
                        shuffle=True,
                        batch_size=batch_size,
                        num_workers=2,
                        collate_fn=collate_fn)
valid_dataloader = DataLoader(valid,
                              shuffle=True,
                              batch_size=batch_size,
                              num_workers=2,
                              collate_fn=collate_fn)
## define the optimizer
optimizer = torch.optim.Adam([{
    'params': model.parameters()
}],
Code example #6
File: main.py  Project: ManhPP/MachineTranslation
def main():
    input_lang, output_lang, pairs, data1, data2 = read_langs("eng", "fra", True)
    input_tensor = [[input_lang.word2index[s] for s in es.split(' ')] for es in data1]
    target_tensor = [[output_lang.word2index[s] for s in es.split(' ')] for es in data2]
    max_length_inp, max_length_tar = max_length(input_tensor), max_length(target_tensor)

    input_tensor = [pad_sequences(x, max_length_inp) for x in input_tensor]
    target_tensor = [pad_sequences(x, max_length_tar) for x in target_tensor]
    print(len(target_tensor))

    input_tensor_train, input_tensor_val, target_tensor_train, target_tensor_val = train_test_split(input_tensor,
                                                                                                    target_tensor,
                                                                                                    test_size=0.2)

    # Show length
    print(len(input_tensor_train), len(target_tensor_train), len(input_tensor_val), len(target_tensor_val))

    BUFFER_SIZE = len(input_tensor_train)
    BATCH_SIZE = 64
    N_BATCH = BUFFER_SIZE // BATCH_SIZE
    embedding_dim = 256
    units = 1024
    vocab_inp_size = len(input_lang.word2index)
    vocab_tar_size = len(output_lang.word2index)

    train_dataset = MyData(input_tensor_train, target_tensor_train)
    val_dataset = MyData(input_tensor_val, target_tensor_val)

    dataset = DataLoader(train_dataset, batch_size=BATCH_SIZE,
                         drop_last=True,
                         shuffle=True)

    device = torch.device("cpu")

    encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE)
    decoder = Decoder(vocab_tar_size, embedding_dim, units, units, BATCH_SIZE)

    encoder.to(device)
    decoder.to(device)

    criterion = nn.CrossEntropyLoss()

    optimizer = optim.Adam(list(encoder.parameters()) + list(decoder.parameters()),
                           lr=0.001)

    EPOCHS = 10

    for epoch in range(EPOCHS):
        start = time()

        encoder.train()
        decoder.train()

        total_loss = 0

        for (batch, (inp, targ, inp_len)) in enumerate(dataset):
            loss = 0

            xs, ys, lens = sort_batch(inp, targ, inp_len)
            enc_output, enc_hidden = encoder(xs.to(device), lens, device)
            dec_hidden = enc_hidden
            dec_input = torch.tensor([[output_lang.word2index['<sos>']]] * BATCH_SIZE)

            for t in range(1, ys.size(1)):
                predictions, dec_hidden, _ = decoder(dec_input.to(device),
                                                     dec_hidden.to(device),
                                                     enc_output.to(device))
                loss += loss_function(criterion, ys[:, t].to(device), predictions.to(device))
                # Teacher forcing: feed the ground-truth token as the next decoder input.
                dec_input = ys[:, t].unsqueeze(1)

            batch_loss = loss / int(ys.size(1))
            total_loss += batch_loss.detach().item()  # accumulate as a plain float

            optimizer.zero_grad()

            loss.backward()

            ### UPDATE MODEL PARAMETERS
            optimizer.step()

            if batch % 100 == 0:
                print('Epoch {} Batch {} Loss {:.4f}'.format(epoch + 1,
                                                             batch,
                                                             batch_loss.detach().item()))

        ### TODO: Save checkpoint for model
        print('Epoch {} Loss {:.4f}'.format(epoch + 1,
                                            total_loss / N_BATCH))
        print('Time taken for 1 epoch {} sec\n'.format(time() - start))
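The training loop calls a sort_batch helper that is not shown. A minimal sketch, assuming it orders the batch by descending source length, as pack_padded_sequence in the encoder would expect (the signature is inferred from the call site, not taken from the project):

def sort_batch(inp, targ, lengths):
    # Hypothetical helper: sort by descending length so the encoder can
    # use torch.nn.utils.rnn.pack_padded_sequence on the batch.
    lengths, idx = lengths.sort(dim=0, descending=True)
    return inp[idx], targ[idx], lengths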
Code example #7
File: flask_app.py  Project: RedThorium/mysite
# A very simple Flask Hello World app for you to get started with...

from flask import Flask, render_template, request
from data import MyData

app = Flask(__name__)

theData = MyData()


@app.route('/')
def hello_world():
    return 'Hello from Meme World!, Dabbin on the haters'


@app.route('/nana')
def nan():
    return 'I\'m a bonana'


@app.route('/home')
def rend_home():
    return render_template('index.html')


@app.route('/about')
def about():
    return render_template('about.html')


@app.route('/data')
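# NOTE: the snippet is cut off at this route in the source. A hypothetical
# handler, simply dumping the records loaded into theData above:
def show_data():
    return '<br>'.join(str(record) for record in theData)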
Code example #8
parse.add_argument('--batch', type=int, default=32, help='')
parse.add_argument('--data', type=str, default='./test_set/test_set/', help='')
parse.add_argument('--no_cuda', action='store_true', default=False, help='')
args = parse.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(1)
if args.cuda:
    torch.cuda.manual_seed(1)
    cudnn.benchmark = True

transform = transforms.Compose([
    transforms.Resize((64, 64)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
dataset = MyData(args.data, transform)
dataloader = DataLoader(dataset,
                        batch_size=args.batch,
                        shuffle=False,
                        num_workers=4,
                        collate_fn=collate_fn)
for f in os.listdir(args.models):
    model = Classification(2, False)
    model.load_state_dict(torch.load(args.models + f)['models'])
    model.eval()
    if args.cuda:
        model.cuda()
    cnt = 0.0
    for i, (image, label) in enumerate(dataloader):
        # Move the batch to the GPU only when CUDA is available.
        image = Variable(image.cuda() if args.cuda else image, requires_grad=False)
        output = model(image, None)
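        # NOTE: the snippet ends here in the source. A hypothetical continuation,
        # counting correct top-1 predictions to report per-model accuracy:
        pred = output.detach().cpu().argmax(dim=1)
        cnt += (pred == label).sum().item()
    print(f, cnt / len(dataset))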