Example #1
    def __init__(self, parent=None):  # default parameters of the MainWindow class
        QtGui.QMainWindow.__init__(self, parent)

        # self: window description, widget variable definitions
        self.setGeometry(300, 300, 1500, 800)
        mainWidget = QtGui.QWidget()
        self.setCentralWidget(mainWidget)

        # Menu setup
        menu = self.menuBar()
        list1 = menu.addMenu('File')
        list1.addAction('exit')
        list1.addAction('save')
        list1.addAction('save as')

        # Toolbar setup
        toolbar = self.addToolBar('Exit')
        toolbar.addAction('exit')
        toolbar.addAction('save')
        toolbar.addAction('save as')

        # define button, editor, and grid variables
        self.rButton = QtGui.QPushButton("Right")
        self.lButton = QtGui.QPushButton("Left")
        self.dButton = QtGui.QPushButton("Download")

        self.label1 = QtGui.QLabel()
        self.label2 = QtGui.QLabel()

        self.textEdit = QtGui.QTextBrowser(readOnly=1)
        self.textEdit.setOpenExternalLinks(True)

        grid = QtGui.QGridLayout(mainWidget)
        # placement on the grid
        #grid.setSpacing(100)
        grid.setColumnMinimumWidth(0, 400)
        grid.setColumnMinimumWidth(1, 400)
        grid.setColumnMinimumWidth(2, 200)
        grid.addWidget(self.label1, 0, 0)
        grid.addWidget(self.label2, 0, 1)
        grid.addWidget(self.lButton, 1, 0)
        grid.addWidget(self.rButton, 1, 1)
        grid.addWidget(self.dButton, 1, 2)
        grid.addWidget(self.textEdit, 0, 2)


        self.person = functions.unpickle()
        #self.person=self.person[:10]
        self.rButton.clicked.connect(self.rbutton)
        self.lButton.clicked.connect(self.lbutton)
        self.dButton.clicked.connect(self.dbutton)
        #self.person[13].elo=200
        self.update(2)
        self.setGeometry(300, 300, 250, 150)  # note: overrides the 1500x800 geometry set above
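
Example #1 shows only the constructor of the window class. For context, a minimal launch sketch follows; it is an assumption about how such a PyQt4 window is typically instantiated and is not part of the original example.

import sys
from PyQt4 import QtGui

if __name__ == "__main__":
    # Assumed entry point: create the application, show the window, run the event loop.
    app = QtGui.QApplication(sys.argv)
    window = MainWindow()   # the class whose __init__ is shown above
    window.show()
    sys.exit(app.exec_())
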
Example #2
    def dbutton(self):
        functions.download()
        self.person = functions.unpickle()
        self.update(2)
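
Both GUI examples rely on functions.unpickle() (and Example #2 on functions.download()), whose bodies are not shown. As a hedged sketch, such a helper usually just wraps pickle.load; the file name person.pkl below is a placeholder assumption, not taken from the original module.

import pickle

def unpickle(path="person.pkl"):   # "person.pkl" is a placeholder, not from the original
    # Load and return whatever object was previously serialized with pickle.dump().
    with open(path, "rb") as fh:
        return pickle.load(fh)
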
Example #3
def train_fc(learning_rate=1e-4,
             num_epochs=50,
             decay_every=50,
             gamma=0.1,
             batch_size=256,
             num_batches=100,
             display_info=True,
             hidden_width=1536):
    TRAIN_SET_SIZE = batch_size * num_batches

    file_name = "data/cifar-10-batches-py/data_batch_1"
    data = f.unpickle(file_name)

    ims = f.open_data_ims(50000)
    x, y = f.seperate_xy_ims(ims[0:TRAIN_SET_SIZE])

    # figure out the compute device (GPU if available) here
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if torch.cuda.is_available():
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    print("Using device:{}, cuda:{}, pytorch:{}".format(
        device, torch.version.cuda, torch.__version__))

    m = fc.FC_1(1536, hidden_width, 1536)
    if torch.cuda.is_available():
        m = m.cuda()  # transfer model to cuda as needed
    lossFunction = nn.L1Loss()
    optimizer = torch.optim.Adam(m.parameters(), lr=learning_rate)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=decay_every,
                                                gamma=gamma)

    x = torch.tensor(x, dtype=torch.float)
    y = torch.tensor(y, dtype=torch.float)

    x, y = x / 256.0, y / 256.0  # scale pixel values to [0, 1)

    # introduce a dataset class
    train_dataset = datasets.VectorizedDataset(x, y)
    train_loader = datasets.DataLoader(dataset=train_dataset,
                                       batch_size=batch_size,
                                       shuffle=True,
                                       num_workers=0)

    # remember loss graph
    losses = np.zeros(num_epochs, dtype=np.float64)  # np.float is removed in recent NumPy

    print("Started training.")
    train_start_time = time.time()

    for epoch in range(num_epochs):
        if epoch != 0 and epoch % max(1, num_epochs // 10) == 0:  # guard against num_epochs < 10
            print("{}% done! loss:{}".format(epoch / num_epochs * 100.0,
                                             losses[epoch - 1]))

        epoch_losses = 0.0

        # mini-batch gradient descent over the DataLoader
        for i, data in enumerate(train_loader, 0):
            x_batch, y_batch = data

            y_pred = m(x_batch)  # call the module directly rather than .forward()
            loss = lossFunction(y_pred, y_batch)
            epoch_losses += loss.item()  # take the Python scalar so the autograd graph is not kept alive

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        losses[epoch] = epoch_losses / num_batches

    print("Training took {} seconds!".format(time.time() - train_start_time))

    if display_info:
        # try to write this first layer to a video
        v = m.linear1.weight[:, 0:512].detach().cpu().numpy()
        num_frames = v.shape[0]
        v = v.reshape(num_frames, 16, 32)
        v = np.absolute(v)
        v = v * (1.0 / v.max()) * 255.0  # normalize to 0->255
        f.save_video(v)

        # print loss graph
        print(losses)
        plt.plot(losses)
        plt.show()

        # print results
        with torch.no_grad():  # inference only; avoid building an autograd graph over the full set
            y_pred = m(x)
        x, y_pred = x.cpu().detach().numpy(), y_pred.detach().cpu().numpy()
        # x, y_pred = np.asarray(x.data), np.asarray(y_pred.data)
        x, y_pred = x * 256.0, y_pred * 256.0
        x, y_pred = x[0:50], y_pred[0:50]
        fuse_xy_pred = f.combine_xy_ims(x, y_pred)
        big_im = f.comb_ims(fuse_xy_pred, 32, 32)
        f.show_im_std(big_im)

        # show the first hidden unit's first-layer weights as an image
        s = m.linear1.weight[0, 0:512].detach().cpu().numpy()
        s = s.reshape(16, 32)
        s = np.absolute(s)
        s *= (1.0 / s.max())  # normalize 0->1
        plt.imshow(s, interpolation='nearest')
        plt.show()

    return losses[num_epochs - 1]
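
Example #3 reads data/cifar-10-batches-py/data_batch_1 through f.unpickle(). The CIFAR-10 distribution documents a standard loader for these batch files; the sketch below reproduces that documented pattern, while the actual f.unpickle() in this project may differ.

import pickle

def unpickle(file):
    # Documented CIFAR-10 loader: returns a dict with byte-string keys
    # such as b'data' (uint8 image rows) and b'labels'.
    with open(file, "rb") as fo:
        batch = pickle.load(fo, encoding="bytes")
    return batch
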