Example 1
    def setDevice(self, device):
        device = toDevice(device)

        print("[DEMOD] Activating {} device.".format(device["label"]))

        self.sdr = SoapySDR.Device(device)
        self.sdr.setGainMode(SOAPY_SDR_RX, 0, True)
        self.sdr.setSampleRate(SOAPY_SDR_RX, 0, self.sfs)
        self.sdr.setFrequency(SOAPY_SDR_RX, 0, self.freq)

        self.device = str(device)
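For context, a minimal usage sketch follows (not part of the original example). It assumes `demod` is an instance of the demodulator class above, with self.sfs and self.freq already set, and uses a hypothetical "driver=rtlsdr" device argument; SoapySDR.Device.enumerate() is the stock way to list attached devices.

# Usage sketch. Assumptions: `demod` is an instance of the class above with
# self.sfs and self.freq already set; "driver=rtlsdr" is a hypothetical
# device argument that toDevice() is expected to normalize.
import SoapySDR

for args in SoapySDR.Device.enumerate():    # list SDRs visible to SoapySDR
    print(args)

demod.setDevice("driver=rtlsdr")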
Example 2
    def setDevice(self, device):
        device = toDevice(device)

        print("[DEMOD] Activating {} device.".format(device["label"]))

        self.sdr = SoapySDR.Device(device)
        self.sdr.setGainMode(SOAPY_SDR_RX, 0, True)
        self.sdr.setFrequency(SOAPY_SDR_RX, 0, self.freq)
        supported_fs = self.sdr.getSampleRateRange(SOAPY_SDR_RX, 0)

        avfs = [
            [240e3, 256e3, 1.024e6, 2.5e6, 3.0e6],
            [960e3, 480e3, 240e3, 256e3, 768e3, 1.024e6, 2.5e6, 3.0e6],
        ]

        self.sfs = int(768e3)
        self.mfs = int(240e3)
        self.afs = int(48e3)

        for fs in reversed(supported_fs):
            for pfs in avfs[self.pmode]:
                if pfs >= fs.minimum() and pfs <= fs.maximum():
                    self.sfs = int(pfs)
                    break

        print("[DEMOD] Sampling Rate: {}".format(self.sfs))

        self.sdr_buff = 1024
        self.dsp_buff = self.sdr_buff * 8
        self.dec_out = int(np.ceil(self.dsp_buff / (self.sfs / self.mfs)))
        self.dsp_out = int(np.ceil(self.dec_out / (self.mfs / self.afs)))
        print(self.sdr_buff / self.sfs)  # debug: SDR buffer duration in seconds
        self.dec = Decimator(self.sfs, self.mfs, self.dec_out, cuda=self.cuda)

        self.wbfm = WBFM(self.tau,
                         self.mfs,
                         self.afs,
                         self.dec_out,
                         cuda=self.cuda,
                         numba=self.numba)

        self.sdr.setSampleRate(SOAPY_SDR_RX, 0, self.sfs)
        self.device = str(device)
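The rate-selection loop in Example 2 can be exercised on its own. The sketch below mirrors it outside the class: plain (minimum, maximum) tuples stand in for the Range objects returned by getSampleRateRange(), the preferred-rate lists are copied verbatim, and the tuner ranges are only hypothetical illustrations. As in the original, the break exits only the inner loop, so the last supported range (the lowest one after reversed()) decides the final rate.

# Standalone sketch of the rate-selection logic above. The (lo, hi) tuples
# stand in for SoapySDR Range objects; the ranges themselves are hypothetical.
avfs = [
    [240e3, 256e3, 1.024e6, 2.5e6, 3.0e6],
    [960e3, 480e3, 240e3, 256e3, 768e3, 1.024e6, 2.5e6, 3.0e6],
]
pmode = 1
supported_fs = [(225e3, 300e3), (900e3, 3.2e6)]

sfs = int(768e3)                      # fallback, as in the example
for lo, hi in reversed(supported_fs):
    for pfs in avfs[pmode]:
        if lo <= pfs <= hi:
            sfs = int(pfs)
            break                     # exits the inner loop only

print("Selected sampling rate:", sfs)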
Example 3
    def train(self):
        """Training process."""
        self.model.to(self.device)
        for epoch in range(self.start_epoch, self.epochs + self.start_epoch):
            self.scheduler.step()

            # Training
            train_loss = 0.0
            self.model.train()

            for local_batch, (centers, lefts,
                              rights) in enumerate(self.trainloader):
                # Transfer to GPU
                centers = toDevice(centers, self.device)
                lefts = toDevice(lefts, self.device)
                rights = toDevice(rights, self.device)

                # Model computations
                self.optimizer.zero_grad()
                datas = [centers, lefts, rights]
                for data in datas:
                    imgs, angles = data
                    # print("training image: ", imgs.shape)
                    outputs = self.model(imgs)
                    loss = self.criterion(outputs, angles.unsqueeze(1))
                    loss.backward()
                    self.optimizer.step()

                    train_loss += loss.data.item()

                if local_batch % 100 == 0:

                    print("Training Epoch: {} | Loss: {}".format(
                        epoch, train_loss / (local_batch + 1)))

            # Validation
            self.model.eval()
            valid_loss = 0
            with torch.set_grad_enabled(False):
                for local_batch, (centers, lefts,
                                  rights) in enumerate(self.validationloader):
                    # Transfer to GPU
                    centers = toDevice(centers, self.device)
                    lefts = toDevice(lefts, self.device)
                    rights = toDevice(rights, self.device)

                    # Model computations
                    self.optimizer.zero_grad()
                    datas = [centers, lefts, rights]
                    for data in datas:
                        imgs, angles = data
                        outputs = self.model(imgs)
                        loss = self.criterion(outputs, angles.unsqueeze(1))

                        valid_loss += loss.data.item()

                    if local_batch % 100 == 0:
                        print("Validation Loss: {}".format(valid_loss /
                                                           (local_batch + 1)))

            print()

            # Save model
            if epoch % 5 == 0 or epoch == self.epochs + self.start_epoch - 1:

                print("==> Save checkpoint ...")

                state = {
                    'epoch': epoch + 1,
                    'state_dict': self.model.state_dict(),
                    'optimizer': self.optimizer.state_dict(),
                    'scheduler': self.scheduler.state_dict(),
                }

                self.save_checkpoint(state)
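Both training examples call a toDevice helper that is not shown (and is unrelated to the single-argument toDevice in Examples 1 and 2). A plausible minimal sketch, purely an assumption based on how it is used above, is:

# Hypothetical toDevice helper; an assumption, not the repository's code.
# It moves a bare tensor, or an (images, angles) batch as yielded by the
# data loaders above, onto the target device.
import torch


def toDevice(batch, device):
    if isinstance(batch, torch.Tensor):
        return batch.to(device)
    return [item.to(device) for item in batch]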
Example 4
    def train(self):
        """Training process."""
        self.model.to(self.device)
        for epoch in range(self.start_epoch, self.epochs + self.start_epoch):
            self.scheduler.step()

            startTime = timeit.default_timer()

            # Training
            train_loss = 0.0
            self.model.train()

            for local_batch, (centers, lefts,
                              rights) in enumerate(self.trainloader):
                # Transfer to GPU
                centers = toDevice(centers, self.device)
                lefts = toDevice(lefts, self.device)
                rights = toDevice(rights, self.device)

                # Model computations
                self.optimizer.zero_grad()
                datas = [centers, lefts, rights]
                for data in datas:
                    imgs, angles = data
                    # print("training image: ", imgs.shape)
                    outputs = self.model(imgs)
                    loss = self.criterion(outputs, angles.unsqueeze(1))
                    loss.backward()
                    self.optimizer.step()

                    train_loss += loss.data.item()

                if local_batch % self.reportFreq == 0:
                    # report the running average loss every `reportFreq`
                    # batches, starting from the very first batch
                    curTrainLoss = train_loss / (local_batch + 1)
                    print("Training Epoch: {} | Loss: {}".format(
                        epoch, curTrainLoss))
                    self.trainLoss.append(curTrainLoss)

            # Validation
            self.model.eval()
            valid_loss = 0
            with torch.set_grad_enabled(False):
                for local_batch, (centers, lefts,
                                  rights) in enumerate(self.validationloader):
                    # Transfer to GPU
                    centers = toDevice(centers, self.device)
                    lefts = toDevice(lefts, self.device)
                    rights = toDevice(rights, self.device)

                    # Model computations
                    self.optimizer.zero_grad()
                    datas = [centers, lefts, rights]
                    for data in datas:
                        imgs, angles = data
                        outputs = self.model(imgs)
                        loss = self.criterion(outputs, angles.unsqueeze(1))

                        valid_loss += loss.data.item()

                    if local_batch % self.reportFreq == 0:
                        curValLoss = valid_loss / (local_batch + 1)
                        print("Validation Loss: {}".format(curValLoss))
                        self.validationLoss.append(curValLoss)

            stopTime = timeit.default_timer()
            print('Time for single round (s):', stopTime - startTime)

            # Save model
            if epoch % 5 == 0 or epoch == self.epochs + self.start_epoch - 1:

                print("==> Save checkpoint ...")

                state = {
                    'epoch': epoch + 1,
                    'state_dict': self.model.state_dict(),
                    'optimizer': self.optimizer.state_dict(),
                    'scheduler': self.scheduler.state_dict(),
                }

                self.save_checkpoint(state)
                if epoch == self.epochs + self.start_epoch - 1:
                    modelPath = self.ckptroot + self.dataType + '/'
                    with open(modelPath + 'trainLoss.txt', 'w') as f:
                        for item in self.trainLoss:
                            f.write("%s\n" % item)
                    with open(modelPath + 'valLoss.txt', 'w') as f:
                        for item in self.validationLoss:
                            f.write("%s\n" % item)