def download_dataset(self, folder_path):
    """Download the Tiny ImageNet archive and unpack it under 'data'.

    folder_path: directory to download the zip into.
    Returns whatever Utils.extract_zip_file returns (the extraction result).
    """
    from models.utils import Utils

    dataset_url = "http://cs231n.stanford.edu/tiny-imagenet-200.zip"
    archive_path = Utils.download_file(folder_path, url=dataset_url)
    return Utils.extract_zip_file(file_path=archive_path, extract_path='data')
Example #2
0
    def _get_blog_single(self, par):
        """Render the single-blog-post view for the content keyed by par[1].

        par: request path parts; par[1] is the Content key name.
        Responds with the 'blog-single' template, or 404 if no such content.
        """
        ut = Utils()
        self._build_widgets_single()

        o = self._obj
        # if the user is an editor, load the images for the image select box
        if self._req.sesh().can_edit():
            o['images'] = Image.all().order("-date").fetch(self._conf.IMAGES_VIEWER_COUNT)

        content = Content.get_by_key_name(par[1])
        if not content:
            return self._req.notfound()

        o['data'] = self.copy_bits(content)

        # versions for admin - perhaps optimise this out with an isAdmin clause.
        # (dead store removed: the original assigned content.contentversion_set
        # to otherversions and then immediately overwrote it with ov below)
        ov = []
        for c in content.contentversion_set:
            c.nicetime = c.createtime.strftime('%e %b %Y - %H:%M:%S')
            ov.append(c)
        o['data'].otherversions = ov

        # what to put in the meta description - needed for single blog post only
        if content.current.summary:
            o['data'].metadesc = ut.strip_tags(content.current.summary)
        else:
            o['data'].metadesc = o['data'].title

        # no paging controls - for a single blog
        o['page'] = {}

        self._respond(path='blog-single', obj=self._obj)
Example #3
0
    def parse_lectures_list(self, course_id):
        """Fetch a course page and build the list of its Lecture objects.

        course_id: identifier substituted into the course URL template.
        Also records the lecture count the first time it is computed.
        """
        with self.session.get(course_url.substitute(course_id=course_id),
                              cookies=self.cookies,
                              verify=False) as course:
            tree = html.fromstring(html=course.content)

            # scrape the parallel attribute lists out of the page
            urls = [item.attrib['href'] for item in tree.find_class('item')]
            lids = [Utils.get_id_from_url(u) for u in urls]
            titles = [
                Utils.flush(name.text)
                for name in tree.find_class('lecture-name')
            ]
            wids = [self.parse_wistia_id(course_id, lid) for lid in lids]
            durations = [
                player.attrib['aria-valuemax']
                for player in tree.find_class('w-player-wrapper')
            ]

            # stitch the parallel lists together into Lecture records
            lectures = []
            for lid, title, wid, dur, url in zip(lids, titles, wids,
                                                 durations, urls):
                lectures.append(
                    Lecture(_id=lid,
                            title=title,
                            source=wid,
                            duration=dur,
                            path=url))

            if self.lectures_list_len < 1:
                self.lectures_list_len = len(lectures)
            return lectures
Example #4
0
    def _post_new_group(self, par):
        """Create a Group from the posted 'cid'/'ctitle' fields and redirect.

        The group key is the cleaned 'cid' parameter; after saving, the
        browser is redirected to the new group's blog page.
        """
        cleaned_key = Utils().cleanUserName(self._req.par('cid'))

        new_group = Group(key_name=cleaned_key)
        new_group.title = self._req.par('ctitle')
        new_group.put()

        return self._req.redirect(path=self._conf.BLOG + '/' + cleaned_key)
Example #5
0
 def download_lecture_from_cdn(self, lecture, course_title):
     """Download *lecture* from the CDN into mosh_courses/<course_title>.

     lecture: object with .title (used for the filename) and .path (the
              direct download URL).
     course_title: subdirectory name under mosh_courses.
     (first parameter renamed from the typo 'selfs' to 'self')
     """
     filename = f"{lecture.title}.mp4"
     course_dir = Path("mosh_courses").resolve() / course_title
     # context manager guarantees the connection is released even if
     # write_file raises (the original leaked the response on error)
     with requests.get(lecture.path) as response:
         file_size = int(response.headers['Content-Length'])
         print(
             download_lecture_from_course_template.substitute(
                 lecture=filename,
                 course=course_title,
                 directory=course_dir,
                 fileSize=file_size))
         Utils.write_file(course_dir, filename, response.content)
Example #6
0
    def _get_blog_single(self, par):
        """Render the single-blog-post view for the content keyed by par[1].

        Editors see the 'editing' version of the content; everyone else sees
        'current'. Responds with the 'blog-single' template, or 404 if no
        such content exists.
        """
        ut = Utils()
        self._build_widgets_single()

        o = self._obj
        # editors also get the image list for the image select box
        if self._req.sesh().can_edit():
            o['images'] = Image.all().order("-date").fetch(self._conf.IMAGES_VIEWER_COUNT)

        content = Content.get_by_key_name(par[1])
        if not content:
            return self._req.notfound()

        if self._req.sesh().can_edit():
            o['data'] = content.editing
        else:
            o['data'] = content.current

        o['data'].ctype = content.ctype
        o['data'].status = content.status

        # version history for admins, each stamped with a display time.
        # (dead store removed: the original assigned content.contentversion_set
        # to otherversions and then immediately overwrote it with ov below)
        ov = []
        for c in content.contentversion_set:
            c.nicetime = c.createtime.strftime('%e %b %Y - %H:%M:%S')
            ov.append(c)
        o['data'].otherversions = ov

        o['data'].keyname = content.key().name()
        o['data'].group = content.group

        # what to put in the meta description
        if content.current.summary:
            o['data'].metadesc = ut.strip_tags(content.current.summary)
        else:
            o['data'].metadesc = o['data'].title

        o['data'].nicedate = content.sortdate.strftime('%e %b %Y')

        # use hard coded imagepath, or fall back to the main image's URL
        if not o['data'].imagepath and o['data'].mainimage:
            o['data'].imagepath = o['data'].mainimage.serving_url

        # no paging controls - for a single blog
        o['page'] = {}

        self._respond(path='blog-single', obj=self._obj)
Example #7
0
 def as_dict(self):
     """Serialize this token record to a plain dict.

     'expires' is converted to a unix timestamp and 'user' is recursively
     serialized via its own as_dict().
     """
     payload = {'id': self.id, 'token': self.token}
     payload['expires'] = Utils.get_unix_timestamp(self.expires)
     payload['user'] = self.user.as_dict()
     return payload
Example #8
0
    def training_step(self, batch, batch_idx):
        """One training iteration: preprocess, augment, forward, compute loss.

        batch: (images, masks, _, _) tuple from the dataloader.
        Returns a dict with the training 'loss' for the framework to log.
        """
        images, masks, _, _ = batch

        images, masks = Utils.preprocessing(images, masks, self.WL, self.WW)
        images, masks = Utils.do_train_augmentations(
            images, masks, self.gaussian_noise_std, self.device, self.ra,
            self.rf)

        predictions = self(images)

        # predictions are [batch, 1, H, W]; drop the channel axis so the
        # tensor lines up with the masks
        loss = self.loss(predictions[:, 0, :, :], masks)

        return {'loss': loss}
Example #9
0
    def download_lecture_from_wistia(self, lecture, course_title):
        """Resolve a lecture's Wistia id to an MP4 URL and download it.

        lecture: object with .title (filename) and .source (Wistia id).
        course_title: subdirectory name under mosh_courses.
        Both HTTP responses are managed with `with` so connections are
        released even if parsing or write_file raises (the original leaked
        the file response on error).
        """
        with requests.get(
                wistia_json_url_template.substitute(
                    wistia_id=lecture.source)) as response:
            # the endpoint answers with JSONP; strip the callback wrapper
            json_response = json.loads(
                Utils.get_json_from_callback(response.text))

        media = json_response["media"]
        # assumes the first asset is the ready-to-download MP4 (as in the
        # original) — TODO confirm against the Wistia response shape
        ready_mp4_url = media["assets"][0]["url"]
        filename = f"{lecture.title}.mp4"

        course_dir = Path("mosh_courses").resolve() / course_title

        with requests.get(ready_mp4_url) as file:
            print(
                download_lecture_from_course_template.substitute(
                    lecture=filename, course=course_title,
                    directory=course_dir))
            Utils.write_file(course_dir, filename, file.content)
Example #10
0
    def plotConfusionMatrix(self):
        """Plot the confusion matrix for the predictions, raw and normalized.

        Uses self.y_test / self.predictions; in debug mode also prints the
        TP/FP/TN/FN breakdown.
        """
        if self.debug:
            print('In plotConfusionMatrix')

        utils = Utils(debug=True)

        # class labels; fixed typo 'Sendiment' -> 'Sentiment'
        classes = ['Pos Sentiment', 'Neg Sentiment']

        cnf_matrix = confusion_matrix(self.y_test, self.predictions)
        plt.figure()
        utils.plot_confusion_matrix(
            cnf_matrix, classes=classes,
            title='Confusion matrix, without normalization')
        plt.figure()
        utils.plot_confusion_matrix(
            cnf_matrix, classes=classes, normalize=True,
            title='Confusion matrix, with normalization')

        if self.debug:
            utils.printTP_FP_TN_FN(cnf_matrix)
Example #11
0
    def _post_new_content(self, par):
        """Create a new draft Content record and its first ContentVersion
        from the posted form fields, then redirect to the new article.

        Reads request params: 'cid' (content key name), 'cgroup' (group key
        name), 'ctitle' (title). The put() ordering below is deliberate:
        entities must be saved before they can be referenced by each other.
        """
        user = User(self._req.sesh())
        person = user.get_sesh_person()

        ut = Utils()
        key = ut.cleanUserName(self._req.par('cid'))

        # create the content record, by unique key - !important - silently
        # replaces an existing record with the same key - warn user?
        content = Content(key_name=key)
         
        content.status = 'draft'
        groupname = self._req.par('cgroup')
        content.group = Group.get_by_key_name(groupname)

        # create a new version
        contentver = ContentVersion()
        
        # placeholder defaults the author will edit later
        contentver.title = self._req.par('ctitle')
        contentver.content = '<p>Placeholder for the body of your Article</p>' 
        contentver.summary = '<p>Placeholder for the article summary</p>' 
        contentver.person = person

        # must put() first so the version has a key content can reference
        contentver.put()

        # update the current/editing version pointers on this content
        content.current = contentver
        content.editing = contentver
        content.put()

        # back-link the version to its parent content (requires content to be
        # saved first); re-put to persist the reference
        contentver.mycontent = content
        contentver.put()

        # and redirect to the new content
        return self._req.redirect(path=self._conf.BLOG + '/' + groupname + '/' + key)
Example #12
0
 def parse_lecture_from_course(self, course_id, lecture_id):
     """Yield a Lecture scraped from one lecture page of a course.

     Yields a single Lecture: with a download path when the page has a
     download element, otherwise a path-less Lecture.
     """
     with self.session.get(
             f"https://codewithmosh.com/courses/{course_id}/lectures/{lecture_id}",
             cookies=self.cookies,
             verify=False) as lecture:
         soup = html.fromstring(html=lecture.text)
         title = Utils.flush(soup.xpath("//h2")[1].text_content())
         links = soup.xpath("//*[contains(@class,'download')]")
         if len(links) < 1:
             # no download element: yield the bare lecture and STOP.
             # (bug fix: the original fell through after this yield and
             # crashed with IndexError on links[0] below)
             yield Lecture(_id=lecture_id, title=title)
             return
         yield Lecture(
             _id=lecture_id,
             title=title,
             path=links[0].attrib['href'] if 'href' in links[0].attrib else '')
Example #13
0
    def test_step(self, batch, batch_nb):
        """One evaluation iteration: preprocess, forward, compute loss.

        batch: (images, masks, _, _) tuple from the dataloader.
        Returns a dict with 'test_loss' for the framework to aggregate.
        """
        images, masks, _, _ = batch

        images, masks = Utils.preprocessing(images, masks, self.WL, self.WW)

        predictions = self(images)

        # predictions are [batch, 1, H, W]; drop the channel axis so the
        # tensor lines up with the masks
        loss = self.loss(predictions[:, 0, :, :], masks)

        return {'test_loss': loss}
Example #14
0
 def calculate_total_duration(self):
     """Return the formatted total running time of all lectures."""
     # start from 0.0 so the accumulated value is a float, as before
     total_secs = sum((lec.duration for lec in self.lectures), 0.0)
     return Utils.from_secs_to_time(total_secs)
Example #15
0
 def _generate_token():
     """Return a 32-char hex token from the current time plus a random string.

     NOTE(review): md5 is not collision-resistant; for security-sensitive
     tokens prefer secrets.token_hex().
     """
     seed = str(datetime.now()) + "//" + Utils.generate_random_string()
     # bug fix: hashlib requires bytes in Python 3 - passing the bare str
     # raised TypeError
     return hashlib.md5(seed.encode('utf-8')).hexdigest()
Example #16
0
    def migrate(dom=None, msg=None):
        """Migrate a libvirt domain to the destination given in msg['duri'].

        dom: libvirt.virDomain to migrate.
        msg: dict with 'storage_mode' (a StorageMode value) and 'duri'
             (destination URI, e.g. qemu+ssh://destination_host/system).
        Raises RuntimeError for unsupported configurations or on failure.
        """
        assert isinstance(dom, libvirt.virDomain)
        assert isinstance(msg, dict)

        # https://rk4n.github.io/2016/08/10/qemu-post-copy-and-auto-converge-features/
        flags = libvirt.VIR_MIGRATE_PERSIST_DEST | \
            libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | \
            libvirt.VIR_MIGRATE_COMPRESSED | \
            libvirt.VIR_MIGRATE_PEER2PEER | \
            libvirt.VIR_MIGRATE_AUTO_CONVERGE

        root = ET.fromstring(dom.XMLDesc())

        if msg['storage_mode'] == StorageMode.local.value:
            # The disk storage path must be added to the storage pools of
            # both hosts, otherwise libvirt reports
            # "no storage pool with matching target path '/opt/Images'".
            flags |= libvirt.VIR_MIGRATE_NON_SHARED_DISK
            flags |= libvirt.VIR_MIGRATE_LIVE

            if not dom.isActive():
                # message text (Chinese): non-shared storage does not
                # support offline migration
                err = u'非共享存储不支持离线迁移。'
                log_emit.warn(err)
                raise RuntimeError(
                    'Nonsupport offline migrate with storage of non sharing mode.'
                )

            # pre-create each disk image on the destination host over SSH so
            # the non-shared-disk migration has targets to copy into
            ssh_client = Utils.ssh_client(hostname=msg['duri'].split('/')[2],
                                          user='******')

            for _disk in root.findall('devices/disk'):
                _file_path = _disk.find('source').get('file')
                disk_info = Storage.image_info_by_local(path=_file_path)
                disk_size = disk_info['virtual-size']
                stdin, stdout, stderr = ssh_client.exec_command(' '.join([
                    'qemu-img', 'create', '-f', 'qcow2', _file_path,
                    str(disk_size)
                ]))

                for line in stdout:
                    log_emit.info(line)

                for line in stderr:
                    log_emit.error(line)

        elif msg['storage_mode'] in [
                StorageMode.shared_mount.value, StorageMode.ceph.value,
                StorageMode.glusterfs.value
        ]:
            if dom.isActive():
                flags |= libvirt.VIR_MIGRATE_LIVE
                flags |= libvirt.VIR_MIGRATE_TUNNELLED

            else:
                flags |= libvirt.VIR_MIGRATE_OFFLINE

        # duri like qemu+ssh://destination_host/system
        if dom.migrateToURI(duri=msg['duri'], flags=flags) == 0:
            if msg['storage_mode'] == StorageMode.local.value:
                # the guest has moved: remove the now-stale local disk files
                # from the source host
                for _disk in root.findall('devices/disk'):
                    _file_path = _disk.find('source').get('file')
                    if _file_path is not None:
                        os.remove(_file_path)

        else:
            # NOTE(review): this else pairs with the migrateToURI result
            # check, so a FAILED MIGRATION raises 'Unknown storage mode.' -
            # the message looks misplaced (it reads like it belongs to the
            # storage_mode if/elif chain above). Confirm intent before fixing.
            raise RuntimeError('Unknown storage mode.')
Example #17
0
 def __init__(self):
     """Set up an HTTP session with saved cookies and zeroed list counters."""
     # cached lengths, filled in lazily by the parse_* methods
     self.lectures_list_len = 0
     self.courses_list_len = 0

     self.session = requests.Session()
     self.cookies = Utils.load_cookies()