def download_slides(slides_remote, prefix='data'):
    """Download every remote slide and return the local relative paths.

    Args:
        slides_remote: iterable of slide URLs.
        prefix: local directory prefix the files are stored beneath.

    Returns:
        List of 'slides/<basename>' relative filenames, one per URL.
    """
    local_names = []
    for url in slides_remote:
        name = "slides/{}".format(url.split('/')[-1])
        helper.download_file(url, prefix + '/' + name)
        local_names.append(name)
    return local_names
Example #2
0
def download_datasets(link, folder):
    """Scrape dataset anchors from a course page and download each file.

    Prints a failure message and returns early when the page contains no
    dataset links; otherwise files land in <folder>/Dataset/<title>.<ext>.
    """
    response = con.session.get(helper.fix_link(link))
    soup = BeautifulSoup(response.text, 'html.parser')

    anchors = soup.findAll('a', {
        'href': re.compile('^https'),
        'class': re.compile('^link-borderless')
    })
    if not anchors:
        sys.stdout.write(
            f'{bcolors.FAIL}No dataset found!{bcolors.ENDC}\n')
        return

    sys.stdout.write(
        f'{bcolors.BOLD}Downloading dataset...{bcolors.ENDC}\n')
    if not os.path.exists(folder):
        os.mkdir(folder)
    dataset_dir = os.path.join(folder, 'Dataset')
    if not os.path.exists(dataset_dir):
        os.mkdir(dataset_dir)
    for anchor in anchors:
        url = anchor['href']
        # Target name: <folder>/Dataset/<title>.<extension taken from URL>.
        target = os.path.join(dataset_dir, anchor.text.strip()) + '.' + url.split('.')[-1]
        helper.download_file(con, url, target)
Example #3
0
def download_slides(course_id, folder):
    """Collect slide URLs from a course's continue page and fetch each one."""
    page = con.session.get(
        'https://www.datacamp.com/courses/{}/continue'.format(course_id))
    # Slides are hosted either on s3 or on the projector host.
    links = set(re.findall(r'(https?://s3.[/|\w|:|.|-]+[^/])&', page.text))
    links |= set(re.findall(r'(https?://projector[/|\w|:|.|-]+[^/])&', page.text))
    if not links:
        sys.stdout.write(f'{bcolors.FAIL}No slides found!{bcolors.ENDC}\n')
        return

    sys.stdout.write(f'{bcolors.BOLD}Downloading slides...{bcolors.ENDC}\n')
    for url in links:
        target = os.path.join(folder, url.split('/')[-1])
        helper.download_file(con, url, target)
Example #4
0
def _test_photo_type_get(type, date):
    """Verify a photo of the given extension reports the expected date_taken."""
    temporary_folder, folder = helper.create_working_folder()

    source_name = 'photo.{}'.format(type)
    source_path = helper.get_file(source_name)
    origin = '{}/{}'.format(folder, source_name)

    if not source_path:
        source_path = helper.download_file(source_name, folder)
        if not source_path or not os.path.isfile(source_path):
            raise SkipTest('{} file not downlaoded'.format(type))
        # Cache the downloaded fixture so later tests avoid the costly fetch.
        cached_copy = helper.get_file_path(source_name)
        if os.path.isfile(source_path):
            shutil.copyfile(source_path, cached_copy)

    shutil.copyfile(source_path, origin)

    metadata = Photo(origin).get_metadata()

    shutil.rmtree(folder)

    expected = helper.time_convert(date)
    assert metadata['date_taken'] == expected, '{} date {}'.format(type, metadata['date_taken'])
Example #5
0
def test_set_metadata_on_arw():
    """set_location on an ARW file persists GPS data and keeps date_taken."""
    temporary_folder, folder = helper.create_working_folder()

    source = helper.get_file('photo.arw')
    origin = '%s/photo.arw' % folder

    if not source:
        source = helper.download_file('photo.arw', folder)
        if not source or not os.path.isfile(source):
            raise SkipTest('arw file not downlaoded')

    shutil.copyfile(source, origin)

    photo = Photo(origin)
    origin_metadata = photo.get_metadata()

    status = photo.set_location(11.1111111111, 99.9999999999)
    assert status == True, status

    # Re-open the file so metadata is read back from disk.
    metadata = Photo(origin).get_metadata()

    shutil.rmtree(folder)

    assert metadata['date_taken'] == helper.time_convert((2007, 4, 8, 17, 41, 18, 6, 98, 0)), metadata['date_taken']
    assert helper.isclose(metadata['latitude'], 11.1111111111), metadata['latitude']
    assert helper.isclose(metadata['longitude'], 99.9999999999), metadata['longitude']
Example #6
0
def test_set_metadata_on_rw2():
    # Intentionally skipped: RW2 write support is missing (gh-94). Everything
    # below the raise is dead code kept for when support lands.
    raise SkipTest('gh-94 Writing to RW2 images is not supported')
    temporary_folder, folder = helper.create_working_folder()

    photo_file = helper.get_file('photo.rw2')
    origin = '%s/photo.rw2' % folder

    if not photo_file:
        photo_file = helper.download_file('photo.rw2', folder)
        if not photo_file or not os.path.isfile(photo_file):
            raise SkipTest('rw2 file not downlaoded')

    shutil.copyfile(photo_file, origin)

    photo = Photo(origin)
    origin_metadata = photo.get_metadata()

    status = photo.set_location(11.1111111111, 99.9999999999)

    assert status == True, status

    # Re-open the file so metadata is read back from disk.
    photo_new = Photo(origin)
    metadata = photo_new.get_metadata()

    shutil.rmtree(folder)

    assert metadata['date_taken'] == helper.time_convert((2014, 11, 19, 23, 7, 44, 2, 323, 0)), metadata['date_taken']
    assert helper.isclose(metadata['latitude'], 11.1111111111), metadata['latitude']
    assert helper.isclose(metadata['longitude'], 99.9999999999), metadata['longitude']
Example #7
0
def _test_photo_type_get(type, date):
    """Check that a photo with the given extension reports the expected date."""
    temporary_folder, folder = helper.create_working_folder()

    fixture_name = 'photo.{}'.format(type)
    fixture = helper.get_file(fixture_name)
    origin = '{}/{}'.format(folder, fixture_name)

    if not fixture:
        fixture = helper.download_file(fixture_name, folder)
        if not fixture or not os.path.isfile(fixture):
            raise SkipTest('{} file not downlaoded'.format(type))
        # Re-downloading per test is costly, so store it in the fixture cache.
        cache_path = helper.get_file_path(fixture_name)
        if os.path.isfile(fixture):
            shutil.copyfile(fixture, cache_path)

    shutil.copyfile(fixture, origin)

    metadata = Photo(origin).get_metadata()

    shutil.rmtree(folder)

    want = helper.time_convert(date)
    assert metadata['date_taken'] == want, '{} date {}'.format(type, metadata['date_taken'])
Example #8
0
def _test_photo_type_set(type, date):
    """Shared check: set_location writes GPS coordinates to a photo of the
    given extension while preserving its date_taken.

    Args:
        type: file extension of the fixture photo (e.g. 'jpg', 'nef').
        date: expected date tuple, compared via helper.time_convert.
    """
    temporary_folder, folder = helper.create_working_folder()

    photo_name = 'photo.{}'.format(type)
    photo_file = helper.get_file(photo_name)
    origin = '{}/{}'.format(folder, photo_name)

    if not photo_file:
        photo_file = helper.download_file(photo_name, folder)
        if not photo_file or not os.path.isfile(photo_file):
            # FIX: corrected misspelled message ("downlaoded").
            raise SkipTest('{} file not downloaded'.format(type))

    shutil.copyfile(photo_file, origin)

    photo = Photo(origin)
    origin_metadata = photo.get_metadata()

    status = photo.set_location(11.1111111111, 99.9999999999)

    assert status == True, status

    # Re-open the file so metadata is read back from disk.
    photo_new = Photo(origin)
    metadata = photo_new.get_metadata()

    shutil.rmtree(folder)

    assert metadata['date_taken'] == helper.time_convert(
        date), '{} date {}'.format(type, metadata['date_taken'])
    assert helper.isclose(metadata['latitude'],
                          11.1111111111), '{} lat {}'.format(
                              type, metadata['latitude'])
    # BUG FIX: the failure message previously reported latitude here.
    assert helper.isclose(metadata['longitude'],
                          99.9999999999), '{} lon {}'.format(
                              type, metadata['longitude'])
Example #9
0
def _test_photo_type_set(type, date):
    """Shared check: set_location writes GPS coordinates to a photo of the
    given extension while preserving its date_taken.

    Args:
        type: file extension of the fixture photo (e.g. 'jpg', 'nef').
        date: expected date tuple, compared via helper.time_convert.
    """
    temporary_folder, folder = helper.create_working_folder()

    photo_name = 'photo.{}'.format(type)
    photo_file = helper.get_file(photo_name)
    origin = '{}/{}'.format(folder, photo_name)

    if not photo_file:
        photo_file = helper.download_file(photo_name, folder)
        if not photo_file or not os.path.isfile(photo_file):
            # FIX: corrected misspelled message ("downlaoded").
            raise SkipTest('{} file not downloaded'.format(type))

    shutil.copyfile(photo_file, origin)

    photo = Photo(origin)
    origin_metadata = photo.get_metadata()

    status = photo.set_location(11.1111111111, 99.9999999999)

    assert status == True, status

    # Re-open the file so metadata is read back from disk.
    photo_new = Photo(origin)
    metadata = photo_new.get_metadata()

    shutil.rmtree(folder)

    assert metadata['date_taken'] == helper.time_convert(date), '{} date {}'.format(type, metadata['date_taken'])
    assert helper.isclose(metadata['latitude'], 11.1111111111), '{} lat {}'.format(type, metadata['latitude'])
    # BUG FIX: failure message previously printed latitude for the longitude check.
    assert helper.isclose(metadata['longitude'], 99.9999999999), '{} lon {}'.format(type, metadata['longitude'])
Example #10
0
async def camp(ctx, arg):
    """Handle the `a!camp <chapter>-<stage>` command.

    In SEARCH_CHANNEL: look up the stage images on Drive and send them back,
    including any recorded upload caption. In CHANNEL_NAME: upload the
    message attachments to Drive and record them in the DB, reacting with a
    thumbs-up/down. Bot authors are ignored in both channels.
    """
    try:
        chapter, stage = arg.split('-')
        # SEARCH
        if ctx.message.channel.name == SEARCH_CHANNEL and not ctx.message.author.bot:
            return_message = ""
            search_folder = get_folder_id_by_name(chapter, service, memo)
            stage_ids = search_file_in_folder(search_folder, stage, service)
            if stage_ids is not None:
                for stage_id, file_name in stage_ids:
                    # BUG FIX: reset per iteration — previously stage_doc was
                    # unbound when DB is None (UnboundLocalError) and a stale
                    # doc from an earlier iteration could leak into this one.
                    stage_doc = None
                    if DB is not None:
                        stage_doc = DB.table(UPLOADED_STAGES).get(
                            User.file_id == stage_id)
                    if stage_doc:
                        return_message += f"Upload caption: {stage_doc['message']}\n"

                    try:
                        stage_file = download_file(stage_id, service)
                        sending_file = File(stage_file, f"{file_name}.jpg")
                        await ctx.send(return_message, file=sending_file)
                    except Exception as e:
                        # Best-effort: log and keep sending remaining stages.
                        print(e)
                        print(e.args)
            else:
                await ctx.send(f"Couldn't find it, sowwy {PEPE}")
            return

        # UPLOAD: only in the upload channel, never for bot authors.
        if not (ctx.message.channel.name == CHANNEL_NAME
                and not ctx.message.author.bot):
            return

        for attachment in ctx.message.attachments:
            uploaded_file_id = upload_file(service, attachment.url, chapter,
                                           ctx.message.author, stage, memo)
            if uploaded_file_id is not None:
                print(f"Chapter: {chapter} - Stage: {stage}")
                DB.table(LAST_UPLOADED_TABLE).upsert(
                    {
                        'userId': ctx.author.id,
                        'file_id': uploaded_file_id,
                        'message': ctx.message.content,
                        'message_id': ctx.message.id,
                        'removed': False
                    }, User.userId == ctx.author.id)
                # Strip the command prefix so only the caption is stored.
                DB.table(UPLOADED_STAGES).insert({
                    'userId': ctx.author.id,
                    'file_id': uploaded_file_id,
                    'message': ctx.message.content.replace(f"a!camp {arg}", '')
                })
                print(ctx.message.content)
                print(ctx.message.content.replace(f"a!camp {arg}", ''))
                await ctx.message.add_reaction('πŸ‘')
            else:
                await ctx.message.add_reaction('πŸ‘Ž')
    except Exception:
        # FIX: narrowed from a bare except. Malformed arg (no '-') or any
        # failure above is signalled with a thumbs-down reaction.
        await ctx.message.add_reaction('πŸ‘Ž')
Example #11
0
def test_set_original_name():
    """set_original_name stores the file's current (randomized) name in metadata."""
    for fixture_name in ('plain.jpg', 'audio.m4a', 'photo.nef', 'video.mov'):
        extension = os.path.splitext(fixture_name)[1]

        temporary_folder, folder = helper.create_working_folder()

        random_name = '%s%s' % (helper.random_string(10), extension)
        origin = '%s/%s' % (folder, random_name)
        fixture = helper.get_file(fixture_name)
        if fixture is False:
            fixture = helper.download_file(fixture_name, folder)

        shutil.copyfile(fixture, origin)

        media = Media.get_class_by_file(origin, [Audio, Media, Photo, Video])
        before = media.get_metadata()
        media.set_original_name()
        after = media.get_metadata()

        shutil.rmtree(folder)

        assert before['original_name'] is None, before['original_name']
        assert after['original_name'] == random_name, after['original_name']
Example #12
0
def download_course_data(conn, link, path):
    """Scrape dataset links from the embedded course page and download them.

    Args:
        conn: connection object. NOTE(review): unused — the body uses the
            module-level `con` instead; confirm whether `conn` was meant here.
        link: course link, normalized through helper.embbed_link().
        path: destination directory; a 'Dataset' subfolder is created in it.
    """
    page = con.session.get(helper.embbed_link(link))
    soup = BeautifulSoup(page.text, 'html.parser')

    # Dataset files are the borderless https anchors on the page.
    dataset = soup.findAll('a', {
        'href': re.compile('^https'),
        'class': re.compile('^link-borderless')
    })

    titles = [x.text.strip() for x in dataset]
    all_links = [x['href'] for x in dataset]
    sys.stdout.write(f'{bcolors.BOLD}Downloading dataset...{bcolors.ENDC}\n')
    if not os.path.exists(path):
        os.mkdir(path)
    if (not os.path.exists(os.path.join(path, 'Dataset'))):
        os.mkdir(os.path.join(path, 'Dataset'))
    for link, title in zip(all_links, titles):
        # Target name: <path>/Dataset/<title>.<extension taken from the URL>.
        dir = os.path.join(path, 'Dataset', title)
        dir = dir + '.' + link.split('.')[-1]
        download_file(con, link, dir)
Example #13
0
def test_get_metadata_from_arw():
    """An ARW fixture reports the expected date_taken."""
    temporary_folder, folder = helper.create_working_folder()

    source = helper.get_file('photo.arw')
    origin = '%s/photo.arw' % folder

    if not source:
        source = helper.download_file('photo.arw', folder)
        if not source or not os.path.isfile(source):
            raise SkipTest('arw file not downlaoded')
        # Cache the download so later tests skip the costly fetch.
        cached = helper.get_file_path('photo.arw')
        if os.path.isfile(source):
            shutil.copyfile(source, cached)

    shutil.copyfile(source, origin)

    metadata = Photo(origin).get_metadata()

    shutil.rmtree(folder)

    expected = helper.time_convert((2007, 4, 8, 17, 41, 18, 6, 98, 0))
    assert metadata['date_taken'] == expected, metadata['date_taken']
Example #14
0
	def setup(self, solution_name, settings):
		"""Install NVDA if missing, apply the user's settings and start it.

		Returns 0 on success, otherwise an error code:
		200 installer download failed, 201 install failed, 202 could not
		quit the running instance, 203 config file missing, 204 config
		parse error, 205 config write error.
		"""
		log = Logger()
		# check if nvda is already installed on the system, if not, install it
		standard_nvda_path = "C:\\Programme\\NVDA\\nvda.exe"
		nvda_path = helper.is_process_running("nvda.exe")
		if nvda_path == "":
			nvda_path = standard_nvda_path

		if os.path.exists(nvda_path) == False:
			# nvda download url
			url = "http://downloads.sourceforge.net/project/nvda/releases/2012.2.1/nvda_2012.2.1.exe"
			# download nvda installer
			nvda_installer = helper.download_file(url)
			if nvda_installer == "":
				return 200
			# install nvda
			rc = subprocess.call([nvda_installer, "--install"])
			if rc > 0:
				return 201
		# if nvda runs, exit it so the config file is not locked
		nvda_path = helper.is_process_running("nvda.exe")
		if nvda_path != "":
			rc = subprocess.call([nvda_path, "-q"])
			if rc > 0:
				return 202
		else:
			nvda_path = standard_nvda_path

		# configure the program and start it
		config_file = os.environ['APPDATA'] + "\\nvda\\nvda.ini"
		if os.path.exists(config_file) == False:
			return 203

		# parse the ini config file
		try:
			config = ConfigObj(config_file)
		except configobj.ParseError:
			return 204


		# apply the settings supported by this solution
		attrs_without_value = []
		for attr in self.get_solution_list()[solution_name]:
			if settings.has_key(attr) == True:
				value = self.convert_values(attr, settings[attr])
				if value == None:
					log.log_msg("NVDA: The attribute " + attr + " couldn't be converted into a NVDA specific format, skipped", "warning")
					continue
				if attr == "preferred-lang":
					try:
						config['speech']['espeak']['voice'] = value['voice']
					except:
						log.log_msg("NVDA: Error while changing the attribute " + attr + " in the NVDA settings file.", "warning")
				if attr == "speech-rate":
					try:
						config['speech']['espeak']['rate'] = value['rate']
						config['speech']['espeak']['rateBoost'] = value['rateBoost']
					except:
						log.log_msg("NVDA: Error while changing the attribute " + attr + " in the NVDA settings file.", "warning")
			else:
				attrs_without_value.append(attr)

		# list of attributes without a corresponding value
		if len(attrs_without_value) > 0:
			attr_str = ""
			for attr in attrs_without_value:
				# NOTE(review): str.join does not mutate attr_str, so nothing
				# is ever accumulated here — was "attr_str += attr" intended?
				attr_str.join(attr)
			log.log_msg("NVDA: The following supported attributes have no value in the user profile: " + attr_str, "warning")

		# write back the nvda settings file
		try:
			config.write()
		except:
			return 205

		# start configured nvda
		rc = os.popen(nvda_path)
#		rc = os.system(nvda_path + " &")
		print "start nvda, rc = ", rc
		return 0
Example #15
0
def download_videos(course_id, folder):
    """Download every chapter video of a course into `folder`.

    Tries projector-key style video ids first (video_type 1) and falls back
    to HLS-style video URLs (video_type 2). Files already present locally
    are skipped.
    """
    chapters = get_course_chapters(course_id)
    display_text = True  # print the "Downloading videos..." banner only once
    for chapter in chapters['user_chapters']:
        page = con.session.get(
            'https://www.datacamp.com/courses/{}/chapters/{}/continue'.format(
                course_id, chapter['chapter_id']))
        video_ids = set(
            re.findall(r';(course_{}_[\d|\w]+)&'.format(course_id), page.text))
        video_type = 1
        if len(video_ids) == 0:
            # No projector keys found: fall back to direct video URLs.
            video_ids = set(
                re.findall(r'(//videos.[/|\w|:|.|-]+[^/])&', page.text))
            video_type = 2
        if len(video_ids) == 0:
            sys.stdout.write(f'{bcolors.FAIL}No videos found!{bcolors.ENDC}\n')
            return

        if display_text:
            sys.stdout.write(
                f'{bcolors.BOLD}Downloading videos...{bcolors.ENDC}\n')
            display_text = False

        for video_id in video_ids:
            # Retry the projector request until it succeeds; handle_error is
            # expected to recover the session before the next attempt.
            while True:
                try:
                    if video_type == 1:
                        video_page = con.session.get(
                            'https://projector.datacamp.com/?projector_key=' +
                            video_id)
                    elif video_type == 2:
                        video_page = con.session.get(
                            'https://projector.datacamp.com/?video_hls=' +
                            video_id)
                except:
                    helper.handle_error(con)
                    continue
                break
            soup = BeautifulSoup(video_page.text, 'html.parser')
            video_url = json.loads(
                soup.find("input", {"id": "videoData"})['value'])

            link = video_url['video_mp4_link']

            if link is None:
                sys.stdout.write(
                    f'{bcolors.FAIL}Videos cannot be downloaded!{bcolors.ENDC}\n'
                )
                return
            if link.endswith('mp4') and not link.startswith('http'):
                # Protocol-relative mp4 link ('//...'): force https.
                link = 'https://' + link[2:]
                name = link.split('/')[-1]
            else:
                if video_type == 1:
                    video_name_url = json.loads(
                        soup.find("input", {"id": "slideDeckData"})['value'])
                    link_name = video_name_url['plain_video_mp4_link']
                    if link_name is not None:
                        name = link_name.split('/')[-1]
                    else:
                        # No plain mp4 link: derive a name from the audio track.
                        name = video_url['audio_link'].split('/')[-1].split(
                            '.')[0] + '.mp4'
                elif video_type == 2:
                    link_name = video_url['video_mp4_link']
                    name = link_name.split('/')[-1]
                if name.count('_') > 1:
                    # Drop the prefix before the first underscore.
                    name = name.split('_')[1:]
                    name = '_'.join(name)
            file_path = os.path.join(folder, name)

            if helper.file_exist(file_path):
                continue
            helper.download_file(con, link, file_path)
Example #16
0
	imported_modules = helper.find_modules("modules", AbstractModule)
	# print the name of every found module
	print "Imported modules:"
	for i in range(len(imported_modules)):
		print imported_modules[i]()
	print"\n"

	while(1):
		# fetch json profile from server
		# download the profile file
		# the following if statement only belongs to my vm settings, if I
		# start the program under the host, the url is the localhost,
		# otherwise it's the given ip address
		file_name = ""
		if platform.node() == "scimitar":
			file_name =	helper.download_file("http://localhost/cloud4all/profile_json.txt",	True)
		else:
			file_name =	helper.download_file("http://10.0.2.2/cloud4all/profile_json.txt",	True)
		if os.path.exists(file_name) == False:
			log.log_msg("Error: Download of the profile file failed.", "error")
			sys.exit(1)
		f = open(file_name, "r")
		json_string = f.read()
		# parse the string
		try:
			user_profile = json.loads(json_string)
		except ValueError:
			log.log_msg("Error: Parsing of the user profile failed.", "error")
			sys.exit(2)

		dev_profile = DeviceProfile()
Example #17
0
    def sync(self):
        """Walk every course section and download its resources.

        Requires login(), get_courses() and get_sections() to have run first.
        Handles resources, URLs, folders, assignments and pages, plus
        embedded OpenCast and YouTube videos. Files land under
        basedir/<semester>/<course>/<section>[/<category>].
        """
        if not self.session:
            raise Exception("You need to login() first.")
        if not self.courses:
            raise Exception("You need to get_courses() first.")
        if not self.sections:
            raise Exception("You need to get_sections() first.")

        ### Syncing all courses

        for course_id, session_key, semestername, coursename in self.sections.keys(
        ):
            print(f"Syncing {coursename}...")
            for sec in self.sections[course_id, session_key, semestername,
                                     coursename]:
                sectionname = helper.clean_filename(
                    sec.select_one(".sectionname").get_text())
                #print(f"[{datetime.now()}] Section {sectionname}")
                mainsectionpath = os.path.join(self.config["basedir"],
                                               semestername, coursename,
                                               sectionname)

                # Categories can be multiple levels deep like folders, see https://moodle.rwth-aachen.de/course/view.php?id=7053&section=1

                label_categories = sec.findAll(
                    "li", {
                        "class": [
                            "modtype_label", "modtype_resource", "modtype_url",
                            "modtype_folder", "modtype_assign", "modtype_page"
                        ]
                    })

                # Group every module under the preceding label; modules before
                # the first label fall into an unnamed (None) category.
                categories = []
                category = None
                for l in label_categories:
                    # Create a category for all labels if enableExperimentalCategories is set
                    if "modtype_label" in l['class'] and self.config[
                            "enableExperimentalCategories"]:
                        category = (helper.clean_filename(
                            l.findAll(text=True)[-1]), [])
                        categories.append(category)
                    else:
                        if category == None:
                            category = (None, [])
                            categories.append(category)
                        category[1].append(l)

                ## Download Opencast Videos directly embedded in section
                helper.scan_for_opencast(sec, course_id, session_key,
                                         mainsectionpath, self.session)

                for category_name, category_soups in categories:
                    # Unnamed categories write straight into the section path.
                    if category_name == None:
                        sectionpath = mainsectionpath
                    else:
                        sectionpath = os.path.join(mainsectionpath,
                                                   category_name)
                    for s in category_soups:
                        mod_link = s.find('a', href=True)
                        if not mod_link:
                            continue
                        mod_link = mod_link["href"]

                        ## Get Resources
                        if "modtype_resource" in s["class"]:
                            # First check if the file is directly accessible:
                            if helper.download_file(mod_link, sectionpath,
                                                    self.session):
                                continue
                            # If no file was found, then it could be an html page with an enbedded video
                            response = self.session.get(mod_link,
                                                        params=self.params)
                            if "Content-Type" in response.headers and "text/html" in response.headers[
                                    "Content-Type"]:
                                tempsoup = bs(response.text,
                                              features="html.parser")
                                videojs = tempsoup.select_one(".video-js")
                                if videojs:
                                    videojs = videojs.select_one("source")
                                    if videojs and videojs.get("src"):
                                        helper.download_file(
                                            videojs["src"], sectionpath,
                                            self.session,
                                            videojs["src"].split("/")[-1])

                        ## Get Resources in URLs
                        if "modtype_url" in s["class"]:
                            url = None
                            try:
                                # Follow the moodle redirect to the real target.
                                response = self.session.head(
                                    mod_link, params=self.params)
                                if "Location" in response.headers:
                                    url = response.headers["Location"]
                                    response = self.session.head(
                                        url, params=self.params)
                                    if "Content-Type" in response.headers and "text/html" not in response.headers[
                                            "Content-Type"]:
                                        # Don't download html pages
                                        helper.download_file(
                                            url, sectionpath, self.session)
                                    elif "engage.streaming.rwth-aachen.de" in url:
                                        # Maybe its a link to an OpenCast video
                                        helper.downloadOpenCastVideos(
                                            url, course_id, session_key,
                                            sectionpath, self.session)
                            except:
                                # Maybe the url is down?
                                print(f"Error while downloading url {url}")

                        ## Get Folders
                        if "modtype_folder" in s["class"]:
                            response = self.session.get(mod_link,
                                                        params=self.params)
                            soup = bs(response.text, features="html.parser")
                            soup_results = soup.find("a", {"title": "Folder"})

                            if not soup_results:
                                # page has no title?
                                continue

                            foldername = helper.clean_filename(
                                soup_results.text)
                            filemanager = soup.select_one(
                                ".filemanager").findAll('a', href=True)
                            # Nested folders are not handled yet; flat download only.
                            for file in filemanager:
                                link = file["href"]
                                filename = file.select_one(".fp-filename").text
                                helper.download_file(
                                    link, os.path.join(sectionpath,
                                                       foldername),
                                    self.session, filename)

                        ## Get Assignments
                        if "modtype_assign" in s["class"]:
                            response = self.session.get(mod_link,
                                                        params=self.params)
                            soup = bs(response.text, features="html.parser")
                            soup_results = soup.find("a",
                                                     {"title": "Assignment"})

                            if not soup_results:
                                # page has no title?
                                continue

                            foldername = helper.clean_filename(
                                soup_results.text)
                            files = soup.select(".fileuploadsubmission")
                            for file in files:
                                link = file.find('a', href=True)["href"]
                                filename = file.text
                                helper.download_file(
                                    link, os.path.join(sectionpath,
                                                       foldername),
                                    self.session, filename)

                        ## Get embedded videos in pages
                        if "modtype_page" in s["class"]:
                            response = self.session.get(mod_link,
                                                        params=self.params)
                            soup = bs(response.text, features="html.parser")
                            soup_results = soup.find("a", {"title": "Page"})

                            if not soup_results:
                                # page has no title?
                                continue

                            pagename = helper.clean_filename(soup_results.text)
                            path = os.path.join(sectionpath, pagename)

                            # Youtube videos
                            helper.scanAndDownloadYouTube(soup, path)

                            # OpenCast videos
                            helper.scan_for_opencast(soup, course_id,
                                                     session_key, path,
                                                     self.session)