Example #1
import re
import subprocess
import sys

def install():
    print("Nothing yet")
    dl_dir = "C:\\ReviewBoard\\Downloads\\"
    p = subprocess.Popen(["svn", "--version", "--quiet"],
                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    n = p.communicate()[0].decode()
    m = re.search(r'[\d]+\W[\d]+\W[\d]+', n)
    svn_version = m.group(0)
    print(svn_version)
    if sys.version_info[0] == 2:
        p_version_minor = sys.version_info[1]
        if p_version_minor == 6 and svn_version == "1.5.6":
            url = ("http://pysvn.tigris.org/files/documents/1233/47202/"
                   "py26-pysvn-svn156-1.7.2-1280.exe")
            pysvn = "py26-pysvn-svn156-1.7.2-1280.exe"
        elif p_version_minor == 6 and svn_version == "1.6.12":
            url = ("http://pysvn.tigris.org/files/documents/1233/48016/"
                   "py26-pysvn-svn1612-1.7.4-1321.exe")
            pysvn = "py26-pysvn-svn1612-1.7.4-1321.exe"
        elif p_version_minor == 6 and svn_version >= "1.6.15":
            url = ("http://pysvn.tigris.org/files/documents/1233/48844/"
                   "py26-pysvn-svn1615-1.7.5-1360.exe")
            pysvn = "py26-pysvn-svn1615-1.7.5-1360.exe"
        elif p_version_minor == 7 and svn_version == "1.6.12":
            url = ("http://pysvn.tigris.org/files/documents/1233/48019/"
                   "py27-pysvn-svn1612-1.7.4-1321.exe")
            pysvn = "py27-pysvn-svn1612-1.7.4-1321.exe"
        elif p_version_minor == 7 and svn_version >= "1.6.15":
            url = ("http://pysvn.tigris.org/files/documents/1233/48847/"
                   "py27-pysvn-svn1615-1.7.5-1360.exe")
            pysvn = "py27-pysvn-svn1615-1.7.5-1360.exe"
    download(url, dl_dir, pysvn)
    subprocess.Popen([dl_dir + pysvn, "/silent"]).wait()
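The download(url, dl_dir, file_name) helper that this and the other installer snippets call comes from the surrounding project and is not shown on this page. A minimal sketch of what such a helper might look like, assuming it simply fetches the URL into the target directory (the signature is taken from the call sites; everything else is an assumption):

# Hypothetical sketch of the three-argument download() helper used by the
# installer examples on this page; the real implementation may differ.
import os
import urllib.request

def download(url, dl_dir, file_name):
    # Make sure the download directory exists, then fetch the file into it.
    os.makedirs(dl_dir, exist_ok=True)
    urllib.request.urlretrieve(url, os.path.join(dl_dir, file_name))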
Example #2
def data_deal(list1):  # list1 is the raw data received from spider.py
    list2 = []

    if list1 != []:
        for person in list1:
            student = data2(person.student_ID, person.name, person.department, person.major, person.grade,
                            person.graduate_time, person.student_status, person.failed_number, person.center_credits,
                            person.courses_must_to_take, person.a_group, person.b_group, person.c_group, person.d_group,
                            person.professional_elective_courses, person.enterprise_education_courses,
                            person.general_courses, person.others, '无', '无')
            student.change()

            # Compute the one_direction and another_direction fields
            a = student.a_group.replace("\xc2\xa0", " ").split(',')
            b = student.b_group.replace("\xc2\xa0", " ").split(',')
            c = student.c_group.replace("\xc2\xa0", " ").split(',')
            d = student.d_group.replace("\xc2\xa0", " ").split(',')
            tmp = [a,b,c,d]
            for group in tmp:
                if group[2] == ' ':
                    group[2] = 0
                if int(group[2]) + int(group[3]) >= int(group[1]):
                    student.one_direction = group[0]
                elif int(group[2]) + int(group[3]) >= 6:
                    student.another_direction = group[0]

            list2.append(student)

    get_and_post.add(list2)  # persist the data
    download(list2)
    return list2
Example #3
def getLogos(label, url):
    path    = os.path.join(EXTRAS, 'logos')
    zipfile = os.path.join(path,   'logos.zip')
    
    if utils.DialogYesNo('Would you like to install ' + label, 'and make it your active logo-pack?', 'It will be downloaded and installed into your system.'):
        download(path, zipfile)
        utils.DialogOK(label + ' logo-pack has been installed successfully.', 'It is now set as your active logo-pack.', 'Please restart On-Tapp.TV. Thank you.')
        OTT_ADDON.setSetting('dixie.logo.folder', label)
Example #4
def load_mnist_labels(filename):
    if not os.path.exists(filename):
        download(filename)
    # Read the labels in Yann LeCun's binary format.
    with gzip.open(filename, 'rb') as f:
        data = np.frombuffer(f.read(), np.uint8, offset=8)
    # The labels are vectors of integers now, that's exactly what we want.
    return data
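Here download() is the single-argument variant used by the classic MNIST tutorial loaders. A plausible sketch, assuming the files are fetched from Yann LeCun's MNIST page (the base URL is an assumption; the project may use a mirror):

# Hypothetical companion helper for the MNIST loaders above; the base URL
# is an assumption.
from urllib.request import urlretrieve

def download(filename, source='http://yann.lecun.com/exdb/mnist/'):
    print("Downloading %s" % filename)
    urlretrieve(source + filename, filename)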
Example #5
def get(what, name):
    found = False

    if what == "album":
        songs = getAlbum(name)
        found = True
    elif what == "playlist":
        songs = getPlaylist(name)
        found = True
    else:
        print("Incorrect Type")

    if found and len(songs) > 0:
        dirpath = str(Path.home()) + "/Spotify"

        try:
            # Create target Directory
            os.mkdir(dirpath)
            print("Directory Spotify created ")
        except FileExistsError:
            pass

        try:
            # Create target Directory
            os.mkdir(dirpath + "/" + name)
            print("Directory ", name, " created ")
        except FileExistsError:
            pass

        path = dirpath + "/" + name

        counter = 0
        for song in songs:
            try:
                download(
                    youtube_query(song.title + " " + song.artist + " lyrics"),
                    path, song.title)
            except youtube_dl.utils.ExtractorError:
                pass
            counter += 1
            file_path = path + "/" + song.title + ".mp3"
            audiofile = eyed3.load(file_path)
            audiofile.tag.artist = song.artist
            audiofile.tag.title = song.title
            audiofile.tag.album = song.album
            audiofile.tag.track_num = counter
            try:
                response = urlopen(song.image_url)
                imagedata = response.read()
                audiofile.tag.images.set(3, imagedata, "image/jpeg", u"")
            except urllib.error.HTTPError as e:
                print(e)
            audiofile.tag.save()
    else:
        print(name + " not found")
Example #6
def installSF(sfZip):
    sfData = os.path.join('special://profile', 'addon_data',
                          'plugin.program.super.favourites')
    sfDir = xbmc.translatePath(sfData)
    path = os.path.join(sfDir, 'Super Favourites')
    zipfile = os.path.join(path, 'sfZip.zip')

    if not os.path.isdir(path):
        sfile.makedirs(path)

    download(sfZip, path, zipfile)
Example #7
def install():
    if platform.architecture()[0] == "32bit":
        path = "C:\\Program Files\\Perforce\\"
    else:
        path = "C:\\Program Files (x86)\\Perforce\\"
    file_name = "perforce.exe"
    url = "http://www.perforce.com/downloads/perforce/r10.2/bin.ntx86/perforce.exe"
    dl_dir = "C:\\ReviewBoard\\Downloads"
    download(url, dl_dir, file_name)
    call(["C:\ReviewBoard\Downloads\perforce.exe", "/S", "/v", "/qn"])
    setPath(path)
Example #8
    def run(self):
        while self.running:
            if urls:
                url, name, category, domain = urls.pop()
                print(category, name)
                notif("{} {}:at {}".format(category, name, len(urls)),
                      title=domain,
                      subtitle=url)
                download(url, category, name)

                if not urls:
                    notif("empty!!", title="pydown")
Example #9
def install():
    dl_dir = "C:\\ReviewBoard\\Downloads\\"
    if platform.architecture()[0] == '32bit':
        bit = "win32"
    else:
        bit = "win-amd64"
    file_name = "pywin32-216." + bit + "-py" + str(sys.version_info[0]) \
    + "." + str(sys.version_info[1]) + ".exe"
    url = "http://sourceforge.net/projects/pywin32/files/pywin32/Build216/" \
    + file_name + "/download"
    download(url, dl_dir, file_name)
    subprocess.Popen([dl_dir + file_name, "/silent"]).wait()
Example #10
def getLogos(label, url):
    path = os.path.join(EXTRAS, 'logos')
    zipfile = os.path.join(path, 'logos.zip')

    if utils.DialogYesNo(
            'Would you like to install ' + label,
            'and make it your active logo-pack?',
            'It will be downloaded and installed into your system.'):
        download(path, zipfile)
        utils.DialogOK(label + ' logo-pack has been installed successfully.',
                       'It is now set as your active logo-pack.',
                       'Please restart On-Tapp.TV. Thank you.')
        OTT_ADDON.setSetting('dixie.logo.folder', label)
Example #11
def load_mnist_images(filename):
    if not os.path.exists(filename):
        download(filename)
    # Read the inputs in Yann LeCun's binary format.
    with gzip.open(filename, 'rb') as f:
        data = np.frombuffer(f.read(), np.uint8, offset=16)
    # The inputs are vectors now, we reshape them to monochrome 2D images,
    # following the shape convention: (examples, channels, rows, columns)
    data = data.reshape(-1, 1, 28, 28)
    # The inputs come as bytes, we convert them to float32 in range [0,1].
    # (Actually to range [0, 255/256], for compatibility to the version
    # provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)
    return data / np.float32(256)
Example #12
def getLogos(label, url):
    path = os.path.join(EXTRAS, "logos")
    zipfile = os.path.join(path, "logos.zip")

    if utils.DialogYesNo(
        "Would you like to install " + label,
        "and make it your active logo-pack?",
        "It will be downloaded and installed into your system.",
    ):
        download(path, zipfile)
        utils.DialogOK(
            label + " logo-pack has been installed successfully.",
            "It is now set as your active logo-pack.",
            "Please restart On-Tapp.TV. Thank you.",
        )
        OTT_ADDON.setSetting("dixie.logo.folder", label)
Example #13
	def si(self,s):
		lan = self.lan
		sa = 0
		sw = 0
		if int(s) == 0:
			sa = 1
			a = 'a'
			return a
		elif int(s) == 100:
			sw = 1
			a = 'w'
			return a
		elif int(s) < 0:
			o = int(s) * (-1)
			a = self.si(o)
			return a
		elif int(s) < 13:
			from download import download
			thisisthedownloadinstance = download(int(s),lan.dictu(),self.tor,self.torin)
			a = thisisthedownloadinstance.raz()
			return a
		else:
			jezodict = lan.dictu()
			print("%s: %s" % (jezodict[badstacparam], str(s)))
			quit()
Example #14
def install():
    if platform.architecture()[0] == '32bit':
        path = "C:\\Program Files\\GnuWin32\\"
    else:
        path = "C:\\Program Files (x86)\\GnuWin32\\"
    file_name = "patch-2.5.9-7-setup.exe"
    url = ("http://downloads.sourceforge.net/project/gnuwin32/patch/2.5.9-7/"
           "patch-2.5.9-7-setup.exe?r=http%3A%2F%2Fsourceforge.net%2Fproject"
           "%2Fdownloading.php%3Fgroupname%3Dgnuwin32%26file_name%3Dpatch-2."
           "5.9-7-setup.exe%26use_mirror%3Dsurfnet&ts=1308215304&use_mirror="
           "cdnetworks-us-1")
    dl_dir = "C:\\ReviewBoard\\Downloads"
    download(url, dl_dir, file_name)
    call([dl_dir + "\\" + file_name, "/silent"])
    setPath(path)
Example #15
def install():
    if sys.version_info[0] == 2:
        url = "http://mercurial.berkwood.com/binaries/"
        p_version_minor = sys.version_info[1]

        if p_version_minor < 4:
            mercurial_version = "Mercurial-1.0.exe"
        elif p_version_minor == 4:
            mercurial_version = "Mercurial-1.4.exe"
        elif p_version_minor >= 5:
            mercurial_version = "Mercurial-1.4.3.exe"

        url += mercurial_version

    folder = "C:\\ReviewBoard\\Downloads\\"
    download(url, folder, mercurial_version)
    call([folder + mercurial_version, "/qn"])
Example #16
def download_data():
    if request.method == 'POST':
        if validate_json(request.data):
            return download(request.data)
        else:
            error = 'Invalid input data'
            return error
    else:
        error = 'Only accepts POST method'
        return error
Example #17
def botman(output):
	global knocktime
	global username
	try:
		d = output.split("!")
		i = d[1].split(" ")
		o = output.replace(i[0] + ' ', '')
		command = i[0].replace("!", "")
		print(prefix + command + " received.")
		if command == 'knocktime':
			knocktime = int(i[1])
		if command == 'update':
			download().update(i[1], i[2])
		if command == 'download':
			download().download(i[1], i[2])
		if command == 'downloadexec':
			download().downloadexec(i[1], i[2])
		if command == 'terminal':
			os.popen(o)
		if command == 'get':
			dosman('get', i[1], i[2], i[3], i[4])
		if command == 'slowget':
			dosman('getslow', i[1], i[2], i[3], i[4])
		if command == 'udp':
			dosman('udp', i[1], i[2], i[3], i[4])
		if command == 'udplag':
			dosman('udplag', i[1], i[2], i[3], i[4])
		if command == 'click':
			threading.Thread(target=settings.clickad, args=(i[1], useragent)).start()
		return True
	except Exception:
		return False
Example #18
def process_files(dataset, shared_list, finished_list, config_info):
    process_name = multiprocessing.current_process().name
    url = dataset['url']
    data_type = dataset['type']
    data_sub_type = dataset['subtype']
    filename = dataset['filename']
    save_path = '/usr/src/app/tmp'

    if dataset['status'] == 'active':
        if url not in shared_list and url not in finished_list:
            shared_list.append(url)
            download(process_name, url, filename, save_path)
            shared_list.remove(url)
            if 'filename_uncompressed' in dataset:
                decompress(process_name, filename, save_path)
            finished_list.append(url)
        elif url in finished_list:
            logger.info(
                '{}: URL already downloaded via another process: {}'.format(
                    process_name, url))
        elif url in shared_list:
            logger.info(
                '{}: URL already downloading via another process: {}'.format(
                    process_name, url))
            logger.info(
                '{}: Waiting for other process\'s download to finish.'.format(
                    process_name))
            while url not in finished_list:
                time.sleep(10)

        if 'filename_uncompressed' in dataset:
            filename = dataset['filename_uncompressed']
            logger.info(
                '{}: Found uncompressed filename entry, uploading {}.'.format(
                    process_name, dataset['filename_uncompressed']))
        upload_process(process_name, filename, save_path, data_type,
                       data_sub_type, config_info)
Example #19
def checkData(satName, lcycle, ogdr_files_page_URL, html, lfile):
    print()
    print("INSIDE CHECK DATA FUNCTION")
    print("satName={} ".format(satName))
    print("lcycle={} lfile={}".format(lcycle, lfile))
    print("ogdr_files_page_URL={} ".format(ogdr_files_page_URL, lfile))
    print()
    print("reading files")
    for line in html:
        if satName in line:
            start = line.index(satName)
            end = line.index('.nc') + 3
            name = line[start:end]
            if name > lfile:
                print("new file found :", name)
                print("*****donwloading new netCDF file ....*****")
                download(satName, ogdr_files_page_URL + '/' + name, name,
                         lcycle)
                print("*****download completed.*****")
                print()
                # print("*****processing the new file*****")
                # process.process(satName, lfile)
                # print("*****processing complete.*****")
                # print()
                lfile = name
                # writing updates to JAx_last.txt
                print("writing last downloads info to", satName + '_last.txt')
                print('lcycle={} lfile={}'.format(lcycle, lfile))
                with open(
                        op.join(app_dir, 'data', 'last',
                                satName + '_last.txt'), "w") as f:
                    f.write(lcycle + '\n')
                    f.write(lfile + '\n')
                print('write complete')
                print("******")
    return lfile
Example #20
def init_page_queue(number_downloader_threads: int = 1) -> List[str]:
	ret = []
	number_pages = get_number_pages(download(get_listing_url()))
	page_numbers = [i for i in range(1, number_pages + 1)]
	shuffle(page_numbers)
	pages_html = download_multiple({get_listing_url(GAME_VERSION, i) for i in page_numbers})

	with concurrent.futures.ProcessPoolExecutor(max_workers=number_downloader_threads) as executor:
		project_ids = {
			executor.submit(get_project_links, p): p
			for p in pages_html if p is not None
		}
		for future in concurrent.futures.as_completed(project_ids):
			ret.extend(future.result())
	return ret
Example #21
    def run(self):
        dl_name = download(self)
        self.dl_name = dl_name

        if dl_name and self.stopped:
            try:
                os.remove(self.dl_directory + '/' + dl_name)
            except:
                print(f'Failed to remove: {self.dl_directory}/{dl_name}')

        if self.paused:
            self.signals.update_signal.emit(self.data, [None, None, 'Paused', '0 B/s'])
        else:
            if not dl_name:
                self.complete = True
Example #22
def iteration(url, index_begin=1, index_end=-1, max_errors=5, getPage=False):
	""" Batch-fetch links whose URLs are laid out by a numeric index.
	"""
	num_errors = 0
	links = [] if not getPage else {}
	page = None
	for index in itertools.count(index_begin):
		index_url = '%s-%d' % (url, index)
		try: page = download(index_url)
		except: pass
		if page is not None:
			num_errors = 0
			if not getPage:
				links.append(index_url)
			else:
				links[index_url]= page
		else:
			num_errors += 1
			if num_errors >= max_errors:
				break
		if index == index_end:
			break

	return links
Example #23
def install():
    dl_dir = "C:\\ReviewBoard\\Downloads"
    download(url, dl_dir, file_name)
    call([pilpath, "/qn"])
Example #24
def get_data_from_file(username, code_list, name_list):  # load the data
    list2 = []
    list1 = []
    filename = 'static\\' + username + '.txt'
    f = open(filename, 'r', encoding='utf-8')
    file_list = f.readlines()
    f.close()
    for one in file_list:
        one_list = one.split(',,')
        data_class = data1(one_list[0], one_list[1], one_list[2], one_list[3],
                           one_list[4], one_list[5], one_list[6], one_list[7],
                           one_list[8], one_list[9], one_list[10],
                           one_list[11], one_list[12], one_list[13],
                           one_list[14], one_list[15], one_list[16],
                           one_list[17])
        list1 += [data_class]
    if list1 != []:
        for person in list1:
            student = data2(person.student_ID, person.name, person.department,
                            person.major, person.grade, person.graduate_time,
                            person.student_status, person.failed_number,
                            person.center_credits, person.courses_must_to_take,
                            person.a_group, person.b_group, person.c_group,
                            person.d_group,
                            person.professional_elective_courses,
                            person.enterprise_education_courses,
                            person.general_courses, person.others, '无', '无')
            change(student, code_list)

            # Compute the one_direction and another_direction fields
            a = student.a_group.replace("\xc2\xa0", " ").split(',')
            b = student.b_group.replace("\xc2\xa0", " ").split(',')
            c = student.c_group.replace("\xc2\xa0", " ").split(',')
            d = student.d_group.replace("\xc2\xa0", " ").split(',')
            tmp = [a, b, c, d]
            for group in tmp:
                if len(group) > 2:
                    if group[2] == ' ':
                        group[2] = 0
            if int(a[2]) + int(a[3]) >= 15:
                student.one_direction = a[0]
                student.another_direction = int(b[2]) + int(b[3]) + int(
                    c[2]) + int(c[3])

            elif int(b[2]) + int(b[3]) >= 15:
                student.one_direction = b[0]
                student.another_direction = int(a[2]) + int(a[3]) + int(
                    c[2]) + int(c[3])

            elif int(c[2]) + int(
                    c[3]) >= 12 and student.others['SE315'] == '通过':
                student.one_direction = c[0]
                student.another_direction = int(a[2]) + int(a[3]) + int(
                    b[2]) + int(b[3]) - 3

            else:
                student.another_direction = int(a[2]) + int(a[3]) + int(
                    c[2]) + int(c[3]) + int(b[2]) + int(b[3])

            list2.append(student)
    download(username, list2, code_list, name_list)
    return list2
Example #25
def download_files():
    subprocess.call("rm downloads/* 2>/dev/null", shell=True)
    download(ois_reports)
Example #26
def main(_):
    # Get the raw_data.
    train_data_filename = download('train-images-idx3-ubyte.gz')
    train_labels_filename = download('train-labels-idx1-ubyte.gz')
    test_data_filename = download('t10k-images-idx3-ubyte.gz')
    test_labels_filename = download('t10k-labels-idx1-ubyte.gz')

    # Extract it into numpy arrays.
    train_data = extract_data(train_data_filename, 60000)
    train_labels = extract_labels(train_labels_filename, 60000)
    test_data = extract_data(test_data_filename, 10000)
    test_labels = extract_labels(test_labels_filename, 10000)

    # Generate a validation set.
    validation_data = train_data[:VALIDATION_SIZE, ...]
    validation_labels = train_labels[:VALIDATION_SIZE]
    train_data = train_data[VALIDATION_SIZE:, ...]
    train_labels = train_labels[VALIDATION_SIZE:]
    num_epochs = NUM_EPOCHS

    train_size = train_labels.shape[0]
    x = tf.placeholder(
        data_type(),
        shape=[
            BATCH_SIZE,
            IMAGE_SIZE,
            IMAGE_SIZE,
            NUM_CHANNELS])
    y = tf.placeholder(tf.int64, shape=[BATCH_SIZE, ])
    eval_data = tf.placeholder(data_type(),
                               shape=(EVAL_BATCH_SIZE,
                                      IMAGE_SIZE,
                                      IMAGE_SIZE,
                                      NUM_CHANNELS))

    # The variables below hold all the trainable weights.
    conv1_weights = tf.Variable(tf.truncated_normal([11, 11, NUM_CHANNELS, 64],
                                                    stddev=0.1,
                                                    seed=SEED,
                                                    dtype=data_type()))
    conv1_biases = tf.Variable(
        tf.zeros([64], dtype=data_type()))

    conv2_weights = tf.Variable(tf.truncated_normal([5, 5, 64, 192],
                                                    stddev=0.1,
                                                    seed=SEED,
                                                    dtype=data_type()))
    conv2_biases = tf.Variable(
        tf.constant(
            0.1,
            shape=[192],
            dtype=data_type()))

    conv3_weights = tf.Variable(tf.truncated_normal([3, 3, 192, 384],
                                                    stddev=0.1,
                                                    seed=SEED,
                                                    dtype=data_type()))
    conv3_biases = tf.Variable(
        tf.constant(
            0.1,
            shape=[384],
            dtype=data_type()))

    conv4_weights = tf.Variable(tf.truncated_normal([3, 3, 384, 256],
                                                    stddev=0.1,
                                                    seed=SEED,
                                                    dtype=data_type()))
    conv4_biases = tf.Variable(
        tf.constant(
            0.1,
            shape=[256],
            dtype=data_type()))

    conv5_weights = tf.Variable(tf.truncated_normal([3, 3, 256, 256],
                                                    stddev=0.1,
                                                    seed=SEED,
                                                    dtype=data_type()))
    conv5_biases = tf.Variable(
        tf.constant(
            0.1,
            shape=[256],
            dtype=data_type()))

    # fully connected, depth 4096
    fc1_weights = tf.Variable(tf.truncated_normal([1 * 1 * 256, 4096],
                                                  stddev=0.1,
                                                  seed=SEED,
                                                  dtype=data_type()))
    fc1_biases = tf.Variable(
        tf.constant(
            0.1,
            shape=[4096],
            dtype=data_type()))

    fc2_weights = tf.Variable(tf.truncated_normal([4096, 4096],
                                                  stddev=0.1,
                                                  seed=SEED,
                                                  dtype=data_type()))
    fc2_biases = tf.Variable(
        tf.constant(
            0.1,
            shape=[4096],
            dtype=data_type()))

    fc3_weights = tf.Variable(tf.truncated_normal([4096, NUM_LABELS],
                                                  stddev=0.1,
                                                  seed=SEED,
                                                  dtype=data_type()))
    fc3_biases = tf.Variable(
        tf.constant(
            0.1,
            shape=[NUM_LABELS],
            dtype=data_type()))

    def model(data):
        """The logs definition"""
        # Conv 1
        with tf.name_scope('conv1'):
            conv1 = tf.nn.conv2d(data,
                                 conv1_weights,
                                 strides=[1, 4, 4, 1],
                                 padding='SAME')
            # Bias and rectified linear non_linearity.
            relu = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
            norm = tf.nn.local_response_normalization(relu,
                                                      depth_radius=2,
                                                      bias=2.0,
                                                      alpha=1e-4,
                                                      beta=0.75)
            print_activations(conv1)
        # Max pooling. The kernel size spec (ksize) also follows the NHWC layout.
        pool1 = tf.nn.max_pool(norm,
                               ksize=[1, 3, 3, 1],
                               strides=[1, 2, 2, 1],
                               padding='SAME')
        print_activations(pool1)

        # Conv 2
        with tf.name_scope('conv2'):
            conv2 = tf.nn.conv2d(pool1,
                                 conv2_weights,
                                 strides=[1, 1, 1, 1],
                                 padding='SAME')
            # Bias and rectified linear non_linearity.
            relu = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
            norm = tf.nn.local_response_normalization(relu,
                                                      alpha=1e-4,
                                                      beta=0.75,
                                                      depth_radius=2,
                                                      bias=2.0)
            print_activations(conv2)
        # Max pooling. The kernel size spec (ksize) also follows the NHWC layout.
        pool2 = tf.nn.max_pool(norm,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='SAME')
        print_activations(pool2)

        # Conv 3
        with tf.name_scope('conv3'):
            conv3 = tf.nn.conv2d(pool2,
                                 conv3_weights,
                                 strides=[1, 1, 1, 1],
                                 padding='SAME')
            # Bias and rectified linear non_linearity.
            relu = tf.nn.relu(tf.nn.bias_add(conv3, conv3_biases))
            print_activations(conv3)

        # Conv 4
        with tf.name_scope('conv4'):
            conv4 = tf.nn.conv2d(relu,
                                 conv4_weights,
                                 strides=[1, 1, 1, 1],
                                 padding='SAME')
            # Bias and rectified linear non_linearity.
            relu = tf.nn.relu(tf.nn.bias_add(conv4, conv4_biases))
            norm = tf.nn.lrn(relu,
                             4,
                             bias=1.0,
                             alpha=0.001 / 9.0,
                             beta=0.75)
            print_activations(conv4)

        # Conv 5
        with tf.name_scope('conv5'):
            conv5 = tf.nn.conv2d(norm,
                                 conv5_weights,
                                 strides=[1, 1, 1, 1],
                                 padding='SAME')
            # Bias and rectified linear non_linearity.
            relu = tf.nn.relu(tf.nn.bias_add(conv5, conv5_biases))
            print_activations(conv5)
        # Max pooling. The kernel size spec (ksize) also follows the NHWC layout.
        pool5 = tf.nn.max_pool(relu,
                               ksize=[1, 3, 3, 1],
                               strides=[1, 2, 2, 1],
                               padding='SAME')
        print_activations(pool5)

        # Fully 1
        fc1 = tf.reshape(pool5, [-1, fc1_weights.get_shape().as_list()[0]])
        fc1 = tf.nn.relu(tf.matmul(fc1, fc1_weights) + fc1_biases)
        # dropout
        fc1 = tf.nn.dropout(fc1, 0.5)

        # Fully 2
        fc2 = tf.reshape(fc1, [-1, fc2_weights.get_shape().as_list()[0]])
        fc2 = tf.nn.relu(tf.matmul(fc2, fc2_weights) + fc2_biases)
        # dropout
        fc2 = tf.nn.dropout(fc2, 0.5)

        # Fully 3
        fc3 = tf.reshape(fc2, [-1, fc3_weights.get_shape().as_list()[0]])
        out = tf.nn.relu(tf.matmul(fc3, fc3_weights) + fc3_biases)
        return out

    # Training computation: logits + cross_entropy loss.
    logits = model(x)
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=y))

    # L2 regularization for the fully connected parameters.
    regularizers = (tf.nn.l2_loss(fc1_weights) +
                    tf.nn.l2_loss(fc1_biases) +
                    tf.nn.l2_loss(fc2_weights) +
                    tf.nn.l2_loss(fc2_biases) +
                    tf.nn.l2_loss(fc3_weights) +
                    tf.nn.l2_loss(fc3_biases))
    loss += 5e-4 * regularizers

    # Optimizer: set up a variable that's incremented once per batch and
    # controls the learning rate decay.
    batch = tf.Variable(0, dtype=data_type())
    # Decay once per epoch, using an exponential schedule starting at 0.01.
    learning_rate = tf.train.exponential_decay(
        0.01,
        batch * BATCH_SIZE,
        train_size,
        0.95,
        staircase=True)
    # Use Adam for the optimization.
    optimizer = tf.train.AdamOptimizer(
        learning_rate=learning_rate,
        beta1=0.9).minimize(loss=loss, global_step=batch)

    # Predictions for the current training minibatch.
    train_prediction = tf.nn.softmax(logits)

    # Predictions for the test and validation.
    eval_prediction = tf.nn.softmax(model(eval_data))

    def eval_in_batch(session, data):
        """Get all predictions for a dataset by running."""
        size = data.shape[0]
        if size < EVAL_BATCH_SIZE:
            raise ValueError("Batch size for evals larges than dataset:")
        pred = np.ndarray(shape=(size, NUM_LABELS))
        for begin in range(0, size, EVAL_BATCH_SIZE):
            end = begin + EVAL_BATCH_SIZE
            if end <= size:
                pred[begin:end, :] = session.run(eval_prediction, feed_dict={
                    eval_data: data[begin:end, ...]})
            else:
                batch_predictions = session.run(eval_prediction,
                                                feed_dict={eval_data: data[-EVAL_BATCH_SIZE:, ...]})
                pred[begin:, :] = batch_predictions[begin - size:, :]
        return pred
    start_time = time.time()
    with tf.Session() as sess:
        # Run all the initializers
        tf.global_variables_initializer().run()
        print("Init all variables complete!")
        for step in range(int(num_epochs * train_size) // BATCH_SIZE):
            offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)
            batch_data = train_data[offset:(offset + BATCH_SIZE), ...]
            batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
            feed_dict = {x: batch_data,
                         y: batch_labels}
            sess.run(optimizer, feed_dict=feed_dict)
            if step % EVAL_FREQUENCY == 0:
                # fetch some extra nodes' data
                l, lr, predictions = sess.run([loss, learning_rate, train_prediction],
                                              feed_dict=feed_dict)
                elapsed_time = time.time() - start_time
                start_time = time.time()
                print(f"Step {step} "
                      f"(epoch {(float(step) * BATCH_SIZE / train_size):.2f}) "
                      f"{(1000 * elapsed_time / EVAL_FREQUENCY):.1f} ms")
                print(f"Minibatch loss: {l:.3f}, learning rate: {lr:.6f}")
                print(
                    f"Minibatch error: {error_rate(predictions,batch_labels):.1f}%")

                print(
                    f"Validation error: {error_rate(eval_in_batch(sess, validation_data), validation_labels):.1f}%")
                sys.stdout.flush()
                # Finally print the result!
        test_error = error_rate(eval_in_batch(sess, test_data), test_labels)
        print(f"Test error: {test_error:.1f}%.")
        if FLAGS.self_test:
            print(f"test_error {test_error}")
            assert test_error == 0.0, f"expected 0.0 test_error, got {test_error:.2f}"
Example #27
def data_deal(list1, username):  # list1 is the raw data received from spider.py
    list2 = []
    # Default configuration
    code_list = [
        'SE112', 'SE418', 'SE419', 'SE420', 'SE422', 'SE417', 'SE315', 'EI901'
    ]
    name_list = [
        "软件工程职业素养, SE112", "软件产品设计与用户体验,SE418", "企业软件质量保证,SE419",
        "软件知识产权保护,SE420", "企业软件过程与管理,SE422", "软件工程经济学,SE417", "操作系统,SE315",
        "工程实践与科技创新,EI901"
    ]

    if list1 != []:
        for person in list1:
            student = data2(person.student_ID, person.name, person.department,
                            person.major, person.grade, person.graduate_time,
                            person.student_status, person.failed_number,
                            person.center_credits, person.courses_must_to_take,
                            person.a_group, person.b_group, person.c_group,
                            person.d_group,
                            person.professional_elective_courses,
                            person.enterprise_education_courses,
                            person.general_courses, person.others, '无', '无')

            change(student, code_list)

            # Compute the one_direction and another_direction fields
            a = student.a_group.replace("\xc2\xa0", " ").split(',')
            b = student.b_group.replace("\xc2\xa0", " ").split(',')
            c = student.c_group.replace("\xc2\xa0", " ").split(',')
            d = student.d_group.replace("\xc2\xa0", " ").split(',')
            tmp = [a, b, c, d]
            for group in tmp:
                if len(group) > 2:
                    if group[2] == ' ':
                        group[2] = 0
            if int(a[2]) + int(a[3]) >= 15:
                student.one_direction = a[0]
                student.another_direction = int(b[2]) + int(b[3]) + int(
                    c[2]) + int(c[3])

            elif int(b[2]) + int(b[3]) >= 15:
                student.one_direction = b[0]
                student.another_direction = int(a[2]) + int(a[3]) + int(
                    c[2]) + int(c[3])

            elif int(c[2]) + int(
                    c[3]) >= 12 and student.others['SE315'] == '通过':
                student.one_direction = c[0]
                student.another_direction = int(a[2]) + int(a[3]) + int(
                    b[2]) + int(b[3]) - 3

            else:
                student.another_direction = int(a[2]) + int(a[3]) + int(
                    c[2]) + int(c[3]) + int(b[2]) + int(b[3])

            list2.append(student)

    # Persist the data
    filename = 'static\\' + username + '.txt'
    f = open(filename, 'w')
    for person in list1:
        one_person = u''
        one_person += str(person.student_ID) + ',,' + person.name + ',,' + person.department + ',,' + person.major + ',,' + \
            str(person.grade) + ',,' + person.graduate_time + ',,' + person.student_status + ',,' + str(person.failed_number) \
            + ',,' + str(person.center_credits) + ',,' + person.courses_must_to_take + ',,' + person.a_group + ',,' + person.b_group + ',,' +  \
            person.c_group + ',,' + person.d_group + ',,' + person.professional_elective_courses + ',,' + person.enterprise_education_courses \
            + ',,' + person.general_courses + ',,' + person.others
        f.write(one_person + '\n')
    f.close()
    download(username, list2, code_list, name_list)
    return list2
Example #28
def install():
    dl_dir = "C:\\ReviewBoard\\Downloads"
    download(url, dl_dir, file_name)
    call(["msiexec", "/i", \
    "C:\\ReviewBoard\\Downloads\\Silk-Subversion-1.6.17-win32.msi", "/qn"])
    print "end of installation"
Example #29
	def on_selection(self, event):
		w = event.widget
		selection = w.get(w.curselection()[0])
		info = metadata.dictionary(selection)
		self.general_page.update(info)
		self.update_icon(download(metadata.icon(selection)))
Example #30
def process_params(params):
    params = get_params(params)
    mode   = None

    try:    mode = int(params['mode'])
    except: pass

    try:    url = params['url']
    except: url = ''


    if mode == _RADIOROOT:
        try:    return radioRoot(url)
        except: pass


    if mode == _ONAIR:
        try:    return onAir(url)
        except: pass

        
    if mode == _LISTENAGAIN:
        try:    return listenAgain(url)
        except: pass


    if mode == _CHANNEL:
        try:    return channel(url, params['source'])
        except: pass


    if mode == _EPISODE:
        try:    return playEpisode(url, params['name'], params['thumb'], params['fanart'])
        except: pass


    if mode == _SHOW_DOWNLOAD:
        try:    return showDownload()
        except: pass


    if mode == _PLAY_DOWNLOAD:
        try:    return playDownload(url, params['name'])
        except: pass


    if mode == _DELETE:
        try:    
            deleteFile(url, params['name'])
            return refresh()
        except:
            pass


    if mode == _RESET:
        try:    
            ADDON.setSetting(url, '')
            return refresh()
        except:
            pass


    if mode == _DOWNLOAD:
        try:    return download(url, params['name'])
        except: pass

    main()
Example #31
import argparse

from crawler import *
from download import *

parser = argparse.ArgumentParser(prog="download", conflict_handler="resolve")
parser.add_argument("--key",
                    "-k",
                    type=str,
                    required=True,
                    help="key word of the music to search")
args = parser.parse_args()

list_link = search(args.key)
print(list_link)
for link in list_link:
    download(link)
Example #32
listOfMonths = [
    'JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT',
    'NOV', 'DEC'
]
monthNumbers = range(0, 2, 1)

listOfDays = range(0, 31, 1)

pathToSave = "/Users/vinay/PycharmProjects/QuantTradingWithML/Download/src/downloadedFiles/nse/fo/"
secType = "FO"
for year in listOfYears:
    for monthInd in monthNumbers:
        for dayOfMonth in listOfDays:
            day = dayOfMonth + 1
            month = listOfMonths[monthInd]
            dateStr = str(year) + "-" + month + "-" + str(day)
            print "Starting Download for " + dateStr

            nseURL = constructNSEurl(secType, day, month, year)
            print(nseURL)
            saveAs = "fo" + str(day) + month + str(year) + "bhav.csv.zip"

            # weekday = weekDay(year, monthInd, day)
            isWeekend = False
            # weekday == (6, 'Saturday') or weekday == (0, 'Sunday')

            if not isWeekend and download(pathToSave + saveAs, nseURL):
                unzip(pathToSave + saveAs, pathToSave)
                time.sleep(10)
            else:
                print "Download wasn't successful for " + dateStr
                time.sleep(10)
Example #33
def download_media(user,
                   folder,
                   type='photo',
                   limit=None,
                   include_rts=True,
                   time_range=None):
    if time_range:
        start_time, end_time = time_range
    else:
        start_time = time.gmtime(0)
        end_time = time.gmtime()
    App = twitter_api(user)
    page_size = 20
    count = page_size
    if App.pinned_tweet_id != "":
        count = page_size + 1
        App.fetch(1)
        if (start_time < App.pinned_tweet.created_date < end_time):
            media_info = App.pinned_tweet.media_info(type)
            download([item for item in media_info], folder)
    remain = 20 if not limit else limit

    thread = []
    i = 0
    while True:
        # thread=[]
        # i=0
        if remain <= 20:
            count = remain if App.pinned_tweet_id == "" else remain + 1
        App.fetch(count)
        media_list = []
        for tweet_id, tweet in App.tweet_list.items():
            if (not include_rts) and (tweet.retweet_id != ""):
                continue
            if (tweet.created_date < start_time):
                return
            if not (tweet.created_date < end_time):
                continue
            if tweet.retweet_id != "":
                media_info = App.retweet_list[tweet.retweet_id].media_info(
                    type)
            else:
                media_info = tweet.media_info(type)
            media_list.extend(media_info)
        # thread_download(media_list,folder,4)
        # thread download
        if i < 4:
            t = threading.Thread(target=thread_download,
                                 args=(media_list, folder, 4))
            thread.append(t)
            t.start()
            i += 1
        if i == 4:
            for t in thread:
                t.join()
            i = 0
            thread = []

        remain = remain - page_size if remain > 20 else 0
        if not limit:
            remain = 20
        if remain <= 0 or len(App.timeline) == 0:
            break
Example #34
        currently_downloaded = float(numblocks) * blocksize / (1024 * 1024)
        kbps_speed = numblocks * blocksize / (time.time() - start_time)
        if kbps_speed > 0:
            eta = (filesize - numblocks * blocksize) / kbps_speed
        else:
            eta = 0
        kbps_speed = kbps_speed / 1024
        total = float(filesize) / (1024 * 1024)
        mbs = '%.02f MB of %.02f MB' % (currently_downloaded, total)
        e = 'Speed: %.02f Kb/s ' % kbps_speed
        e += 'ETA: %02d:%02d' % divmod(eta, 60)
        dp.update(percent, mbs, e, ' ')
    except:
        percent = 100
        dp.update(percent)
    if dp.iscanceled():
        dp.close()


def noconnection():
    dialog = xbmcgui.Dialog()
    dialog.ok("[COLOR=red][B] ## CONNECTION ERROR ##[/COLOR][/B]",
              "Unable to download needed data....", "Will Try Again.",
              "Press OK or Back to Continue")
    xbmc.sleep(1000)


#dp.close()
download(LOCATION, file2)
xbmc.sleep(1000)
xbmc.executebuiltin('RunAddon(plugin.video.link__tester)')
Example #35
    def __init__(self, pid, password):
        self.opener = self.login(pid, password)
        self.dl = download(self.opener)
Example #36
# We are almost done with setting up our database.
# As a final step we need to also insert the historical prices for various
# indices that trade on the NSE:

# NIFTY, BANKNIFTY etc.
# Just like the cm and fo files, there is a daily file published by the NSE with
# index open, low, high, close for all these indices.

# Let's first define a function to construct the url for this file
# https://www1.nseindia.com/content/indices/ind_close_all_03052016.csv

from download import *

list_of_years = [
    2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016
]

localDir = '/Users/swethakolalapudi/pytest/'
for year in list_of_years:
    for month in range(12):
        for day in range(31):
            url = constructIndexURL(day + 1, month + 1, year)
            fileName = url.split("/")[-1]
            download(localDir + fileName, url)
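constructIndexURL() itself lives in the download module and is not shown here. Judging from the sample URL quoted in the comments above (ind_close_all_03052016.csv, i.e. a ddmmyyyy date suffix), a plausible sketch looks like this; the zero-padding and base URL are assumptions drawn from that single sample:

# Hypothetical reconstruction of constructIndexURL(), based only on the
# sample URL quoted in the comments above.
def constructIndexURL(day, month, year):
    # Index files are named ind_close_all_ddmmyyyy.csv
    dateStr = "%02d%02d%d" % (day, month, year)
    return ("https://www1.nseindia.com/content/indices/ind_close_all_"
            + dateStr + ".csv")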
Example #37
def install_jar(name, url, filepath):
    dst = os.path.join(filepath, name)
    if os.path.isfile(dst):
        return dst
    download(url, dst)
    return dst
Example #38
def install():
    file_name = "Git-1.7.6-preview20110708.exe"
    url = "http://msysgit.googlecode.com/files/" + file_name
    dl_dir = "C:\\ReviewBoard\\Downloads"
    download(url, dl_dir, file_name)
    call([dl_dir + "\\" + file_name, "/silent"])
Example #39
def link_crawler(seed_url, link_regex=None, delay=2.0, max_depth=2, max_urls=-1, useProxy=True, local_file=None, save=False):
	""" Collect every link in the crawled pages that matches link_regex:
		seed_url		root URL
		link_regex		regular expression the links must match
		delay			crawl delay
		max_depth		maximum crawl depth
		max_urls		maximum number of links to store
		local_file		local save path
		save 			whether to save
	"""

	begin_time = datetime.now()
	pre_time = datetime.now()
	crawl_queue = collections.deque([seed_url])  # queue of links still to crawl
	seen = {seed_url: {'depth': 0}}  # initial depth is 0; seen maps each link to its crawl depth
	urls_num = 0  # number of links collected
	pages_num = 0  # number of pages visited
	index_proxise = -1  # index of the current proxy; -1 also means "throttle and use our own IP"
	# rp = get_robots(seed_url)  # robots.txt rules
	if useProxy:	# fetch IPs from the proxy pool
		proxies = getProxies(url=seed_url, delay=delay, protocol=1)
		get_proxies_time = (datetime.now() - begin_time).seconds
		num_proxise = len(proxies)
	throttle = Throttle(delay)  # rate-limiting valve
	okProxy = useProxy and (num_proxise != 0)

	while crawl_queue:  # traverse the links
		url = crawl_queue.pop()
		if okProxy:  # cycle through the proxies
			if index_proxise == num_proxise:
				index_proxise = -1
		try:
			# if rp.can_fetch('Mozilla/5.0 (Windows NT 6.1; Win64; x64)', url):
			if not contain_zh(url):  # skip links whose Chinese characters would garble
				if index_proxise == -1: throttle.wait(url)  # rate-limit
				depth = seen[url]['depth']
				print('\nPage [%d]  links crawled: %d  depth: %d' % (pages_num + 1, urls_num, depth))
				if okProxy:
					if index_proxise != -1:
						page = download(url=url, proxy=proxies[index_proxise])  # proxy IP for this round
					else:
						page = download(url)  # use our own IP
					index_proxise += 1
				else:
					page = download(url)
				links = []
				if depth != max_depth:
					links_ = getbyre(page)
					if links_ is not None:  # guard against empty pages
						if link_regex:  # if a regex was given, match against it
							links.extend(link for link in links_ if re.match(link_regex, link))
						else:
							links.extend(link for link in links_)
						for link in links:
							link = normalize(seed_url, link)  # normalize into an absolute link
							if link not in seen:  # skip links we have already seen
								pre_time = datetime.now()
								a_link = {}
								a_link['depth'] = depth + 1
								seen[link] = a_link  # record the link
								urls_num += 1
								if same_domain(seed_url, link):  # same domain: queue it for crawling
									crawl_queue.append(link)
				pages_num += 1
		except Exception:
			print('Request timed out or another error occurred')
		# Stop once the link count hits the cap (or, to dodge crawler traps, stop if no
		# new links appear for 600 s:  or (datetime.now() - pre_time).seconds > 600)
		if urls_num >= max_urls and max_urls != -1:
			break
Example #40
listOfYears = [2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016]

for year in listOfYears:
    for month in listOfMonths:
        for dayOfMonth in range(31):

            day = dayOfMonth + 1
            # range(31) will create a sequence starting from 0 but dates start from 1
            # so let's add 1

            nseURL = constructNSEurl("CM", day, month, year)
            fileName = "cm" + str(day) + month + str(year) + "bhav.csv.zip"

            localFilePath = "/Users/swethakolalapudi/pytest/"

            download(localFilePath + fileName, nseURL)
            unzip(localFilePath + fileName, localFilePath)

            # This will first download and then unzip our file in the given location

            time.sleep(10)
            # We give it some time between each download request so we don't inadvertently
            # overwhelm the NSE website
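constructNSEurl() is likewise defined in the project's download module and not shown on this page. Based on the bhavcopy file-naming convention visible above (cm{day}{month}{year}bhav.csv.zip) and NSE's historical archive, a plausible sketch might be the following; the archive directory layout is an assumption, not something this page confirms:

# Hypothetical reconstruction of constructNSEurl(); the directory layout of
# the NSE archive is an assumption.
def constructNSEurl(secType, day, month, year):
    # CM (cash market) files live under EQUITIES, FO files under DERIVATIVES.
    section = "EQUITIES" if secType == "CM" else "DERIVATIVES"
    fileName = secType.lower() + "%02d%s%d" % (day, month, year) + "bhav.csv.zip"
    return ("https://www1.nseindia.com/content/historical/" + section + "/"
            + str(year) + "/" + month + "/" + fileName)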
Example #41
tracklist = []

try:
    if uri[1] == "user":
        print("\n" + "||||||||||||||||||||||||||||||||||||||")
        print("Playlist Details: ")
        print("User: "******"Playlist ID: " + uri[4])
        tracklist = get_playlist(spotify, uri[2], uri[4])
    elif uri[1] == "album":
        print("\n" + "||||||||||||||||||||||||||||||||||||||")
        print("Album Details: ")
        print("Album ID: " + uri[2])
        tracklist = get_album(spotify, uri[2])
except:
    print("Invalid URI!")
    sys.exit(0)

print("\n" + "||||||||||||||||||||||||||||||||||||||")
print("T R A C K L I S T")
for song in tracklist:
    print(song)

print("\n" + "||||||||||||||||||||||||||||||||||||||")
ch = input("Download All Songs (y/n): ")
if ch == 'n' or ch == 'N':
    sys.exit(0)

for song in tracklist:
    download(song)
Example #42
            for p in sorted(records.keys()):
                n = records.get(p, 0)
                if n >= out_num:
                    nn = n - out_num
                    out_num = 0
                else:
                    out_num = out_num - n
                    nn = 0
                records.update({p: nn})
                if out_num == 0:
                    break
            previous_num = records.get(current_price, 0)
            current_num = previous_num + volume[i]
            records.update({current_price: current_num})
            # print(current_price)
            # print(dates[i], current_price, floatCapitalOfAShares[i], floatSharesOfAShares[i])

    price = [k for k in records.keys()]
    num = [records[k] for k in records.keys()]
    print(code_name[c], min([k for k in records.keys() if records[k]]))
    # l1=plt.bar(price,num)
    # plt.title(c+" - "+et)
    # plt.show()


from download import download

et = '2021-01-28'
download(et=et)
for c in code_list:
    cal(c, et)