import os
import sys
import zipfile
import xml.etree.ElementTree as ET


def generate_profiles(mod_dir, outstream=sys.stdout):
    # Open every mod in `mod_dir` and look for XML profiles.
    profiles = []
    for f in os.listdir(mod_dir):
        if not f.endswith(".wotmod"):
            # Not a mod file; ignore.
            continue
        try:
            with zipfile.ZipFile(os.path.join(mod_dir, f)) as zf:
                for name in zf.namelist():
                    if name.endswith(".xml"):
                        outstream.write(
                            "Found an XML file {:s} in {:s}; attempting to extract profile header(s).\n"
                            .format(name, f))
                        xmltree = ET.fromstring(zf.read(name))[0]
                        if xmltree.tag != "models":
                            # Not the expected UML profile format; ignore.
                            continue
                        # Read all children as profile headers.
                        profiles.extend(child.tag for child in xmltree)
        except zipfile.BadZipFile:
            pass  # not a zip file; ignore
    return profiles
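
A minimal usage sketch for the function above; the mods directory path is hypothetical:

import sys

# Scan the mods directory and print every profile header found.
profiles = generate_profiles("C:/Games/World_of_Tanks/mods/1.10.0")
for tag in profiles:
    print(tag)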
Example 2
def home(request):
    required_zip = Image.objects.last()
    final_filenames = []
    # Unpack the uploaded archive into a media folder named after the zip.
    with zipfile.ZipFile(required_zip.zipped_images) as zf:
        foldername = os.path.splitext(zf.filename)[0]
        zf.extractall(path="media/" + foldername)
        filenames = zf.namelist()
    path = "media/" + foldername + "/"

    # Resize every extracted image in place.
    for filename in filenames:
        final_path = path + filename
        img = pillow_image.open(final_path)
        output_size = (350, 350)
        img = img.resize(output_size)
        img.save(final_path)

    # Group the filenames into rows of four for the template.
    for i in range(0, len(filenames), 4):
        final_filenames.append(filenames[i:i + 4])
    context = {
        "foldername": foldername,
        "final_filenames": final_filenames,
    }
    return render(request, 'classifier/home.html', context)
Example 3
    def unzip(self, list=True, wildcard=None):
        """
        :param list: if True, the path_grabber class method will look for the base
        path based on the wildcard from self.parsed_path and return a list of zip paths.
        """
        print('unzipping')

        if list:
            Unzipper.path_grabber(self, wildcard=wildcard)

        for file_path in self.parsed_path_list:
            # splitext (not str.strip) removes the ".zip" suffix; strip('.zip')
            # would eat any leading/trailing '.', 'z', 'i' and 'p' characters.
            output = os.path.join(
                self.base_output_folder,
                os.path.splitext(os.path.basename(file_path))[0])
            try:
                with zipfile.ZipFile(file=file_path, mode='r') as zip_ref:
                    if not os.path.exists(output):
                        os.makedirs(output)
                        zip_ref.extractall(path=output)
                    else:
                        print("zip exists")
            except zipfile.BadZipFile:
                print("Error: Zip file is corrupted")
Example 4
def zip_test_folder():
    # Renamed from `zip` to avoid shadowing the builtin.
    folder = 'C:\\Users\\Administrator\\Desktop\\OpsManage-beta\\OpsManage\\views\\test'
    with zipfile36.ZipFile('test.zip', 'a') as mas:
        for name in os.listdir(folder):
            # Write the full path, storing each file under its bare name.
            mas.write(os.path.join(folder, name), arcname=name)
Example 5
def main():
    url = 'http://datosabiertos.salud.gob.mx/gobmx/salud/datos_abiertos/datos_abiertos_covid19.zip'

    # Download the archive to a single, consistent location.
    zip_path = 'C:/Users/eact/OneDrive/Escritorio/Alan/ServicioSocial_IA/COVID19/BaseDatos/Actual.zip'
    myfile = requests.get(url)
    with open(zip_path, 'wb') as f:
        f.write(myfile.content)

    # Extract the zipfile.
    with z.ZipFile(zip_path, 'r') as my_zip:
        s = my_zip.namelist()
        print(s)
        my_zip.extractall()  # extract everything

    print("BD: 19_700_200912_covid19.csv")
    covid_20("19_700_200912_covid19.csv")
Example 6
    def __init__(self, isfolder, locationOrPath, *args, **kwargs):
        if isfolder:
            # Folder-backed: create the directory; no zip handle is needed.
            if not os.path.exists(locationOrPath):
                os.makedirs(locationOrPath)
            self._base = None
            self._location = os.path.splitext(locationOrPath)[0]
        else:
            # Zip-backed: open the archive; no directory is needed.
            self._base = zipfile.ZipFile(locationOrPath, *args, **kwargs)
            self._location = None
Example 7
def unzip(files, path):
    try:
        head, tail = os.path.split(files)
        with zipfile.ZipFile(files, "r") as zip_ref:
            # Malware-sample archives are conventionally protected with the
            # password "infected".
            zip_ref.setpassword(b"infected")
            zip_ref.extractall(path + "/../db/unzip/" + tail)
    except RuntimeError:
        print("[X] File skipped " + files)
    except Exception:
        print("[X] Failed extract file " + files)
Example 8
def data_to_images(data):
    MAYBE_A_MOVIE = [
        'countdown',
        'fingerprint',
    ]
    if 'images' in data["items"]["behavior"]["stimuli"]:

        # Sometimes the source is a zipped pickle:
        metadata = get_image_metadata(data)
        try:
            with open(metadata['image_set'], 'rb') as pkl_file:
                image_set = load_pickle(pkl_file)
            images, images_meta = get_image_data(image_set)
            image_table = dict(
                metadata=metadata,
                images=images,
                image_attributes=images_meta,
            )
        except (AttributeError, UnicodeDecodeError, pickle.UnpicklingError):
            # Fall back to reading the pickle out of the first member of the zip.
            with zipfile.ZipFile(metadata['image_set']) as zfile:
                finfo = zfile.infolist()[0]
                with zfile.open(finfo) as ifile:
                    image_set = load_pickle(ifile)
            images, images_meta = get_image_data(image_set)
            image_table = dict(
                metadata=metadata,
                images=images,
                image_attributes=images_meta,
            )
        except FileNotFoundError:
            logger.critical('Image file not found: {0}'.format(
                metadata['image_set']))
            image_table = dict(
                metadata={},
                images=[],
                image_attributes=[],
            )
    else:
        image_table = dict(
            metadata={},
            images=[],
            image_attributes=[],
        )

    # TODO: make this better, all we need to know is if there's at least one key...
    static_stimuli_names = [
        k for k in data['items']['behavior'].get('items', {}).keys()
        if k in MAYBE_A_MOVIE
    ]
    if len(static_stimuli_names) > 0:  # has static stimuli
        for name, meta in get_movie_metadata(data).items():
            # Prefix each name with 'movie:' to keep movie metadata distinct.
            image_table['metadata']['movie:%s' % name] = meta

    return image_table
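
The fallback branch above reads a pickle out of a zip archive without extracting it to disk; a self-contained sketch of just that pattern, with a hypothetical archive name:

import pickle
import zipfile

def load_pickle_from_zip(archive_path):
    # Open the first member of the archive and unpickle it in memory.
    with zipfile.ZipFile(archive_path) as zf:
        first = zf.infolist()[0]
        with zf.open(first) as fh:
            return pickle.load(fh)

# image_set = load_pickle_from_zip("image_set.zip")  # hypothetical file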
Example 9
    def __init__(self, poetry, venv, io, target_fp, original=None):
        super(WheelBuilder, self).__init__(poetry, venv, io)

        self._records = []
        self._original_path = self._path
        if original:
            self._original_path = original.file.parent

        # Open the zip file ready to write
        self._wheel_zip = zipfile.ZipFile(target_fp,
                                          'w',
                                          compression=zipfile.ZIP_DEFLATED)
Example 10
def unzip(zip_file):
    print("[*] Beginning extraction process...")
    with zipfile.ZipFile(zip_file) as zf:
        zf.setpassword(b"ar_unibg")
        for i, f in enumerate(zf.filelist):
            # Rename each member before extraction so files land in DICOM_C2/
            # with sequential names instead of their original paths.
            f.filename = os.path.join("DICOM_C2", "extracted_{0:03}".format(i))
            zf.extract(f)
            print("--- Extracted '%s'" % f.filename)

    print("[*] Done")
Example 11
def is_valid_zip(zip_file):
    try:
        # Opening the archive validates its central directory; it does not
        # CRC-check the members (use testzip() for that).
        zipfile.ZipFile(zip_file)
        return True
    except zipfile.BadZipFile:
        return False
    except Exception as e:
        print('trouble in zip_file {}'.format(zip_file))
        raise e
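
A stricter variant that CRC-checks every member via testzip(), sketched with a hypothetical helper name:

import zipfile

def is_intact_zip(zip_path):
    try:
        with zipfile.ZipFile(zip_path) as zf:
            # testzip() reads every member and returns the name of the first
            # corrupt one, or None when all CRCs match.
            return zf.testzip() is None
    except zipfile.BadZipFile:
        return False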
Example 12
def extract_files(PATH):
    """This function extracts data files from the zipped folder located in PATH."""
    # files path
    data_files = os.path.join(PATH, 'UseCase_3_Datasets.zip')

    # Unzip only the two files of interest.
    with open(data_files, 'rb') as h, zipfile.ZipFile(h) as obj:
        for name in obj.namelist():
            if name in ['sales_granular.csv', 'Surroundings.json']:
                obj.extract(name, PATH)
Example 13
def main():
    args = parser.parse_args()

    if os.path.exists(args.output_path):
        raise ValueError('Output path already exists', args.output_path)
    os.makedirs(args.output_path)

    # Include images of type jpg and png
    images_full_path = glob(os.path.join(args.input_path, '*', '*.jpg')) \
                     + glob(os.path.join(args.input_path, '*', '*.png'))

    print("Num images found: ", len(images_full_path))

    images = []
    classes = []
    for i in images_full_path:
        print(i)
        img_type = i.split('/')[-1].split('.')[-1]  # 'jpg' or 'png'
        i_rel_path = os.path.join(
            *i.split('/')[-2:])  # path including 'class/file'
        class_name = i_rel_path.split('/')[0]

        # Create class directory
        if not os.path.exists(os.path.join(args.output_path, class_name)):
            os.makedirs(os.path.join(args.output_path, class_name))

        # Open image, resize and save in new path
        im = Image.open(i)
        if im.mode not in ['RGB', 'RGBA']:
            continue
        im = im.convert('RGB')
        new_img = im.resize((int(args.new_width), int(args.new_height)))
        new_img_rel_path = i_rel_path.split('.')[0] + "_resized." + img_type
        new_img_path = os.path.join(args.output_path, new_img_rel_path)
        new_img.save(new_img_path, quality=95)

        # Save img relative path and class for index.csv file
        images.append(new_img_rel_path)
        classes.append(class_name)

    # Save index.csv file, one row per image
    dataset_index = pd.DataFrame({'image': images, 'class': classes})
    dataset_index.to_csv(os.path.join(args.output_path, 'index.csv'),
                         index=False)

    # Create zip file with index.csv and resized images
    # Create zip file with index.csv and resized images
    with zipfile.ZipFile(os.path.join(args.output_path, args.zip_filename),
                         'w', zipfile.ZIP_DEFLATED) as zipf:
        zipdir(args.output_path, zipf)
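
zipdir is a project helper not shown in this snippet; a plausible minimal implementation, assuming it walks the tree and stores files under their relative paths:

import os
import zipfile

def zipdir(path, zipf):
    # Add every file under `path`, keyed by its path relative to `path`,
    # so the archive carries no absolute paths.
    for root, _dirs, files in os.walk(path):
        for name in files:
            full = os.path.join(root, name)
            if full.endswith('.zip'):
                continue  # avoid writing the still-open archive into itself
            zipf.write(full, arcname=os.path.relpath(full, path))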
Example 14
def getfiles(request):
    ids = request.session['presies']
    presies = Presentation.objects.filter(id__in=ids)
    file_names = []
    for x in presies:
        file_names.append(x.pptx)

    zip_subdir = "tmp/presentation_folder"
    zip_filename = zip_subdir + ".zip"
    byte_stream = io.BytesIO()
    zf = zipfile.ZipFile(byte_stream, "w")

    for filename in file_names:
        # Credentials redacted; load real values from settings or the
        # environment instead of hard-coding them in source.
        conn = boto.connect_s3('<aws-access-key-id>',
                               '<aws-secret-access-key>')
        bucket = conn.get_bucket('danielsantander-uldl')
        s3_file_path = bucket.get_key(filename)
        response_headers = {
            'response-content-type': 'application/force-download',
            'response-content-disposition':
            'attachment;filename="%s"' % filename
        }
        url = s3_file_path.generate_url(60,
                                        'GET',
                                        response_headers=response_headers,
                                        force_http=True)

        # download the file
        file_response = requests.get(url)

        if file_response.status_code == 200:

            # create a local copy of the file
            string = str(filename)[10:]
            with open(string, 'wb') as f1:
                f1.write(file_response.content)

            # write the file into the zip under the subdirectory
            fdir, fname = os.path.split(string)
            zip_path = os.path.join(zip_subdir, fname)
            zf.write(string, zip_path)

    # close the zip folder and return
    zf.close()
    response = HttpResponse(byte_stream.getvalue(),
                            content_type="application/x-zip-compressed")
    response['Content-Disposition'] = 'attachment; filename=%s' % zip_filename
    return response
Example 15
    def __archive(self, archive_path, zip_filename):

        file_list = os.listdir(archive_path)

        zip_pathname = os.path.join(self.export_path, zip_filename + '.zip')
        with zipfile.ZipFile(zip_pathname, 'w', compression=zipfile.ZIP_DEFLATED) as new_zip:
            for filename in file_list:
                # Skip the archive itself if it lives in the folder being zipped.
                if filename == zip_filename + '.zip':
                    continue

                file_pathname = os.path.join(archive_path, filename)
                new_zip.write(file_pathname, arcname=filename)
Example 16
def main():
    """
    Zipfile password cracker using a brute-force dictionary attack.
    """
    zipfilename = 'test.zip'
    dictionary = 'dictionary.txt'

    password = None
    zip_file = zipfile.ZipFile(zipfilename)
    with open(dictionary, 'r') as f:
        for line in f:
            candidate = line.strip('\n')
            try:
                # extractall() expects the password as bytes on Python 3.
                zip_file.extractall(pwd=candidate.encode('utf-8'))
                password = candidate
                break
            except Exception:
                pass
    print(password)
Example 17
def extractZip(zf_path,
               extract_location,
               rollback=False,
               outstream=sys.stdout):
    # Record the files created; if an error occurs and rollback is enabled,
    # remove every file from the namelist that made it to disk.
    with zipfile.ZipFile(zf_path, 'r') as zf:
        namelist = zf.namelist()
        try:
            zf.extractall(path=extract_location)
        except ValueError:
            if rollback:
                for f in namelist:
                    target = os.path.join(extract_location, f)
                    if os.path.exists(target):
                        os.remove(target)
    outstream.write("Extracted file {:s} in directory {:s}\n".format(
        zf_path, extract_location))
    return namelist
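
A minimal usage sketch, with hypothetical archive and target paths:

# Extract with rollback enabled; if extractall() fails partway with a
# ValueError, any files from the namelist already on disk are removed.
names = extractZip("mod.wotmod", "extracted", rollback=True)
print(len(names), "entries processed")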
Example 18
    def load_embedding(self, path, max_length_dictionary=10000):
        """
        load embedding map

        Arguments:
            path {str} -- the absolute path of where the embedding map is

        Keyword Arguments:
            max_length_dictionary {int} -- maximum number of words to load (default: {10000})

        Returns:
            dict -- embedding map loaded as a Python dictionary
        """
        embeddings_dict = {}
        i = 0

        # Paths of the form "archive.zip/inner/file.txt" are read from inside
        # the archive rather than from the filesystem.
        if ".zip/" in path:
            archive_path = os.path.abspath(path)
            split = archive_path.split(".zip/")
            archive_path = split[0] + ".zip"
            path_inside = split[1]
            with zipfile.ZipFile(archive_path, "r") as archive:
                embeddings = archive.read(path_inside).decode("utf8").split("\n")
            for i, line in enumerate(embeddings):
                # Key on the word (first token), matching the plain-file branch below.
                values = line.split()
                if values:
                    embeddings_dict[values[0]] = i

                if i == max_length_dictionary:
                    break
            return embeddings_dict

        with open(path, 'r') as f:
            for line in f:
                values = line.split()
                if values[0].isalnum():
                    embeddings_dict[values[0]] = i
                    i += 1

                if i == max_length_dictionary:
                    break

        return embeddings_dict
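
A usage sketch of the ".zip/" path convention handled above; the owning class and file names are hypothetical:

loader = EmbeddingLoader()  # hypothetical class that defines load_embedding
vocab = loader.load_embedding("embeddings/glove.zip/glove.6B.50d.txt",
                              max_length_dictionary=5000)
print(len(vocab), "words loaded")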
Example 19
def unzip(path_from='trec_gen/zips', path_to='trec_gen/files'):
    for fname in os.listdir(path_from):
        with zipfile.ZipFile(os.path.join(path_from, fname), 'r') as zip_ref:
            zip_ref.extractall(path_to)
Example 20
folder_path = "H:\\耳部CT数据集\\WED74例标注数据导出后"
for patient_folder in os.listdir(folder_path):
    print("Processing " + patient_folder)
    sub_folder_path = os.path.join(folder_path, patient_folder)
    for zip_file_name in os.listdir(sub_folder_path):
        zip_file_path = os.path.join(sub_folder_path, zip_file_name)
        # Normalize to forward slashes; backslashes are not allowed here.
        zip_file_path = zip_file_path.replace("\\", "/")
        # Check whether this is actually a zip archive.
        if zipfile36.is_zipfile(zip_file_path):
            archive = zipfile36.ZipFile(zip_file_path)
            # Extract every member it contains.
            for member in archive.namelist():
                archive.extract(member, sub_folder_path.replace("\\", "/"))
            # Release the handle so the archive can be deleted.
            archive.close()
        # Delete the archive afterwards.
        os.remove(zip_file_path)
Example 21
import zipfile36 as zipfile

archive = zipfile.ZipFile('tmp/23.zip', 'r')
imgfile = archive.open('img_01.png')
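
A natural continuation, assuming the member should be loaded with Pillow; the file-like object from ZipFile.open() can be passed straight to Image.open (zip members are seekable on Python 3.7+, which Pillow needs):

from PIL import Image

img = Image.open(imgfile)
print(img.size, img.mode)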
Example 22
    def initialize(self):
        """Initialize the camera.

        The function finds the XML description file (described in section
        4.1.2.1 of the GenICam GenTL Standard v1.5) of the camera and creates
        a node map based on it. After that it initializes a `features`
        dictionary containing the wrapped GenICam features.

        Raises
        ------
        RuntimeError
            If the URL pointing to XML description file is invalid.
        FileNotFoundError
            If GenICam XML description file is not found.
        """
        if not self.is_initialized():
            # Open device in such way, that only host has access to the device.
            # The process has read-and-write access to the device. This access
            # flag is described in section 6.4.3.1 of the GenICam GenTL
            # Standard (version 1.5).
            self._device.open(
                gtl.DEVICE_ACCESS_FLAGS_LIST.DEVICE_ACCESS_EXCLUSIVE)
            port = self._device.remote_port
            # Here we parse the URL, which tells the location of the XML
            # description file of the camera (there can be more than one). The
            # format of the URL is described in section 4.1.2.1 of the GenICam
            # GenTL Standard (version 1.5).
            xml_files = {}
            for url_info in port.url_info_list:
                splitted_url = url_info.url.split("?")
                if len(splitted_url) == 2:
                    others, schema_version = splitted_url
                else:
                    others = splitted_url
                location, others = others.split(":")
                if location == "local":
                    _, address, size = others.split(";")
                    xml_files["local"] = (int(address, 16), int(size, 16))
                elif location == "file":
                    splitted_url = others.split("///")
                    if len(splitted_url) == 2:
                        xml_files["file"] = splitted_url[1]
                    else:
                        xml_files["file"] = splitted_url
                elif location == "http":
                    xml_files["http"] = splitted_url
                else:
                    raise RuntimeError("Invalid URL.")

            if xml_files:  # Check that at least one XML file is found.
                # XML location preference:
                #   1. module register map
                if "local" in xml_files:
                    content = port.read(*xml_files["local"])[1]
                #   2. local directory
                elif "file" in xml_files:
                    with open(xml_files["file"], "r") as file:
                        content = file.read()
                #   3. vendor website
                elif "http" in xml_files:
                    with urllib.request.urlopen(xml_files["http"]) as file:
                        content = file.read()
            else:  # If no XML file is found, raise an exception.
                raise FileNotFoundError(
                    "No GenICam XML description file found.")

            # Create a BytesIO stream object using the `content` buffer.
            file_content = io.BytesIO(content)

            # According to GenICam GenTL Standard (v1.5, section 4.1.2) the XML
            # can be either an uncompressed XML description file or
            # Zip-compressed file (using DEFLATE and STORE compression methods).
            # Here we check if the file is a zip file, and extract the contents
            # if it is.
            if zipfile.is_zipfile(file_content):
                with zipfile.ZipFile(file_content, "r") as zip_file:
                    # Iterate over the files inside the zip.
                    for file in zip_file.infolist():
                        # Find the XML file using the file extension.
                        if os.path.splitext(
                                file.filename)[1].lower() == ".xml":
                            content = zip_file.read(file).decode("utf8")

            _port = self._Port(port)

            self._node_map = gapi.NodeMap()  # Create a node map.
            # Load the XML description file contents to the node map.
            self._node_map.load_xml_from_string(content)
            # Connect the port to the node map instance.
            self._node_map.connect(_port, port.name)

            # Exclude features that are not implemented and wrap all the
            # remaining features inside feature objects, that simplify the usage
            # of the features.
            self._features = {}
            for feature_name in dir(self._node_map):  # Iterate over features.
                # Get feature from the node map.
                feature = getattr(self._node_map, feature_name)
                feature_type = type(feature)  # Get the `genicam2` type.
                # Exclude features that are not implemented (access mode `0`).
                if (feature_type in camazing.feature_types.mapping
                        and feature.get_access_mode() > 0):
                    # Select a proper wrapper type for feature and put it to
                    # features dictionary.
                    self._features[feature_name] = \
                        camazing.feature_types.mapping[feature_type](feature)
Example 23

def unzip(pwd_1, mas):
    try:
        mas.extractall(path='.', pwd=pwd_1.encode('utf-8'))  # try to extract
        print(pwd_1)
        global flag
        flag = False
    except RuntimeError:
        pass


if __name__ == '__main__':
    mas_1 = zipfile36.ZipFile('test1.zip', 'r')
    iter_1 = itertools.permutations(range(3), 2)
    flag = True
    for i in iter_1:
        # Join the digits of the permutation into a candidate password,
        # e.g. (0, 1) -> "01".
        passwd = ''.join(str(d) for d in i)
        th1 = threading.Thread(target=unzip, args=(passwd, mas_1))
        th2 = threading.Thread(target=unzip, args=(passwd, mas_1))
        th1.start()
        th2.start()
        th1.join()
        th2.join()
        if not flag:
            # A thread found the working password; stop brute-forcing.
            break
Example 24
app = Flask(__name__)

# from kaggle.api.kaggle_api_extended import KaggleApi
import zipfile36 as zipfile

# api = KaggleApi()
# api.authenticate()
# api.dataset_download_files('dannielr/marvel-superheroes')

try:
    with zipfile.ZipFile('marvel-superheroes.zip', 'r') as zf:
        zf.extractall('./datasets/')
except Exception as e:
    print(e)
else:
    print('file extracted')

destination_folder = './outputfile/'
if not os.path.exists(destination_folder):
    os.mkdir(destination_folder)


def write_df_to_db(df, table_name):
    try:
Example 25
def getDreamImage(image_path):
    #download google's pre-trained neural network
    url = 'https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip'
    data_dir = 'data/'
    model_name = os.path.split(url)[-1]
    local_zip_file = os.path.join(data_dir, model_name)
    if not os.path.exists(local_zip_file):
        print('downloading zip. . .')
        # Download
        model_url = urllib.request.urlopen(url)
        print('model_url:', model_url)
        with open(local_zip_file, 'wb') as output:
            output.write(model_url.read())
        # Extract
        print('extracting zip. . .')
        with zipfile.ZipFile(local_zip_file, 'r') as zip_ref:
            zip_ref.extractall(data_dir)

    #start with a gray image with a little noise
    img_noise = np.random.uniform(size=(224, 224, 3)) + 100.0

    model_fn = 'tensorflow_inception_graph.pb'

    #Creating Tensorflow session and loading the model
    graph = tf.Graph()
    sess = tf.InteractiveSession(graph=graph)
    with tf.gfile.FastGFile(os.path.join(data_dir, model_fn), 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    t_input = tf.placeholder(np.float32,
                             name='input')  # define the input tensor
    imagenet_mean = 117.0
    t_preprocessed = tf.expand_dims(t_input - imagenet_mean, 0)
    tf.import_graph_def(graph_def, {'input': t_preprocessed})

    layers = [
        op.name for op in graph.get_operations()
        if op.type == 'Conv2D' and 'import/' in op.name
    ]
    feature_nums = [
        int(graph.get_tensor_by_name(name + ':0').get_shape()[-1])
        for name in layers
    ]

    print('Number of layers', len(layers))
    print('Total number of feature channels:', sum(feature_nums))

    # Helper functions for TF Graph visualization
    #pylint: disable=unused-variable
    def strip_consts(graph_def, max_const_size=32):
        """Strip large constant values from graph_def."""
        strip_def = tf.GraphDef()
        for n0 in graph_def.node:
            n = strip_def.node.add()  #pylint: disable=maybe-no-member
            n.MergeFrom(n0)
            if n.op == 'Const':
                tensor = n.attr['value'].tensor
                size = len(tensor.tensor_content)
                if size > max_const_size:
                    tensor.tensor_content = "<stripped %d bytes>" % size
        return strip_def

    def rename_nodes(graph_def, rename_func):
        res_def = tf.GraphDef()
        for n0 in graph_def.node:
            n = res_def.node.add()  #pylint: disable=maybe-no-member
            n.MergeFrom(n0)
            n.name = rename_func(n.name)
            for i, s in enumerate(n.input):
                n.input[i] = rename_func(
                    s) if s[0] != '^' else '^' + rename_func(s[1:])
        return res_def

    def showarray(a):
        a = np.uint8(np.clip(a, 0, 1) * 255)
        return a
        #plt.imshow(a)
        #plt.show()

    def visstd(a, s=0.1):
        '''Normalize the image range for visualization'''
        return (a - a.mean()) / max(a.std(), 1e-4) * s + 0.5

    def T(layer):
        '''Helper for getting layer output tensor'''
        return graph.get_tensor_by_name("import/%s:0" % layer)

    def render_naive(t_obj, img0=img_noise, iter_n=20, step=1.0):
        t_score = tf.reduce_mean(t_obj)  # defining the optimization objective
        t_grad = tf.gradients(
            t_score,
            t_input)[0]  # behold the power of automatic differentiation!

        img = img0.copy()
        for _ in range(iter_n):
            g, _ = sess.run([t_grad, t_score], {t_input: img})
            # normalizing the gradient, so the same step size should work
            g /= g.std() + 1e-8  # for different layers and networks
            img += g * step
        showarray(visstd(img))

    def tffunc(*argtypes):
        '''Helper that transforms TF-graph generating function into a regular one.
        See "resize" function below.
        '''
        placeholders = list(map(tf.placeholder, argtypes))

        def wrap(f):
            out = f(*placeholders)

            def wrapper(*args, **kw):
                return out.eval(dict(zip(placeholders, args)),
                                session=kw.get('session'))

            return wrapper

        return wrap

    def resize(img, size):
        img = tf.expand_dims(img, 0)
        return tf.image.resize_bilinear(img, size)[0, :, :, :]

    resize = tffunc(np.float32, np.int32)(resize)

    def calc_grad_tiled(img, t_grad, tile_size=512):
        '''Compute the value of tensor t_grad over the image in a tiled way.
        Random shifts are applied to the image to blur tile boundaries over 
        multiple iterations.'''
        sz = tile_size
        h, w = img.shape[:2]
        sx, sy = np.random.randint(sz, size=2)
        img_shift = np.roll(np.roll(img, sx, 1), sy, 0)
        grad = np.zeros_like(img)
        for y in range(0, max(h - sz // 2, sz), sz):
            for x in range(0, max(w - sz // 2, sz), sz):
                sub = img_shift[y:y + sz, x:x + sz]
                g = sess.run(t_grad, {t_input: sub})
                grad[y:y + sz, x:x + sz] = g
        return np.roll(np.roll(grad, -sx, 1), -sy, 0)

    def render_deepdream(t_obj,
                         img0=img_noise,
                         iter_n=10,
                         step=2,
                         octave_n=2,
                         octave_scale=1.4):
        t_score = tf.reduce_mean(t_obj)  # defining the optimization objective
        t_grad = tf.gradients(
            t_score,
            t_input)[0]  # behold the power of automatic differentiation!

        # split the image into a number of octaves
        img = img0
        octaves = []
        for _ in range(octave_n - 1):
            hw = img.shape[:2]
            lo = resize(img, np.int32(np.float32(hw) / octave_scale))
            hi = img - resize(lo, hw)
            img = lo
            octaves.append(hi)

        # generate details octave by octave
        for octave in range(octave_n):
            if octave > 0:
                hi = octaves[-octave]
                img = resize(img, hi.shape[:2]) + hi
            for _ in range(iter_n):
                g = calc_grad_tiled(img, t_grad)
                img += g * (step / (np.abs(g).mean() + 1e-7))

        # This will usually run 3 or 4 octaves.
        # Step 5: return the deep-dream image as a uint8 array.
        return showarray(img / 255.0)

    #open image
    img0 = PIL.Image.open(image_path)
    img0 = np.float32(img0)

    #Apply gradient ascent to that layer
    a = render_deepdream(tf.square(T('mixed4d')), img0)
    return a
Example 26
# newzip.write("ex16_sample.txt")
# newzip.close()

# # new write will overwrite data
# newzip = zipfile.ZipFile("newzip.zip", "w")
# newzip.write("py3book30.zip")
# newzip.close()

# # data can be appended and will not overwrite
# newzip = zipfile.ZipFile("newzip.zip", "a")
# newzip.write("ex15_sample.txt")
# newzip.write("ex16_sample.txt")
# newzip.close()

# READING THEM
nzip = zipfile.ZipFile("newzip.zip", "r")
data = nzip.read("ex15_sample.txt") # particular file in zip
print(data)

# list all directories in zip, like dir in cmd or ls in Terminal
nzip.printdir()

# extract a particular file from zip in dir
nzip.extract("ex15_sample.txt")#, "newdir")

# extract everything
nzip.extractall("dirall") 

# if no dir mentioned, default dir

# GET INFO
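
The snippet cuts off at the GET INFO step; a short sketch of what typically follows, using only stdlib zipfile calls:

# per-member metadata
info = nzip.getinfo("ex15_sample.txt")
print(info.filename, info.file_size, info.compress_size, info.date_time)

# metadata for every member
for member in nzip.infolist():
    print(member.filename, member.file_size)

nzip.close()  # release the handle when done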
Example 27
import streamlit as st
import pandas as pd
import numpy as np
import plotly.express as px
import zipfile36 as zipfile

zf = zipfile.ZipFile('mobile.zip')

st.title("Sentiment analysis of Mobile phone brands")
st.sidebar.title("Customer Satisfaction Reviews on Mobile brands")


@st.cache()
def load_data():
    df = pd.read_csv(zf.open('mobile.csv'))
    senti = {
        1: 'Negative',
        2: 'Negative',
        3: 'Neutral',
        4: 'Positive',
        5: 'Positive'
    }
    df['Sentiment'] = df['Ratings'].map(senti)
    df = df.dropna()
    return df


df = load_data()

alg_data = ("algo.csv")
Example 28
 parser.add_argument("--no_damaged_model", action="store_true", help="If set, do not import damaged model to the XML profile (show original vehicle's wreck)")
 parser.add_argument("--relocate_data", action="store_true", help="Set to relocate all resource file and modify .visual(_processed) accordingly . Currently unimplemented.")
 parser.add_argument("--pretty", action="store_false", help="Specify to disable default result of XML printing (no indent, no stripping values).")
 parser.add_argument("--lax", action="store_false", help="If set, do not raise error when missing nodes during xml conversion.")
 
 args = parser.parse_args()
 if(args.relocate_data):
     raise NotImplementedError
 if(args.output is None):
     args.output = add_suffix(args.input, "_UML")
 if(args.profile_name is None):
     args.profile_name = os.path.splitext(os.path.basename(args.input))[0]
 internal_UML_profile_filename = os.path.join("res", "scripts", "client", "mods", "UMLprofiles", args.profile_name + ".xml")
 # print(args, internal_UML_profile_filename)
 # open the zipfile, both input and output
 with zipfile.ZipFile(args.input, "r", compression=zipfile.ZIP_STORED) as inf, ZipFileOrFolder(args.extracted, args.output, "w", compression=zipfile.ZIP_STORED) as outf:
     # for everything not the profile, copy over
     for info in inf.infolist():
         if(info.is_dir()):
             # do nothing to directory
             pass
         elif("item_defs" not in info.filename):
             # resource file, move it over
             print("Moving " + info.filename)
             with inf.open(info.filename, "r") as inresfile, outf.open(info.filename, "w") as outresfile:
                 outresfile.write(inresfile.read())
         else:
             assert "xml" in info.filename, "Expecting an item_defs vehicle profile, but received {}".format(info.filename)
             if(args.resource_only):
                 print("Resource only mode; do not convert corresponding item_defs XML")
                 continue
Example 29
def cleanUp():
    os.remove(download_zip)


try:
    print("Downloading the installer...")
    wget.download(download_url, download_path)

    for server in serverPath:
        t = threading.Thread(target=loading)
        t.start()
        sourceMsi = server + r"\OpeniTCLIMSServer.msi"
        newMsi = server + r"\OpeniTCLIMSServer_.msi"
        sourceLog = server + r"\changelog.txt"
        newLog = server + r"\changelog_.txt"
        with zipfile36.ZipFile(download_zip, "r") as z:
            z.extractall(server)
        shutil.move(sourceMsi, newMsi)
        shutil.move(sourceLog, newLog)
        sleep(10)

except FileNotFoundError as error:
    done = True
    print("\rUnzip Failed! %s!\nPlease check: %s" %
          (error.strerror, error.filename))
    cleanUp()

else:
    done = True
    print("\rUnzip was successful!")
    cleanUp()
Example 30
from bs4 import BeautifulSoup
import datetime
import io
import MySQLdb
import pandas as pd
import requests
import zipfile

weather_dict = {}
list_city = []
list_startTime = []
list_endTime = []
list_description = []

# Download the Central Weather Bureau data archive and unzip it.
uri = 'http://opendata.cwb.gov.tw/opendataapi?dataid=F-D0047-093&authorizationkey=CWB-3F41A7B9-BAF0-4CCE-8A93-AFEBC64EF888'
res = requests.get(uri)
z = zipfile.ZipFile(io.BytesIO(res.content))
# Extract every file in the zip into the target folder.
z.extractall(r'C:\Users\JENNIFER\Downloads\weatherData')

# Open the target file and tidy up its format.
weatherXML_path = r'C:\Users\JENNIFER\Downloads\weatherData\TAIWAN_72hr_CH.xml'
with open(weatherXML_path, 'r', encoding='utf8') as infile:
    weatherXML = infile.read()
soup = BeautifulSoup(weatherXML, 'xml')
blocks = soup.select('location')


def changeTypeDatetime(textTime):
    temp_Time = textTime.split('T')[0]
    temp_Time2 = textTime.split('T')[1]
    temp_Time2 = temp_Time2.split('+')[0]