def sendImageWithUrl(self, to_, url):
    """Send an image fetched from the given URL.

    :param to_: recipient identifier, passed through to sendImage
    :param url: image url to send
    :raises Exception: if the image cannot be downloaded
    """
    # BUG FIX: the format string used '%1' (parsed as a width-1 '%d' followed
    # by the literal 'ata'), producing names like 'pythonLine-3ata'; the
    # sibling sendVideoWithURL uses '%i', so match it here.
    path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        # BUG FIX: image data is binary; mode 'w' breaks on Python 3
        # (str vs bytes) and corrupts data on Windows. Use 'wb'.
        with open(path, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
    else:
        raise Exception('Download image failure.')
    try:
        self.sendImage(to_, path)
    except Exception as e:
        raise e
def DownloadFile(self, fileurl, dlfile): """Downloads a given file to a given path/filename. Args: fileurl: String with URL of file to download. dlfile: String with path of file to be written to. Raises: OSError: If file cannot be opened/written to, function raises OSError. URLError: If URL cannot be opened, fucntion raises URLError. """ if not os.path.isfile(dlfile) or dlfile == TMPINDEX: print 'Downloading %s ...' % fileurl file_to_dl = urllib2.urlopen(fileurl) tmpfile = open(dlfile, 'wb') shutil.copyfileobj(file_to_dl, tmpfile) else: print '%s exists' % dlfile
def _get_data(self):
    # Ensure the raw data file for the current segment exists locally
    # (downloading and unpacking the dataset archive if needed), then load
    # it into NDArrays of shape (-1, seq_len).
    archive_file_name, archive_hash = self._archive_file
    data_file_name, data_hash = self._data_file[self._segment]
    path = os.path.join(self._root, data_file_name)
    # Re-download when the file is missing or its SHA-1 checksum mismatches.
    if not os.path.exists(path) or not check_sha1(path, data_hash):
        namespace = 'gluon/dataset/'+self._namespace
        downloaded_file_path = download(_get_repo_file_url(namespace, archive_file_name),
                                        path=self._root,
                                        sha1_hash=archive_hash)
        # Flatten the archive: each member is extracted directly into
        # self._root regardless of its directory path inside the zip.
        with zipfile.ZipFile(downloaded_file_path, 'r') as zf:
            for member in zf.namelist():
                filename = os.path.basename(member)
                if filename:  # skip directory entries (their basename is empty)
                    dest = os.path.join(self._root, filename)
                    with zf.open(member) as source, \
                            open(dest, "wb") as target:
                        shutil.copyfileobj(source, target)
    data, label = self._read_batch(path)
    # NOTE(review): assumes _read_batch returns arrays whose total length is
    # a multiple of _seq_len -- confirm against the data pipeline.
    self._data = nd.array(data, dtype=data.dtype).reshape((-1, self._seq_len))
    self._label = nd.array(label, dtype=label.dtype).reshape((-1, self._seq_len))
def download(directory, filename):
    """Download (and unzip) a file from the MNIST dataset if not already done.

    Args:
      directory: Destination directory for the unpacked file.
      filename: Base name of the MNIST file (without the '.gz' suffix).

    Returns:
      The local path of the unpacked file.
    """
    filepath = os.path.join(directory, filename)
    if tf.gfile.Exists(filepath):
        return filepath
    if not tf.gfile.Exists(directory):
        tf.gfile.MakeDirs(directory)
    # CVDF mirror of http://yann.lecun.com/exdb/mnist/
    url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz'
    # BUG FIX: mkstemp returns an open OS-level file descriptor; the original
    # discarded it with '_', leaking one descriptor per download.
    fd, zipped_filepath = tempfile.mkstemp(suffix='.gz')
    os.close(fd)
    print('Downloading %s to %s' % (url, zipped_filepath))
    urllib.request.urlretrieve(url, zipped_filepath)
    with gzip.open(zipped_filepath, 'rb') as f_in, \
            tf.gfile.Open(filepath, 'wb') as f_out:
        shutil.copyfileobj(f_in, f_out)
    os.remove(zipped_filepath)
    return filepath
def download(directory, filename):
    """Download (and unzip) a file from the MNIST dataset if not already done.

    Args:
      directory: Destination directory for the unpacked file.
      filename: Base name of the MNIST file (without the '.gz' suffix).

    Returns:
      The local path of the unpacked file.
    """
    filepath = os.path.join(directory, filename)
    if tf.gfile.Exists(filepath):
        return filepath
    if not tf.gfile.Exists(directory):
        tf.gfile.MakeDirs(directory)
    # CVDF mirror of http://yann.lecun.com/exdb/mnist/
    url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz'
    # BUG FIX: mkstemp returns an open OS-level file descriptor; the original
    # discarded it with '_', leaking one descriptor per download.
    fd, zipped_filepath = tempfile.mkstemp(suffix='.gz')
    os.close(fd)
    print('Downloading %s to %s' % (url, zipped_filepath))
    urllib.request.urlretrieve(url, zipped_filepath)
    with gzip.open(zipped_filepath, 'rb') as f_in, \
            tf.gfile.Open(filepath, 'wb') as f_out:
        shutil.copyfileobj(f_in, f_out)
    os.remove(zipped_filepath)
    return filepath
def download_image_requests_to_dir(img_url, dir_name):
    """Download an image into <temp_med_dir>/<dir_name>/_image.<ext>,
    converting a PNG to JPG after download.

    Args:
        img_url: Direct URL of the image; the extension is taken from the URL.
        dir_name: Sub-directory (under the temp media dir) to write into.
    """
    dir_utils.clear_directory(f'{dir_utils.get_temp_med_dir()}/{dir_name}')
    img_ext = img_url.rsplit('.', 1)[1]
    s = requests.Session()
    # BUG FIX: r.raw is only readable on a streamed request; without
    # stream=True the body is consumed during the request and copyfileobj
    # writes an empty file.
    r = s.get(img_url, stream=True)
    if r.status_code == 200:
        with open(f"{dir_utils.get_temp_med_dir()}/{dir_name}/_image.{img_ext}", 'wb') as f:
            r.raw.decode_content = True
            shutil.copyfileobj(r.raw, f)
        dprint(f"Downloaded image from: {img_url}")
        # BUG FIX: the PNG->JPG conversion previously ran even when the
        # download failed (Image.open would then raise on a missing file);
        # it now runs only on success. The duplicate success dprint is gone.
        if img_ext == 'png':
            dprint(f"Fixing image to force jpg conversion: {img_url}")
            img_fix = Image.open(f"{dir_utils.get_temp_med_dir()}/{dir_name}/_image.{img_ext}")
            img_fix.convert('RGB').save(f"{dir_utils.get_temp_med_dir()}/{dir_name}/_image.jpg")
            dir_utils.remove_file("_image.png", f'{dir_utils.get_temp_med_dir()}/{dir_name}')
    else:
        dprint(f"{r.status_code} Error! - {img_url}")
def copyfileobj(src, dst, length=None):
    """Copy exactly *length* bytes from fileobj src to fileobj dst.

    If length is None, the entire remaining content of src is copied.
    Raises IOError when src is exhausted before *length* bytes were read.
    """
    if length == 0:
        return
    if length is None:
        shutil.copyfileobj(src, dst)
        return

    chunk_size = 16 * 1024
    remaining = length
    # Read full chunks, then one final short read for any remainder.
    while remaining > 0:
        want = min(chunk_size, remaining)
        data = src.read(want)
        if len(data) < want:
            raise IOError("end of file reached")
        dst.write(data)
        remaining -= want
def addfile(self, tarinfo, fileobj=None):
    """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
       given, tarinfo.size bytes are read from it and added to the archive.
       You can create TarInfo objects using gettarinfo().
       On Windows platforms, `fileobj' should always be opened with mode
       'rb' to avoid irritation about the file size.
    """
    # Archive must be open for appending ('a') or writing ('w').
    self._check("aw")

    # Work on a copy so the caller's TarInfo object is not mutated.
    tarinfo = copy.copy(tarinfo)

    # Emit the header record(s) for this member and advance the offset.
    buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
    self.fileobj.write(buf)
    self.offset += len(buf)

    # If there's data to follow, append it.
    if fileobj is not None:
        copyfileobj(fileobj, self.fileobj, tarinfo.size)
        # Tar member data is NUL-padded to a whole number of BLOCKSIZE
        # (512-byte) blocks; account for the padding in self.offset.
        blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
        if remainder > 0:
            self.fileobj.write(NUL * (BLOCKSIZE - remainder))
            blocks += 1
        self.offset += blocks * BLOCKSIZE

    self.members.append(tarinfo)
def download_metadata_file(url, outputdir, program):
    """Fetch the gzipped catalogue index for *program* and unpack it.

    Returns the path of the unzipped CSV index file.
    """
    gz_path = os.path.join(outputdir, 'index_' + program + '.csv.gz')
    csv_path = os.path.join(outputdir, 'index_' + program + '.csv')

    # Download the compressed index only once.
    if not os.path.isfile(gz_path):
        target_dir = os.path.dirname(gz_path)
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        print("Downloading Metadata file...")
        remote = urlopen(url)
        with open(gz_path, 'wb') as sink:
            shutil.copyfileobj(remote, sink)

    # Unzip only once as well.
    if not os.path.isfile(csv_path):
        print("Unzipping Metadata file...")
        with gzip.open(gz_path) as packed, open(csv_path, 'wb') as sink:
            shutil.copyfileobj(packed, sink)

    return csv_path
def compress_file_with_gzip(file_name, tmp_dir):
    """Compresses a file with GZIP.

    Args:
        file_name: Local path to file to be compressed.
        tmp_dir: Temporary directory where an GZIP file will be created.

    Returns:
        A tuple of gzip file name and size.
    """
    logger = getLogger(__name__)
    base_name = os.path.basename(file_name)
    gzip_file_name = os.path.join(tmp_dir, base_name + '_c.gz')

    logger.debug('gzip file: %s, original file: %s', gzip_file_name, file_name)
    # BUG FIX: the original used bare open()/close() pairs, leaking both
    # handles if copyfileobj raised; 'with' guarantees they are closed.
    with open(file_name, 'rb') as fr, gzip.GzipFile(gzip_file_name, 'wb') as fw:
        shutil.copyfileobj(fr, fw)
    # Normalize the gzip header (e.g. strip the embedded mtime) so the
    # output is deterministic for the uploader.
    SnowflakeFileUtil.normalize_gzip_header(gzip_file_name)

    statinfo = os.stat(gzip_file_name)
    return gzip_file_name, statinfo.st_size
def do_GET(self):
    """Stream a VLC-transcoded version of the requested media file to the client."""
    client_address = self.client_address[0]
    logger.info('Serving transcoded media file to {} ...'.format(
        client_address))
    self.send_head()
    path = self.translate_path(self.path)
    command = VLCEncoderSettings.command(path)
    logger.info('Launching {}'.format(command))

    # BUG FIX: encoder_process was referenced in 'finally' even when Popen
    # itself raised, producing a NameError that masked the real error.
    encoder_process = None
    try:
        with open(os.devnull, 'w') as dev_null:
            encoder_process = subprocess.Popen(
                command, stdout=subprocess.PIPE, stderr=dev_null)
            # Pipe the encoder's stdout straight into the HTTP response.
            shutil.copyfileobj(encoder_process.stdout, self.wfile)
    except Exception:
        # Client disconnects surface here as broken-pipe style errors.
        logger.info('Connection from {} closed.'.format(client_address))
        logger.debug(traceback.format_exc())
    finally:
        if encoder_process is not None:
            pid = encoder_process.pid
            logger.info('Terminating process {}'.format(pid))
            try:
                os.kill(pid, signal.SIGKILL)
            except OSError:
                # Process already exited; nothing to clean up.
                pass
def sendImageWithURL2(self, to, url):
    """Send a image with given image url

    :param url: image url to send
    """
    path = 'tmp/pythonLine.data'
    response = requests.get(url, stream=True)
    # Bail out early on anything but a successful download.
    if response.status_code != 200:
        raise Exception('Download image failure.')
    with open(path, 'wb') as out:
        shutil.copyfileobj(response.raw, out)
    try:
        self.sendImage(to, path)
    except Exception as e:
        raise e
def download_tile(name):
    """
    This function will download and extract the tile with the given name. The
    data is stored in the path pointed to by the :code:`_data_path` attribute
    of the module.

    Args:
        name(str): The name of the tile to download.
    """
    base_url = "https://dds.cr.usgs.gov/srtm/version2_1/SRTM30"
    url = base_url + "/" + name + "/" + name + ".dem.zip"
    r = urllib.request.urlopen(url)
    filename = os.path.join(_get_data_path(), name + ".dem.zip")
    # BUG FIX: 'path = os.path.join(filename)' was a no-op alias of the
    # same string; write straight to the target path instead.
    with open(filename, 'wb') as f:
        shutil.copyfileobj(r, f)

    # Extract zip file next to the downloaded archive.
    with zipfile.ZipFile(filename, "r") as zip_ref:
        zip_ref.extractall(os.path.dirname(filename))
def write_output(ts, args):
    """
    Adds provenance information to the specified tree sequence (ensuring that
    the output is reproducible) and write the resulting tree sequence to output.
    """
    tables = ts.dump_tables()
    logger.debug("Updating provenance")
    provenance = get_provenance_dict()
    tables.provenances.add_row(json.dumps(provenance))
    ts = tables.tree_sequence()

    if args.output is not None:
        logger.debug(f"Writing to {args.output}")
        ts.dump(args.output)
        return

    # There's no way to get tskit to write directly to stdout, so we write
    # to a tempfile first.
    with tempfile.TemporaryDirectory() as tmpdir:
        tmpfile = pathlib.Path(tmpdir) / "tmp.trees"
        ts.dump(tmpfile)
        with open(tmpfile, "rb") as f:
            shutil.copyfileobj(f, sys.stdout.buffer)
def extractFromVolume(container_urn, volume, imageURNs, destFolder):
    """Extract the given image streams from an AFF4 volume.

    Each image is written under destFolder (creating parent directories as
    needed); destFolder == "-" streams the image to stdout instead.
    """
    printVolumeInfo(container_urn.original_filename, volume)
    resolver = volume.resolver
    for imageUrn in imageURNs:
        imageUrn = utils.SmartUnicode(imageUrn)

        pathName = next(resolver.QuerySubjectPredicate(volume.urn, imageUrn,
                                                       volume.lexicon.pathName))

        with resolver.AFF4FactoryOpen(imageUrn) as srcStream:
            if destFolder != "-":
                pathName = escaping.arnPathFragment_from_path(pathName.value)
                # Strip leading slashes so os.path.join stays inside destFolder.
                while pathName.startswith("/"):
                    pathName = pathName[1:]
                destFile = os.path.join(destFolder, pathName)
                if not os.path.exists(os.path.dirname(destFile)):
                    try:
                        os.makedirs(os.path.dirname(destFile))
                    except OSError as exc:  # Guard against race condition
                        if exc.errno != errno.EEXIST:
                            raise
                with open(destFile, "wb") as destStream:
                    # BUG FIX: the copy buffer was '32 * 2014', an obvious
                    # typo for 32 * 2048 (64 KiB chunks).
                    shutil.copyfileobj(srcStream, destStream, length=32 * 2048)
                print("\tExtracted %s to %s" % (pathName, destFile))
            else:
                shutil.copyfileobj(srcStream, sys.stdout)
def download_image(url, path):
    """Fetch *url* to *path*; return True on success or if already present."""
    if os.path.exists(path):
        return True
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36'
    }
    try:
        resp = requests.get(url, stream=True, timeout=9, headers=headers)
        if resp.status_code == 200:
            with open(path, 'wb') as out:
                resp.raw.decode_content = True
                shutil.copyfileobj(resp.raw, out)
            return True
        print(("Could not download image %s, response %d" % (url, resp.status_code)))
    except Exception as e:
        # Older exception classes expose .message; fall back to repr().
        reason = e.message if hasattr(e, 'message') else repr(e)
        print(("Could not download image %s due to %s" % (url, reason)))
    return False
def download(directory, filename):
    """Download (and unzip) a file from the MNIST dataset if not already done.

    Args:
      directory: Destination directory for the unpacked file.
      filename: Base name of the MNIST file (without the '.gz' suffix).

    Returns:
      The local path of the unpacked file.
    """
    filepath = os.path.join(directory, filename)
    if tf.gfile.Exists(filepath):
        return filepath
    if not tf.gfile.Exists(directory):
        tf.gfile.MakeDirs(directory)
    # CVDF mirror of http://yann.lecun.com/exdb/mnist/
    url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz'
    # BUG FIX: mkstemp returns an open OS-level file descriptor; the original
    # discarded it with '_', leaking one descriptor per download.
    fd, zipped_filepath = tempfile.mkstemp(suffix='.gz')
    os.close(fd)
    print('Downloading %s to %s' % (url, zipped_filepath))
    urllib.request.urlretrieve(url, zipped_filepath)
    with gzip.open(zipped_filepath, 'rb') as f_in, \
            tf.gfile.Open(filepath, 'wb') as f_out:
        shutil.copyfileobj(f_in, f_out)
    os.remove(zipped_filepath)
    return filepath
def bindiff_pickle_export(self, sample, is_64_bit = True, timeout = None):
    """
    Load a sample into IDA Pro, perform autoanalysis and export a pickle file.
    :param sample: The sample's path
    :param is_64_bit: If the sample needs to be analyzed by the 64 bit version of IDA
    :param timeout: Timeout for the analysis in seconds
    :return: The file name of the exported pickle database. The file needs
    to be deleted by the caller. Returns None on error.
    """
    data_to_send = {
        "timeout": timeout,
        "is_64_bit": is_64_bit}
    url = "%s/binexport_pickle" % next(self._urls)
    log.debug("curl -XPOST --data '%s' '%s'", json.dumps(data_to_send), url)
    response = requests.post(url, data = data_to_send,
                             files = {os.path.basename(sample): open(sample, "rb")})
    if response.status_code == 200:
        handle_tar, path_tar = tempfile.mkstemp(suffix = ".tar.gz")
        with os.fdopen(handle_tar, "wb") as f:
            # BUG FIX: 'map(f.write, response.iter_content(1024))' is lazy on
            # Python 3, so nothing was ever written; iterate explicitly.
            for chunk in response.iter_content(1024):
                f.write(chunk)
        # Unpack the server's tarball into a scratch directory.
        directory = tempfile.mkdtemp()
        subprocess.check_call(["tar", "xf", path_tar], cwd = directory)

        # Copy the two artifacts out to caller-owned temp files.
        handle_bindiff, output_bindiff = tempfile.mkstemp(suffix = ".BinExport")
        with os.fdopen(handle_bindiff, "wb") as f:
            with open(os.path.join(directory, "output.BinExport"), "rb") as f2:
                shutil.copyfileobj(f2, f)
        handle_pickle, output_pickle = tempfile.mkstemp(suffix = ".pickle")
        with os.fdopen(handle_pickle, "wb") as f:
            with open(os.path.join(directory, "output.pickle"), "rb") as f2:
                shutil.copyfileobj(f2, f)

        os.unlink(path_tar)
        shutil.rmtree(directory)
        return output_bindiff, output_pickle
    else:
        log.error("Bindiff server responded with status code %d: %s",
                  response.status_code, response.content)
        return None
def download(url, fname=None):
    """
    Downloads a file.

    Args:
        url (str): The URL to download.
        fname (Optional[str]): The filename to store the downloaded file in.
            If `None`, take the filename from the URL. Defaults to `None`.

    Returns:
        The filename the URL was downloaded to.

    Raises:
        requests.exceptions.HTTPError: There was a problem connecting to the URL.
    """
    # Fall back to the final path component of the URL as the filename.
    target = url.split('/')[-1] if fname is None else fname

    # Stream the response straight to disk instead of buffering it in memory.
    with contextlib.closing(requests.get(url, stream=True)) as resp:
        try:
            resp.raise_for_status()
        except requests.exceptions.HTTPError as error:
            print('Error connecting to URL: "{}"'.format(url))
            print(resp.text)
            raise error
        with open(target, 'wb') as sink:
            shutil.copyfileobj(resp.raw, sink)

    return target
def download_one_file(download_url,
                      local_dest,
                      expected_byte=None,
                      unzip_and_remove=False):
    """
    Download the file from download_url into local_dest
    if the file doesn't already exists.
    If expected_byte is provided, check if
    the downloaded file has the same number of bytes.
    If unzip_and_remove is True, unzip the file and remove the zip file
    """
    # local_dest[:-3] is the unpacked name (the '.gz' suffix stripped).
    if os.path.exists(local_dest) or os.path.exists(local_dest[:-3]):
        print('%s already exists' % local_dest)
    else:
        print('Downloading %s' % download_url)
        # BUG FIX: the returned path was bound to an unused 'local_file'.
        urllib.request.urlretrieve(download_url, local_dest)
        file_stat = os.stat(local_dest)
        # NOTE(review): when expected_byte is None the unzip_and_remove flag
        # is silently ignored -- confirm this is intentional.
        if expected_byte:
            if file_stat.st_size == expected_byte:
                print('Successfully downloaded %s' % local_dest)
                if unzip_and_remove:
                    with gzip.open(local_dest, 'rb') as f_in, \
                            open(local_dest[:-3], 'wb') as f_out:
                        shutil.copyfileobj(f_in, f_out)
                    os.remove(local_dest)
            else:
                print('The downloaded file has unexpected number of bytes')
def handle_chunked(f, permpath, content_range):
    # Append one chunk of a resumable upload to the file at permpath,
    # validating the client-supplied Content-Range header against the
    # bytes already stored on disk.
    try:
        content_range = RE_CONTENT_RANGE.match(content_range)
        assert content_range, 'Invalid content range!'
        # cr1 = chunk start offset, cr2 = chunk end offset, cr3 = total size.
        # NOTE(review): cr2 is parsed but never used -- confirm the regex
        # groups actually map this way.
        cr1, cr2, cr3 = [int(content_range.group(i)) for i in range(1, 4)]
        if os.path.isfile(permpath):
            size = stat(permpath)
        else:
            size = 0
        # The chunk must start exactly where the stored data ends; otherwise
        # tell the client which offset to resume from.
        if size != cr1:
            raise WrongOffset(size)
        with open(permpath, 'ab') as dest:
            shutil.copyfileobj(f, dest)
    except WrongOffset as e:
        size = e.offset
    else:
        # Re-measure after a successful append.
        size = stat(permpath)
    if size < cr3:
        # Upload incomplete: ask the client to continue from `size`.
        return 'Continue', {'offset': size}
    elif size > cr3:
        raise RuntimeError('What?! Uploaded file is larger than '
                           'what it is supposed to be?')
    return 'Success', {}
def download(url, file_path):
    """Stream *url* to *file_path*; does nothing on a non-200 response."""
    resp = requests.get(url, stream=True)
    # Only write when the request succeeded.
    if resp.status_code == 200:
        with open(file_path, 'wb') as out:
            resp.raw.decode_content = True
            shutil.copyfileobj(resp.raw, out)
def url_download(url, topath=None, create_dirs=True):
    '''
    url_download(url) yields the contents of the given url as a byte-string.
    url_download(url, topath) downloads the given url to the given path, topath
      and yields that path on success.

    The option create_dirs (default: True) may be set to False to prevent the
    topath directory from being created.
    '''
    # Expand ~ and environment variables in the destination path.
    if topath:
        topath = os.path.expanduser(os.path.expandvars(topath))
    # Make sure the destination directory exists before writing.
    if create_dirs and topath:
        dnm = os.path.dirname(topath)
        if not os.path.isdir(dnm):
            os.makedirs(os.path.abspath(dnm), 0o755)

    def _consume(response):
        # Either return the body bytes or stream them to topath.
        if topath is None:
            return response.read()
        with open(topath, 'wb') as fl:
            shutil.copyfileobj(response, fl)
        return topath

    if six.PY2:
        # Python 2 urlopen objects are not context managers.
        return _consume(urllib.request.urlopen(url))
    with urllib.request.urlopen(url) as response:
        return _consume(response)
def sendVideoWithURL(self, to_, url):
    """Download the video at *url* to a temp file and send it.

    :param to_: recipient identifier, passed through to sendVideo
    :param url: video url to send
    :raises Exception: if the video cannot be downloaded
    """
    path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        # BUG FIX: video data is binary; mode 'w' breaks on Python 3
        # (str vs bytes) and corrupts data on Windows. Use 'wb'.
        with open(path, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
    else:
        raise Exception('Download video failure.')
    try:
        self.sendVideo(to_, path)
    except Exception as e:
        raise (e)
def sendAudioWithUrl(self, to_, url):
    """Download the audio at *url* to a temp file and send it.

    :param to_: recipient identifier, passed through to sendAudio
    :param url: audio url to send
    :raises Exception: if the audio cannot be downloaded
    """
    # BUG FIX: the format string used '%1' (parsed as a width-1 '%d' plus the
    # literal 'ata'); the sibling sendVideoWithURL uses '%i', so match it.
    path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        # BUG FIX: audio data is binary; mode 'w' breaks on Python 3
        # (str vs bytes) and corrupts data on Windows. Use 'wb'.
        with open(path, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
    else:
        raise Exception('Download audio failure.')
    try:
        self.sendAudio(to_, path)
    except Exception as e:
        raise (e)
def get_latexmk(version="ctan", dest="latexmk", verbose=True):
    # Download a latexmk release zip (from CTAN or the author's PSU page),
    # extract just the latexmk.pl script to `dest`, and mark it executable.
    try:
        from urllib.request import urlopen  # Python 3
    except ImportError:
        # Python 2: wrap urlopen so it can be used as a context manager.
        from urllib2 import urlopen as _urlopen
        from contextlib import closing

        def urlopen(*args, **kwargs):
            return closing(_urlopen(*args, **kwargs))
    import shutil
    import zipfile

    if version.lower() == "ctan":
        url = "http://mirrors.ctan.org/support/latexmk.zip"
    else:
        # Versioned archives drop the dots, e.g. "4.52" -> "latexmk-452.zip".
        v = version.replace(".", "")
        url = "http://personal.psu.edu/jcc8/software/latexmk-jcc/latexmk-{}.zip".format(v)

    # Download the whole zip into memory, then pull out only latexmk.pl.
    with io.BytesIO() as bio:
        if verbose:
            print("Downloading latexmk {}...".format(version), file=sys.stderr, end="")
        with urlopen(url) as web:
            shutil.copyfileobj(web, bio, length=131072)
        with zipfile.ZipFile(bio) as z:
            for zinfo in z.infolist():
                if os.path.basename(zinfo.filename) == "latexmk.pl":
                    with z.open(zinfo) as script, io.open(dest, "wb") as out:
                        shutil.copyfileobj(script, out)
                    # executable: https://stackoverflow.com/a/30463972/344821
                    mode = os.stat(dest).st_mode
                    mode |= (mode & 0o444) >> 2  # copy R bits to X
                    os.chmod(dest, mode)
                    break
            else:
                # for/else: runs only when no member matched.
                raise ValueError("Couldn't find latexmk.pl in {}".format(url))
    if verbose:
        print("saved to `{}`.".format(dest), file=sys.stderr)
def save_image(self, image, path, timestamp, size="large"):
    """Download and save image to path.

    Args:
        image: The url of the image.
        path: The directory where the image will be saved.
        timestamp: The time that the image was uploaded.
            It is used for naming the image.
        size: Which size of images to download.
    """

    def print_status(s):
        import sys
        sys.stdout.write("\u001b[1K")
        spinner = ["-", "\\", "|", "/"][self.count % 4]
        print(f"\r{spinner} {s}", end="")

    if not image:
        return

    # Name the local file after its upload timestamp, keeping the extension.
    ext = os.path.splitext(image)[1]
    name = timestamp + ext
    save_dest = os.path.join(path, name)

    if os.path.exists(save_dest):
        print_status(f"Skipping {name}: already downloaded")
        return

    resp = requests.get(image + ":" + size, stream=True)
    if resp.status_code == 200:
        with open(save_dest, "wb") as f:
            resp.raw.decode_content = True
            shutil.copyfileobj(resp.raw, f)
        self.count += 1
        print_status(f"{name} saved")
def DownloadFile(self, fileurl, dlfile): """Download a file.""" try: file_to_dl = urllib2.urlopen(fileurl) tmpfile = open(dlfile, 'wb') shutil.copyfileobj(file_to_dl, tmpfile) except urllib2.URLError, e: print 'Download of %s failed with error %s' % (fileurl, e) sys.exit()
def init_bidaf(bidaf_model_dir: str, download_ntlk_punkt: bool = False) -> bool:
    """Ensure the BiDAF model directory exists and contains bidaf.onnx.

    Optionally downloads the NLTK 'punkt' tokenizer, then fetches the ONNX
    model from the ONNX model zoo if it is not already present.
    Always returns True.
    """
    if os.path.isdir(bidaf_model_dir):
        print("bidaf model directory already present..", file=sys.stderr)
    else:
        print("Creating bidaf model directory..", file=sys.stderr)
        os.makedirs(bidaf_model_dir, exist_ok=True)

    # Download Punkt Sentence Tokenizer
    if download_ntlk_punkt:
        # NOTE(review): 'punkt' is downloaded twice -- once into the model
        # dir and once into the default NLTK data path. Confirm whether the
        # second call is intentional (e.g. a fallback search location).
        nltk.download("punkt", download_dir=bidaf_model_dir)
        nltk.download("punkt")

    # Download bidaf onnx model
    onnx_model_file = os.path.abspath(os.path.join(bidaf_model_dir, "bidaf.onnx"))
    print(f"Checking file {onnx_model_file}..", file=sys.stderr)
    if os.path.isfile(onnx_model_file):
        print("bidaf.onnx downloaded already!", file=sys.stderr)
    else:
        print("Downloading bidaf.onnx...", file=sys.stderr)
        response = requests.get(
            "https://onnxzoo.blob.core.windows.net/models/opset_9/bidaf/bidaf.onnx",
            stream=True,
        )
        # Stream the body straight to disk to avoid buffering the whole model.
        with open(onnx_model_file, "wb") as f:
            response.raw.decode_content = True
            shutil.copyfileobj(response.raw, f)
    return True
def download_image_requests(img_url):
    """Download an image into the internal images temp directory.

    Args:
        img_url: Direct URL of the image; the extension is taken from the URL.
    """
    dir_utils.clear_directory(f'{dir_utils.get_temp_med_dir()}/internal/images')
    img_ext = img_url.rsplit('.', 1)[1]
    s = requests.Session()
    # BUG FIX: r.raw is only readable on a streamed request; without
    # stream=True the body is consumed during the request and copyfileobj
    # writes an empty file.
    r = s.get(img_url, headers={'User-Agent': 'Mozilla/5.0'}, stream=True)
    if r.status_code == 200:
        with open(f"{dir_utils.get_temp_med_dir()}/internal/images/_image.{img_ext}", 'wb') as f:
            r.raw.decode_content = True
            shutil.copyfileobj(r.raw, f)
        dprint(f"Downloaded image from: {img_url}")
    else:
        dprint(f"{r.status_code} Error! - {img_url}")