Diffstat (limited to 'paramiko/sftp_file.py')
-rw-r--r-- | paramiko/sftp_file.py | 60
1 file changed, 35 insertions, 25 deletions
diff --git a/paramiko/sftp_file.py b/paramiko/sftp_file.py
index 8d147342..fdf667cd 100644
--- a/paramiko/sftp_file.py
+++ b/paramiko/sftp_file.py
@@ -64,13 +64,13 @@ class SFTPFile (BufferedFile):
 
     def __del__(self):
         self._close(async=True)
-    
+
     def close(self):
         """
         Close the file.
         """
         self._close(async=False)
-    
+
     def _close(self, async=False):
         # We allow double-close without signaling an error, because real
         # Python file objects do. However, we must protect against actually
@@ -112,7 +112,7 @@
             return True
         # well, we have part of the request. see if another chunk has the rest.
         return self._data_in_prefetch_requests(buf_offset + buf_size, offset + size - buf_offset - buf_size)
-    
+
     def _data_in_prefetch_buffers(self, offset):
         """
         if a block of data is present in the prefetch buffers, at the given
@@ -129,7 +129,7 @@
             # it's not here
             return None
         return index
-    
+
     def _read_prefetch(self, size):
         """
         read data out of the prefetch buffer, if possible. if the data isn't
@@ -149,7 +149,7 @@
             return None
         prefetch = self._prefetch_data[offset]
         del self._prefetch_data[offset]
-        
+
         buf_offset = self._realpos - offset
         if buf_offset > 0:
             self._prefetch_data[offset] = prefetch[:buf_offset]
@@ -158,7 +158,7 @@
             self._prefetch_data[self._realpos + size] = prefetch[size:]
             prefetch = prefetch[:size]
         return prefetch
-        
+
     def _read(self, size):
         size = min(size, self.MAX_REQUEST_SIZE)
         if self._prefetching:
@@ -217,6 +217,16 @@
         """
         self.sftp.sock.setblocking(blocking)
 
+    def seekable(self):
+        """
+        Check if the file supports random access.
+
+        :return:
+            `True` if the file supports random access. If `False`,
+            :meth:`seek` will raise an exception
+        """
+        return True
+
     def seek(self, offset, whence=0):
         self.flush()
         if whence == self.SEEK_SET:
@@ -253,7 +263,7 @@
         attr = SFTPAttributes()
         attr.st_mode = mode
         self.sftp._request(CMD_FSETSTAT, self.handle, attr)
-        
+
     def chown(self, uid, gid):
         """
         Change the owner (``uid``) and group (``gid``) of this file. As with
@@ -294,7 +304,7 @@
         Change the size of this file. This usually extends or shrinks the
         size of the file, just like the ``truncate()`` method on Python file
         objects.
-        
+
         :param size: the new size of the file
         :type size: int or long
         """
@@ -302,17 +312,17 @@
         attr = SFTPAttributes()
         attr.st_size = size
         self.sftp._request(CMD_FSETSTAT, self.handle, attr)
-    
+
     def check(self, hash_algorithm, offset=0, length=0, block_size=0):
         """
         Ask the server for a hash of a section of this file. This can be used
         to verify a successful upload or download, or for various rsync-like
         operations.
-        
+
         The file is hashed from ``offset``, for ``length`` bytes. If ``length``
         is 0, the remainder of the file is hashed. Thus, if both ``offset``
         and ``length`` are zero, the entire file is hashed.
-        
+
         Normally, ``block_size`` will be 0 (the default), and this method
         will return a byte string representing the requested hash (for example,
         a string of length 16 for MD5, or 20 for SHA-1). If a non-zero
@@ -320,12 +330,12 @@
         ``offset + length``) of ``block_size`` bytes is computed as a
         separate hash. The hash results are all concatenated and returned as
         a single string.
-        
+
         For example, ``check('sha1', 0, 1024, 512)`` will return a string of
         length 40. The first 20 bytes will be the SHA-1 of the first 512
         bytes of the file, and the last 20 bytes will be the SHA-1 of the
         next 512 bytes.
-        
+
         :param str hash_algorithm:
             the name of the hash algorithm to use (normally ``"sha1"`` or
             ``"md5"``)
@@ -343,13 +353,13 @@
         :return:
             `str` of bytes representing the hash of each block, concatenated
             together
-        
+
         :raises IOError: if the server doesn't support the "check-file"
             extension, or possibly doesn't support the hash algorithm
             requested
-        
+
         .. note:: Many (most?) servers don't support this extension yet.
-        
+
         .. versionadded:: 1.4
         """
         t, msg = self.sftp._request(CMD_EXTENDED, 'check-file', self.handle,
@@ -358,7 +368,7 @@
         alg = msg.get_text()
         data = msg.get_remainder()
         return data
-    
+
     def set_pipelined(self, pipelined=True):
         """
         Turn on/off the pipelining of write operations to this file. When
@@ -368,13 +378,13 @@
         server responses are collected. This means that if there was an error
         with one of your later writes, an exception might be thrown from
         within `.close` instead of `.write`.
-        
+
         By default, files are not pipelined.
-        
+
         :param bool pipelined:
             ``True`` if pipelining should be turned on for this file; ``False``
             otherwise
-        
+
         .. versionadded:: 1.5
         """
         self.pipelined = pipelined
@@ -385,7 +395,7 @@
         `.read` calls. If reading the entire file, pre-fetching can
         dramatically improve the download speed by avoiding roundtrip latency.
         The file's contents are incrementally buffered in a background thread.
-        
+
         The prefetched data is stored in a buffer until read via the `.read`
         method. Once data has been read, it's removed from the buffer. The
         data may be read in a random order (using `.seek`); chunks of the
@@ -418,20 +428,20 @@
             n += chunk
         if len(chunks) > 0:
             self._start_prefetch(chunks)
-    
+
     def readv(self, chunks):
         """
         Read a set of blocks from the file by (offset, length). This is more
         efficient than doing a series of `.seek` and `.read` calls, since the
         prefetch machinery is used to retrieve all the requested blocks at
         once.
-        
+
         :param chunks:
             a list of (offset, length) tuples indicating which sections of the
             file to read
         :type chunks: list(tuple(long, int))
         :return: a list of blocks read, in the same order as in ``chunks``
-        
+
        .. versionadded:: 1.5.4
         """
         self.sftp._log(DEBUG, 'readv(%s, %r)' % (hexlify(self.handle), chunks))
@@ -470,7 +480,7 @@
         t = threading.Thread(target=self._prefetch_thread, args=(chunks,))
         t.setDaemon(True)
         t.start()
-        
+
     def _prefetch_thread(self, chunks):
         # do these read requests in a temporary thread because there may be
         # a lot of them, so it may block.
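The substance of this change is the new seekable() method; the remaining hunks only strip trailing whitespace. Below is a minimal usage sketch of how a client might exercise the method, assuming a reachable SSH server; the hostname, credentials, and remote path are placeholders, and only public paramiko APIs (SSHClient, SFTPClient.open, SFTPFile.seek/read/readv) are used.

import paramiko

# Placeholder connection details -- substitute a real host and credentials.
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('sftp.example.com', username='user', password='secret')

sftp = client.open_sftp()
f = sftp.open('/tmp/remote-file.bin', 'rb')
try:
    # seekable() (added by this commit) reports that SFTPFile supports
    # random access, so seek()/read() may be used freely.
    print(f.seekable())        # True

    f.seek(1024)               # jump to byte offset 1024
    chunk = f.read(512)        # read 512 bytes starting at that offset

    # readv() batches several (offset, length) requests through the same
    # prefetch machinery described in the docstrings above.
    blocks = list(f.readv([(0, 256), (4096, 256)]))
finally:
    f.close()
    sftp.close()
    client.close()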