-rw-r--r--  .travis.yml | 11
-rw-r--r--  ChangeLog.0 | 42
-rw-r--r--  ChangeLog.1 | 2928
-rw-r--r--  README.rst | 51
-rw-r--r--  demos/demo_server.py | 6
-rw-r--r--  dev-requirements.txt | 13
-rw-r--r--  paramiko/__init__.py | 105
-rw-r--r--  paramiko/_version.py | 2
-rw-r--r--  paramiko/_winapi.py | 39
-rw-r--r--  paramiko/agent.py | 25
-rw-r--r--  paramiko/auth_handler.py | 163
-rw-r--r--  paramiko/ber.py | 6
-rw-r--r--  paramiko/buffered_pipe.py | 49
-rw-r--r--  paramiko/channel.py | 158
-rw-r--r--  paramiko/client.py | 94
-rw-r--r--  paramiko/common.py | 16
-rw-r--r--  paramiko/config.py | 34
-rw-r--r--  paramiko/dsskey.py | 22
-rw-r--r--  paramiko/ecdsakey.py | 140
-rw-r--r--  paramiko/file.py | 70
-rw-r--r--  paramiko/hostkeys.py | 63
-rw-r--r--  paramiko/kex_gex.py | 43
-rw-r--r--  paramiko/kex_group1.py | 17
-rw-r--r--  paramiko/kex_group14.py | 2
-rw-r--r--  paramiko/kex_gss.py | 85
-rw-r--r--  paramiko/message.py | 29
-rw-r--r--  paramiko/packet.py | 132
-rw-r--r--  paramiko/pipe.py | 30
-rw-r--r--  paramiko/pkey.py | 66
-rw-r--r--  paramiko/primes.py | 17
-rw-r--r--  paramiko/proxy.py | 40
-rw-r--r--  paramiko/py3compat.py | 28
-rw-r--r--  paramiko/resource.py | 16
-rw-r--r--  paramiko/rsakey.py | 19
-rw-r--r--  paramiko/server.py | 189
-rw-r--r--  paramiko/sftp.py | 20
-rw-r--r--  paramiko/sftp_attr.py | 20
-rw-r--r--  paramiko/sftp_client.py | 107
-rw-r--r--  paramiko/sftp_file.py | 115
-rw-r--r--  paramiko/sftp_handle.py | 27
-rw-r--r--  paramiko/sftp_server.py | 100
-rw-r--r--  paramiko/sftp_si.py | 44
-rw-r--r--  paramiko/ssh_exception.py | 52
-rw-r--r--  paramiko/ssh_gss.py | 79
-rw-r--r--  paramiko/transport.py | 623
-rw-r--r--  paramiko/util.py | 29
-rw-r--r--  paramiko/win_pageant.py | 13
-rw-r--r--  setup.cfg | 8
-rw-r--r--  setup.py | 48
-rw-r--r--  setup_helper.py | 17
-rw-r--r--  sites/www/changelog.rst | 210
-rw-r--r--  sites/www/conf.py | 3
-rw-r--r--  sites/www/faq.rst | 10
-rw-r--r--  sites/www/installing-1.x.rst | 26
-rw-r--r--  sites/www/installing.rst | 4
-rw-r--r--  tasks.py | 38
-rwxr-xr-x  test.py | 1
-rw-r--r--  tests/loop.py | 2
-rw-r--r--  tests/stub_sftp.py | 8
-rw-r--r--  tests/test_auth.py | 7
-rw-r--r--  tests/test_client.py | 8
-rw-r--r--  tests/test_ecdsa_256.key (renamed from tests/test_ecdsa.key) | 0
-rw-r--r--  tests/test_ecdsa_384.key | 6
-rw-r--r--  tests/test_ecdsa_521.key | 7
-rw-r--r--  tests/test_ecdsa_password_256.key (renamed from tests/test_ecdsa_password.key) | 0
-rw-r--r--  tests/test_ecdsa_password_384.key | 9
-rw-r--r--  tests/test_ecdsa_password_521.key | 10
-rw-r--r--  tests/test_gssapi.py | 8
-rw-r--r--  tests/test_hostkeys.py | 12
-rw-r--r--  tests/test_packetizer.py | 12
-rw-r--r--  tests/test_pkey.py | 199
-rwxr-xr-x  tests/test_sftp.py | 2
-rw-r--r--  tests/test_ssh_gss.py | 4
-rw-r--r--  tests/test_transport.py | 40
-rw-r--r--  tests/test_util.py | 80
-rw-r--r--  tox-requirements.txt | 3
-rw-r--r--  tox.ini | 6
77 files changed, 2532 insertions, 4235 deletions
diff --git a/.travis.yml b/.travis.yml
index 3b7b2b42..c8faf0a2 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -9,6 +9,8 @@ python:
- "3.3"
- "3.4"
- "3.5"
+ - "3.6"
+ - "pypy-5.4.1"
install:
# Self-install for setup.py-driven deps
- pip install -e .
@@ -18,11 +20,10 @@ install:
script:
# Main tests, w/ coverage!
- inv test --coverage
- # Ensure documentation & invoke pipeline run OK.
- # Run 'docs' first since its objects.inv is referred to by 'www'.
- # Also force warnings to be errors since most of them tend to be actual
- # problems.
- - invoke docs -o -W www -o -W
+ # Ensure documentation builds, both sites, maxxed nitpicking
+ - inv sites
+ # flake8 is now possible!
+ - flake8
notifications:
irc:
channels: "irc.freenode.org#paramiko"
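The rewritten `script` section above leans on Invoke tasks (`inv test --coverage`, `inv sites`) plus a plain `flake8` run. As a rough, hedged sketch of what such a task can look like with the Invoke library — the real definitions live in this commit's tasks.py, which is not shown here, so the task body, directory names, and sphinx-build flags below are assumptions:

```python
# Illustrative only: a minimal Invoke task in the spirit of `inv sites`.
# The actual task in tasks.py may differ; the "docs"/"www" site dirs and
# output paths are assumptions for the sake of the example.
from invoke import task

@task
def sites(c):
    # Build both Sphinx trees, treating warnings as errors (-W) and running
    # in nit-picky mode (-n), roughly what "maxxed nitpicking" implies.
    for site in ("docs", "www"):
        c.run("sphinx-build -W -n sites/{0} sites/{0}/_build".format(site))
```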
diff --git a/ChangeLog.0 b/ChangeLog.0
deleted file mode 100644
index c151d251..00000000
--- a/ChangeLog.0
+++ /dev/null
@@ -1,42 +0,0 @@
-
-2003-08-24:
- * implemented the other hashes: all 4 from the draft are working now
- * added 'aes128-cbc' and '3des-cbc' cipher support
- * fixed channel eof/close semantics
-2003-09-12: version "aerodactyl"
- * implemented group-exchange kex ("kex-gex")
- * implemented RSA/DSA private key auth
-2003-09-13:
- * fixed inflate_long and deflate_long to handle negatives, even though
- they're never used in the current ssh protocol
-2003-09-14:
- * fixed session_id handling: re-keying works now
- * added the ability for a Channel to have a fileno() for select/poll
- purposes, although this will cause worse window performance if the
- client app isn't careful
-2003-09-16: version "bulbasaur"
- * fixed pipe (fileno) method to be nonblocking and it seems to work now
- * fixed silly bug that caused large blocks to be truncated
-2003-10-08:
- * patch to fix Channel.invoke_subsystem and add Channel.exec_command
- [vaclav dvorak]
- * patch to add Channel.sendall [vaclav dvorak]
- * patch to add Channel.shutdown [vaclav dvorak]
- * patch to add Channel.makefile and a ChannelFile class which emulates
- a python file object [vaclav dvorak]
-2003-10-26:
- * thread creation no longer happens during construction -- use the new
- method "start_client(event)" to get things rolling
- * re-keying now takes place after 1GB of data or 1 billion packets
- (these limits can be easily changed per-session if needed)
-2003-11-06:
- * added a demo server and host key
-2003-11-09:
- * lots of changes to server mode
- * ChannelFile supports universal newline mode; fixed readline
- * fixed a bug with parsing the remote banner
-2003-11-10: version "charmander"
- * renamed SSHException -> SecshException
- * cleaned up server mode and the demo server
-
-*** for all subsequent changes, please see 'tla changelog'.
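Several of the deleted entries above describe pieces of the Channel API that still exist in paramiko today: exec_command on a channel, sendall, makefile/ChannelFile, and a fileno() usable for select/poll. A minimal, hedged sketch of how that select-friendly interface is typically used follows; the host name, command, and credentials are placeholders, not taken from this repository:

```python
# Illustration of Channel.fileno()/select usage (not part of the diff above).
import select
import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect("example.com", username="user", password="secret")

chan = client.get_transport().open_session()
chan.exec_command("uname -a")

# chan.fileno() lets the channel participate in select()/poll() loops,
# which is the feature the 2003-09-14 entry introduced.
while not chan.exit_status_ready():
    readable, _, _ = select.select([chan], [], [], 1.0)
    if chan in readable and chan.recv_ready():
        print(chan.recv(4096).decode())

client.close()
```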
diff --git a/ChangeLog.1 b/ChangeLog.1
deleted file mode 100644
index 2fdae5a0..00000000
--- a/ChangeLog.1
+++ /dev/null
@@ -1,2928 +0,0 @@
-# do not edit -- automatically generated by arch changelog
-# arch-tag: automatic-ChangeLog--robey@lag.net--2003-public/secsh--dev--1.0
-#
-
-2005-04-18 00:53:57 GMT Robey Pointer <robey@lag.net> patch-164
-
- Summary:
- fix some docs
- Revision:
- secsh--dev--1.0--patch-164
-
- remove some epydoc comments about fileno() being non-portable.
-
- modified files:
- paramiko/channel.py
-
-
-2005-04-18 00:30:52 GMT Robey Pointer <robey@lag.net> patch-163
-
- Summary:
- add SFTPClient.close()
- Revision:
- secsh--dev--1.0--patch-163
-
- add SFTPClient.close() and add a simple little unit test for it.
-
-
- modified files:
- paramiko/sftp_client.py tests/test_sftp.py
-
-
-2005-04-18 00:11:34 GMT Robey Pointer <robey@lag.net> patch-162
-
- Summary:
- avoid os.environ['HOME'] in the demos
- Revision:
- secsh--dev--1.0--patch-162
-
- avoid using os.environ['HOME'], which will never work on windows, and
- use os.path.expanduser() instead. it's semi-moot because windows doesn't
- have a standard location for ssh files, but i think paramiko should set a
- good example anyway.
-
- modified files:
- demo.py demo_simple.py
-
-
-2005-04-16 23:38:22 GMT Robey Pointer <robey@lag.net> patch-161
-
- Summary:
- integrated laptop work (test commit)
- Revision:
- secsh--dev--1.0--patch-161
-
- Patches applied:
-
- * robey@lag.net--2003-public-master-shake/secsh--dev--1.0--base-0
- tag of robey@lag.net--2003-public/secsh--dev--1.0--patch-160
-
- * robey@lag.net--2003-public-master-shake/secsh--dev--1.0--patch-1
- test commit
-
- * robey@lag.net--2003-public/secsh--dev--1.0--base-0
- initial import
-
- * robey@lag.net--2003-public/secsh--dev--1.0--patch-1
- no changes
-
-
- modified files:
- README paramiko/server.py
-
- new patches:
- robey@lag.net--2003-public-master-shake/secsh--dev--1.0--base-0
- robey@lag.net--2003-public-master-shake/secsh--dev--1.0--patch-1
-
-
-2005-04-10 00:46:41 GMT Robey Pointer <robey@lag.net> patch-160
-
- Summary:
- 1.3 marowak
- Revision:
- secsh--dev--1.0--patch-160
-
- bump version to 1.3 / marowak
-
- modified files:
- Makefile README paramiko/__init__.py paramiko/transport.py
- setup.py
-
-
-2005-04-10 00:39:18 GMT Robey Pointer <robey@lag.net> patch-159
-
- Summary:
- clean up SFTPAttributes.__repr__
- Revision:
- secsh--dev--1.0--patch-159
-
- clean up SFTPAttributes repr() a bit.
-
- modified files:
- paramiko/sftp_attr.py
-
-
-2005-04-10 00:13:54 GMT Robey Pointer <robey@lag.net> patch-158
-
- Summary:
- remove ChangeLog from MANIFEST.in
- Revision:
- secsh--dev--1.0--patch-158
-
- remove ChangeLog from the dist list.
-
- modified files:
- MANIFEST.in
-
-
-2005-04-06 07:24:28 GMT Robey Pointer <robey@lag.net> patch-157
-
- Summary:
- change SubsystemHandler/SFTPServerInterface API
- Revision:
- secsh--dev--1.0--patch-157
-
- change the API of SubsystemHandler to accept a reference to the
- ServerInstance object during construction. this will break all code
- that currently creates subsystem handlers (like sftp servers) -- sorry!
-
- lots of little doc fixups (mostly indenting).
-
- modified files:
- paramiko/server.py paramiko/sftp_server.py paramiko/sftp_si.py
- paramiko/transport.py tests/stub_sftp.py
-
-
-2005-03-26 05:53:00 GMT Robey Pointer <robey@lag.net> patch-156
-
- Summary:
- rewrite channel pipes to work on windows
- Revision:
- secsh--dev--1.0--patch-156
-
- the pipe system i was using for simulating an os-level FD (for select) was
- retarded. i realized this week that i could just use a single byte in the
- pipe to signal "data is ready" and not try to feed all incoming data thru
- the pipe -- and then i don't have to try to make the pipe non-blocking (which
- should make it work on windows). a lot of duplicate code got removed and now
- it's all going thru the same code-path on read.
-
- there's still a slight penalty on incoming feeds and calling 'recv' when a
- pipe has been opened (by calling 'fileno'), but it's tiny.
-
- removed a bunch of documentation and comments about things not working on
- windows, since i think they probably do now.
-
-
- removed files:
- .arch-ids/demo_windows.py.id demo_windows.py
-
- modified files:
- MANIFEST.in README paramiko/channel.py
-
-
-2005-03-25 20:06:56 GMT Robey Pointer <robey@lag.net> patch-155
-
- Summary:
- fix sending of large sftp packet sizes
- Revision:
- secsh--dev--1.0--patch-155
-
- fix a bug where packets larger than about 12KB would cause the session to
- die on platforms other than osx. turns out that on most platforms, setting a
- socket timeout also causes timeouts to occur on writes (but not on osx). so
- on a huge write, once the os buffers were full, paramiko would get a
- socket.timeout exception when writing, and bail.
-
- since the timeout is primarily so we can periodically poll to see if the
- session has been killed from elsewhere, do that on a timeout but otherwise
- continue trying to write. large packet sizes (in sftp) should now work.
-
- modified files:
- paramiko/transport.py
-
-
-2005-02-28 08:06:08 GMT Robey Pointer <robey@lag.net> patch-154
-
- Summary:
- even better 1.2 lapras
- Revision:
- secsh--dev--1.0--patch-154
-
- re-bump the version # to 1.2 (with a new date since i added more stuff).
- add 2005 to the copyright date in a bunch of files.
-
-
- modified files:
- Makefile README demo.py demo_server.py demo_simple.py
- demo_windows.py forward.py paramiko/__init__.py
- paramiko/auth_transport.py paramiko/ber.py paramiko/channel.py
- paramiko/common.py paramiko/dsskey.py paramiko/file.py
- paramiko/kex_gex.py paramiko/kex_group1.py
- paramiko/logging22.py paramiko/message.py paramiko/pkey.py
- paramiko/primes.py paramiko/rsakey.py paramiko/server.py
- paramiko/sftp.py paramiko/sftp_attr.py paramiko/sftp_client.py
- paramiko/sftp_handle.py paramiko/sftp_server.py
- paramiko/sftp_si.py paramiko/ssh_exception.py
- paramiko/transport.py paramiko/util.py setup.py test.py
- tests/loop.py tests/stub_sftp.py tests/test_file.py
- tests/test_kex.py tests/test_message.py tests/test_pkey.py
- tests/test_sftp.py tests/test_transport.py
-
-
-2005-02-28 07:49:56 GMT Robey Pointer <robey@lag.net> patch-153
-
- Summary:
- tweak sftp_file write behavior on large blocks of data
- Revision:
- secsh--dev--1.0--patch-153
-
- BufferedFile.write() wasn't correctly dealing with the possibility that the
- underlying write might not write the entire data block at once (even though
- the docs said it would). now that it's working, make sftp_file take
- advantage of it in order to chop up blocks larger than 32kB (the max allowed
- on sftp) and add a unit test for it.
-
-
- modified files:
- paramiko/file.py paramiko/sftp_file.py tests/test_sftp.py
-
-
-2005-02-28 07:17:21 GMT Robey Pointer <robey@lag.net> patch-152
-
- Summary:
- little doc fixes
- Revision:
- secsh--dev--1.0--patch-152
-
- stupid little doc fixups that didn't fit with the other patches.
-
- modified files:
- paramiko/auth_transport.py tests/loop.py
-
-
-2005-02-28 07:16:22 GMT Robey Pointer <robey@lag.net> patch-151
-
- Summary:
- fix race in transport thread startup
- Revision:
- secsh--dev--1.0--patch-151
-
- set active=True from the methods that start the main transport thread, right
- before actually starting the thread. this avoids a race where the main
- thread could be started, but the original thread could wake up from the
- event.wait(0.1) before the new thread actually set the transport active.
- impossible, you say? no machines so slow exist? au contraire, my sad
- little linux box faced this problem earlier today.
-
-
- modified files:
- paramiko/transport.py
-
-
-2005-02-28 07:14:11 GMT Robey Pointer <robey@lag.net> patch-150
-
- Summary:
- when combining stderr with stdout on a channel, merge the buffers too
- Revision:
- secsh--dev--1.0--patch-150
-
- when turning on combine-stderr mode on a channel, grab the channel lock and
- feed any existing stderr buffer into the normal buffer. this should help
- applications (and my unit tests) avoid races between data coming in over
- stderr and setting combine-stderr.
-
- _send_eof is now slightly safer too, although i don't think that really fixed
- anything. it just makes me feel better.
-
- modified files:
- paramiko/channel.py
-
-
-2005-02-28 07:09:02 GMT Robey Pointer <robey@lag.net> patch-149
-
- Summary:
- add thread ids to logs
- Revision:
- secsh--dev--1.0--patch-149
-
- add a logging filter that reports the thread-id of the logger, and use
- that for all paramiko logging. since thread-local stuff didn't appear
- until python 2.4, i hacked up my own little version to assign incrementing
- numbers to threads as they log.
-
-
- modified files:
- paramiko/channel.py paramiko/sftp.py paramiko/sftp_client.py
- paramiko/sftp_server.py paramiko/transport.py paramiko/util.py
-
-
-2005-02-26 21:12:43 GMT Robey Pointer <robey@lag.net> patch-148
-
- Summary:
- forgot to check in stub_sftp
- Revision:
- secsh--dev--1.0--patch-148
-
- yikes! don't forget to check this in: needed for unit tests.
-
- new files:
- tests/.arch-ids/stub_sftp.py.id tests/stub_sftp.py
-
-
-2005-02-26 21:11:04 GMT Robey Pointer <robey@lag.net> patch-147
-
- Summary:
- 1.2 (lapras)
- Revision:
- secsh--dev--1.0--patch-147
-
- bump version stuff to 1.2 / lapras.
-
- modified files:
- Makefile README paramiko/__init__.py paramiko/transport.py
- setup.py
-
-
-2005-02-15 15:48:47 GMT Robey Pointer <robey@lag.net> patch-146
-
- Summary:
- raise better exception on empty key
- Revision:
- secsh--dev--1.0--patch-146
-
- raise a clearer exception when trying to create an empty key.
-
-
- modified files:
- README paramiko/dsskey.py paramiko/rsakey.py
- tests/test_transport.py
-
-
-2005-02-15 15:47:02 GMT Robey Pointer <robey@lag.net> patch-145
-
- Summary:
- add methods for sending/receiving a channel's exit status
- Revision:
- secsh--dev--1.0--patch-145
-
- track a channel's exit status and provide a method (recv_exit_status) to
- block waiting for it to arrive. also provide a convenience method for
- servers to send it (send_exit_status). add shutdown_read and shutdown_write.
- fix a bug in sending window change requests.
-
-
- modified files:
- README paramiko/channel.py paramiko/transport.py
-
-
-2005-02-06 23:32:22 GMT Robey Pointer <robey@lag.net> patch-144
-
- Summary:
- fix docs
- Revision:
- secsh--dev--1.0--patch-144
-
- clean up some of the docs.
-
-
- modified files:
- README paramiko/pkey.py paramiko/sftp_attr.py
-
-
-2005-02-06 23:30:40 GMT Robey Pointer <robey@lag.net> patch-143
-
- Summary:
- fix an sftp unit test
- Revision:
- secsh--dev--1.0--patch-143
-
- fix one of the sftp unit tests to actually work.
-
-
- modified files:
- tests/test_sftp.py
-
-
-2005-02-05 07:45:20 GMT Robey Pointer <robey@lag.net> patch-142
-
- Summary:
- fix windows sample script's HOME
- Revision:
- secsh--dev--1.0--patch-142
-
- fix the HOME environ var to work on windows too.
-
- modified files:
- demo_windows.py
-
-
-2005-01-25 05:17:55 GMT Robey Pointer <robey@lag.net> patch-141
-
- Summary:
- misc logging fixes
- Revision:
- secsh--dev--1.0--patch-141
-
- change the level of some log messages so interesting stuff gets logged at
- info instead of debug. fix an oops where channels defaulted to being in
- ultra debug mode, and make this mode depend on a new Transport method:
- "set_hexdump".
-
-
- modified files:
- paramiko/auth_transport.py paramiko/channel.py
- paramiko/sftp.py paramiko/sftp_client.py
- paramiko/sftp_server.py paramiko/transport.py
-
-
-2005-01-17 10:09:09 GMT Robey Pointer <robey@lag.net> patch-140
-
- Summary:
- more flexible logging
- Revision:
- secsh--dev--1.0--patch-140
-
- some tweaks to make channels etc follow the logger setting of their parent
- transport, so that setting the log channel for a paramiko transport will
- cause all sub-logging to branch out from that channel.
-
- also, close all open file handles when the sftp server ends.
-
-
- modified files:
- paramiko/channel.py paramiko/sftp_attr.py
- paramiko/sftp_client.py paramiko/sftp_handle.py
- paramiko/sftp_server.py paramiko/transport.py
-
-
-2005-01-16 21:03:15 GMT Robey Pointer <robey@lag.net> patch-139
-
- Summary:
- make loopback sftp tests the default
- Revision:
- secsh--dev--1.0--patch-139
-
- change the unit tests to default to always running the sftp tests locally,
- and make a -R option to force the tests to run against a remote server.
- the tests seem to work fine locally, and it helps test out server mode,
- even though there's a danger that they could get isolated from reality
- and only test that paramiko can talk to itself.
-
-
- modified files:
- test.py
-
-
-2005-01-16 20:14:07 GMT Robey Pointer <robey@lag.net> patch-138
-
- Summary:
- doc fixups
- Revision:
- secsh--dev--1.0--patch-138
-
- little doc fixups that i did obsessively on the train one morning.
-
- modified files:
- paramiko/file.py
-
-
-2005-01-09 05:27:07 GMT Robey Pointer <robey@lag.net> patch-137
-
- Summary:
- added listdir_attr()
- Revision:
- secsh--dev--1.0--patch-137
-
- add SFTPClient.listdir_attr() to fetch a list of files & their attributes,
- instead of just their filenames. artur piwko would find this useful.
-
-
- modified files:
- paramiko/sftp_attr.py paramiko/sftp_client.py
-
-
-2004-12-19 19:56:48 GMT Robey Pointer <robey@lag.net> patch-136
-
- Summary:
- loopback sftp test
- Revision:
- secsh--dev--1.0--patch-136
-
- add ability to turn off more tests, and a secret (for now) -X option to do
- the sftp tests via loopback socket. added another symlink sftp test to see
- what happens with absolute symlinks.
-
-
- modified files:
- test.py tests/test_sftp.py
-
-
-2004-12-19 19:50:00 GMT Robey Pointer <robey@lag.net> patch-135
-
- Summary:
- more sftp cleanup
- Revision:
- secsh--dev--1.0--patch-135
-
- oops, this should've been part of the last patch.
-
-
- modified files:
- paramiko/sftp_si.py
-
-
-2004-12-19 19:43:27 GMT Robey Pointer <robey@lag.net> patch-134
-
- Summary:
- cleanup & docs in sftp
- Revision:
- secsh--dev--1.0--patch-134
-
- add some more docs to SFTPHandle, and give a default implementation for
- close() that's usually right. add a flush() to the default implementation
- of write(). document that symlink's args in the sftp protocol are out of
- order (the spec is wrong).
-
-
- modified files:
- paramiko/sftp_handle.py paramiko/sftp_server.py
-
-
-2004-12-13 07:32:14 GMT Robey Pointer <robey@lag.net> patch-133
-
- Summary:
- unit test madness
- Revision:
- secsh--dev--1.0--patch-133
-
- add some more testy bits and fix up some other bits.
-
-
- modified files:
- tests/test_transport.py
-
-
-2004-12-13 07:31:01 GMT Robey Pointer <robey@lag.net> patch-132
-
- Summary:
- oops (continued)
- Revision:
- secsh--dev--1.0--patch-132
-
- er, part 2 of that.
-
-
- modified files:
- paramiko/server.py
-
-
-2004-12-13 07:29:38 GMT Robey Pointer <robey@lag.net> patch-131
-
- Summary:
- move check_global_request
- Revision:
- secsh--dev--1.0--patch-131
-
- move check_global_request into the server interface -- i missed it during
- the initial move (oops).
-
-
- modified files:
- paramiko/transport.py
-
-
-2004-12-13 07:27:39 GMT Robey Pointer <robey@lag.net> patch-130
-
- Summary:
- small fixups
- Revision:
- secsh--dev--1.0--patch-130
-
- move _wait_for_send_window into the right place in Channel. remove outdated
- note from auth_transport. fix download url in setup.py.
-
-
-
- modified files:
- paramiko/auth_transport.py paramiko/channel.py setup.py
-
-
-2004-12-12 09:58:40 GMT Robey Pointer <robey@lag.net> patch-129
-
- Summary:
- 1.1 (kabuto)
- Revision:
- secsh--dev--1.0--patch-129
-
- edit various files to bump the version to 1.1.
- also fix to point to the new url.
-
-
- modified files:
- Makefile README paramiko/__init__.py paramiko/transport.py
- setup.py
-
-
-2004-12-12 09:38:24 GMT Robey Pointer <robey@lag.net> patch-128
-
- Summary:
- more unit tests
- Revision:
- secsh--dev--1.0--patch-128
-
- added unit tests for multi-part auth, exec_command, and invoke_shell.
-
-
- modified files:
- tests/test_transport.py
-
-
-2004-12-12 09:32:17 GMT Robey Pointer <robey@lag.net> patch-127
-
- Summary:
- doc fixups
- Revision:
- secsh--dev--1.0--patch-127
-
- fix some typos in sftp_client docs
-
-
- modified files:
- paramiko/sftp_client.py
-
-
-2004-12-12 09:25:15 GMT Robey Pointer <robey@lag.net> patch-126
-
- Summary:
- server support for stderr & exec_command
- Revision:
- secsh--dev--1.0--patch-126
-
- for the server side of my stderr blunder, add send_stderr & sendall_stderr,
- and make the sending side of makefile_stderr work correctly.
-
- also, call check_channel_exec_request on a server object for exec requests
- on a channel.
-
-
- modified files:
- paramiko/channel.py paramiko/server.py
-
-
-2004-12-12 09:16:03 GMT Robey Pointer <robey@lag.net> patch-125
-
- Summary:
- add client-side multi-part auth support
- Revision:
- secsh--dev--1.0--patch-125
-
- added support for multi-part authentication (even though nobody supports it
- that i've seen). on a successful "partial" auth, the auth_* method will
- return a list of acceptable means to continue authenticating.
-
-
- modified files:
- paramiko/auth_transport.py paramiko/ssh_exception.py
-
-
-2004-12-11 03:44:33 GMT Robey Pointer <robey@lag.net> patch-124
-
- Summary:
- docs fixup
- Revision:
- secsh--dev--1.0--patch-124
-
- fix a comment typo, and add @since designators to a couple of new methods.
-
-
- modified files:
- paramiko/channel.py paramiko/sftp_server.py
-
-
-2004-12-11 03:43:18 GMT Robey Pointer <robey@lag.net> patch-123
-
- Summary:
- clean up authentication
- Revision:
- secsh--dev--1.0--patch-123
-
- add new exception "BadAuthenticationType", which is raised when auth fails
- because your auth type (password or public-key) isn't valid on the server.
-
- used this as an excuse to clean up auth_password and auth_publickey so their
- 'event' arg is optional, and if missing, they block until auth is finished,
- raising an exception on error.
-
- also, don't close the session on failed auth -- the server may let you try
- again.
-
- added some test cases for failed auth.
-
-
- modified files:
- paramiko/__init__.py paramiko/auth_transport.py
- paramiko/ssh_exception.py paramiko/transport.py
- tests/test_transport.py
-
-
-2004-12-10 08:30:44 GMT Robey Pointer <robey@lag.net> patch-122
-
- Summary:
- symlink, readlink
- Revision:
- secsh--dev--1.0--patch-122
-
- add support for symlink command, and finish support for readlink. (i guess
- i started readlink a while ago but forgot to add the right method to the
- SFTPServerInterface class.)
-
-
- modified files:
- paramiko/sftp_server.py paramiko/sftp_si.py tests/test_sftp.py
-
-
-2004-12-10 08:27:43 GMT Robey Pointer <robey@lag.net> patch-121
-
- Summary:
- other part of that last patch
- Revision:
- secsh--dev--1.0--patch-121
-
- oops, forgot this part.
-
- modified files:
- paramiko/transport.py
-
-
-2004-12-10 08:25:28 GMT Robey Pointer <robey@lag.net> patch-120
-
- Summary:
- add stderr support methods
- Revision:
- secsh--dev--1.0--patch-120
-
- big embarrassment: i didn't read the ssh2 docs close enough, and all this
- time paramiko wasn't handling "extended_data" packets, which contain stderr
- output.
-
- so now, several new functions: recv_stderr_ready() and recv_stderr() to
- mirror recv_ready() and recv(), and set_combined_stderr() to force stderr
- to be combined into stdout. also, makefile_stderr() to create a fake file
- object to represent stderr.
-
-
- modified files:
- paramiko/channel.py
-
-
-2004-12-10 07:55:33 GMT Robey Pointer <robey@lag.net> patch-119
-
- Summary:
- reformat README
- Revision:
- secsh--dev--1.0--patch-119
-
- reformatted the README to a slightly smaller margin, just because.
-
-
- modified files:
- README
-
-
-2004-12-09 04:15:12 GMT Robey Pointer <robey@lag.net> patch-118
-
- Summary:
- fix SFTPFile gettimeout/settimeout
- Revision:
- secsh--dev--1.0--patch-118
-
- i don't think the gettimeout/settimeout calls on SFTPFile ever worked.
- also, simplify the implementation of _get_size() since it's nearly
- identical to stat().
-
-
- modified files:
- paramiko/sftp_file.py
-
-
-2004-12-09 02:42:36 GMT Robey Pointer <robey@lag.net> patch-117
-
- Summary:
- readme comments
- Revision:
- secsh--dev--1.0--patch-117
-
- add another fixme to the readme
-
- modified files:
- README
-
-
-2004-11-26 22:07:31 GMT Robey Pointer <robey@lag.net> patch-116
-
- Summary:
- doc fixups
- Revision:
- secsh--dev--1.0--patch-116
-
- explain "recv_ready" better, and add debug descriptions for the kex codes.
-
-
- modified files:
- README paramiko/channel.py paramiko/common.py
-
-
-2004-11-25 19:39:34 GMT Robey Pointer <robey@lag.net> patch-115
-
- Summary:
- fix CONNECTION_FAILED_CODE
- Revision:
- secsh--dev--1.0--patch-115
-
- oops, fix typo in channel request failed.
-
- modified files:
- paramiko/transport.py
-
-
-2004-11-22 07:40:39 GMT Robey Pointer <robey@lag.net> patch-114
-
- Summary:
- fix typo in channel
- Revision:
- secsh--dev--1.0--patch-114
-
- fix typo that alain found: pipd_wfd -> pipe_wfd.
-
-
- modified files:
- paramiko/channel.py
-
-
-2004-11-22 07:27:21 GMT Robey Pointer <robey@lag.net> patch-113
-
- Summary:
- sftp server support!
- Revision:
- secsh--dev--1.0--patch-113
-
- finally check in sftp_handle (file handle abstraction), sftp_si (server
- interface), and sftp_server (server implementation) -- all of which make
- a roughly 90% implementation of server-side sftp.
-
-
-
- new files:
- paramiko/.arch-ids/sftp_handle.py.id
- paramiko/.arch-ids/sftp_server.py.id
- paramiko/.arch-ids/sftp_si.py.id paramiko/sftp_handle.py
- paramiko/sftp_server.py paramiko/sftp_si.py
-
- modified files:
- README demo_windows.py paramiko/__init__.py
-
-
-2004-11-22 07:07:08 GMT Robey Pointer <robey@lag.net> patch-112
-
- Summary:
- add finish_subsystem()
- Revision:
- secsh--dev--1.0--patch-112
-
- when a SubsystemHandler is being decomissioned (the client has closed the
- channel or transport, or the socket went away), make a callback to let the
- handler do any shutdown it needs to.
-
-
- modified files:
- paramiko/server.py
-
-
-2004-11-22 07:04:31 GMT Robey Pointer <robey@lag.net> patch-111
-
- Summary:
- fix extremely unlikely channel counter wrapping
- Revision:
- secsh--dev--1.0--patch-111
-
- Transport's channel counter can overflow after 4 billion some channels are
- created. make it wrap back around after 16 million instead. also allow the
- logging channel to be set manually. fix some comments elsewhere.
-
-
- modified files:
- paramiko/channel.py paramiko/primes.py paramiko/transport.py
-
-
-2004-11-22 07:01:43 GMT Robey Pointer <robey@lag.net> patch-110
-
- Summary:
- fix Transport.get_username() to work in server mode too
- Revision:
- secsh--dev--1.0--patch-110
-
- whenever i split the 'username' field into username and auth_username,
- i guess that made get_username() stop working for server mode (because the
- username was stored in a different field). this should fix it.
-
- modified files:
- paramiko/auth_transport.py
-
-
-2004-11-07 03:10:53 GMT Robey Pointer <robey@lag.net> patch-109
-
- Summary:
- v1.0 (jigglypuff)
- Revision:
- secsh--dev--1.0--patch-109
-
- bump all the version numbers up to 1.0 (jigglypuff).
-
- modified files:
- Makefile README paramiko/__init__.py paramiko/transport.py
- setup.py
-
-
-2004-11-07 02:51:42 GMT Robey Pointer <robey@lag.net> patch-108
-
- Summary:
- add filename to SFTPAttributes
- Revision:
- secsh--dev--1.0--patch-108
-
- add filename to the attributes stored in an SFTPAttributes object.
-
- modified files:
- paramiko/sftp_attr.py
-
-
-2004-11-07 02:31:48 GMT Robey Pointer <robey@lag.net> patch-107
-
- Summary:
- fix kex_gex
- Revision:
- secsh--dev--1.0--patch-107
-
- fix kex_gex (group-exchange key exchange) to, *cough*, work again, and also
- layout kex_group1 a little more sanely.
-
- modified files:
- paramiko/kex_gex.py paramiko/kex_group1.py
-
-
-2004-11-07 02:29:54 GMT Robey Pointer <robey@lag.net> patch-106
-
- Summary:
- fix chmod +x on demo_windows.py
- Revision:
- secsh--dev--1.0--patch-106
-
- forgot to make demo_windows +x
-
-
-2004-11-07 02:29:20 GMT Robey Pointer <robey@lag.net> patch-105
-
- Summary:
- move ChangeLog
- Revision:
- secsh--dev--1.0--patch-105
-
- move ChangeLog out of the way because tla can autogenerate any useful
- ChangeLog.
-
-
- renamed files:
- .arch-ids/ChangeLog.id
- ==> .arch-ids/ChangeLog-old.id
- ChangeLog
- ==> ChangeLog-old
-
-
-2004-11-07 02:28:33 GMT Robey Pointer <robey@lag.net> patch-104
-
- Summary:
- fix location of SFTPError
- Revision:
- secsh--dev--1.0--patch-104
-
- fix location of SFTPError.
-
- modified files:
- paramiko/__init__.py paramiko/sftp_client.py
-
-
-2004-11-07 02:17:18 GMT Robey Pointer <robey@lag.net> patch-103
-
- Summary:
- rename sftp constants
- Revision:
- secsh--dev--1.0--patch-103
-
- replace oddly named sftp constants (FX_OK for example) with names that make
- a bit more sense when sober (SFTP_OK).
-
- modified files:
- paramiko/__init__.py paramiko/sftp.py paramiko/sftp_client.py
-
-
-2004-11-07 02:08:11 GMT Robey Pointer <robey@lag.net> patch-102
-
- Summary:
- add key exchange tests + 1 more sftp test
- Revision:
- secsh--dev--1.0--patch-102
-
- add test suite for key-exchange protocols, since i apparently broke the
- "gex" protocol recently and never noticed. also add an sftp unit test for
- mkdir/rmdir.
-
- new files:
- tests/.arch-ids/test_kex.py.id tests/test_kex.py
-
- modified files:
- test.py tests/test_sftp.py
-
-
-2004-11-07 02:00:50 GMT Robey Pointer <robey@lag.net> patch-101
-
- Summary:
- remove old demo keys
- Revision:
- secsh--dev--1.0--patch-101
-
- the keys are in tests/ now.
-
- removed files:
- .arch-ids/demo_dss_key.id .arch-ids/demo_rsa_key.id
- demo_dss_key demo_rsa_key
-
-
-2004-11-06 20:32:08 GMT Robey Pointer <robey@lag.net> patch-100
-
- Summary:
- don't forget demo_windows.py
- Revision:
- secsh--dev--1.0--patch-100
-
- update MANIFEST.in to include demo_windows.py and not include the demo
- keys (they're in tests/ now). clean up the README to explain the demo
- scripts better now, since there are so many of them. then fix up the
- demo scripts to look in tests/ for the keys.
-
- demo_windows.py doesn't need to call get_pty() (in fact, i think that's
- blowing openssh's mind) and was executing the wrong command.
-
-
- modified files:
- MANIFEST.in README demo_server.py demo_simple.py
- demo_windows.py
-
-
-2004-11-01 07:07:48 GMT Robey Pointer <robey@lag.net> patch-99
-
- Summary:
- use getpass
- Revision:
- secsh--dev--1.0--patch-99
-
- convert raw_input to getpass as suggested many weeks ago.
-
- modified files:
- forward.py
-
-
-2004-11-01 03:54:01 GMT Robey Pointer <robey@lag.net> patch-98
-
- Summary:
- don't unlink a Channel until the server closes it too
- Revision:
- secsh--dev--1.0--patch-98
-
- when close()'ing a Channel, don't immediately unlink it from the Transport.
- instead, wait for the server to send a close message.
-
- this should fix a bug where doing close() on an EOF'd channel would cause
- the entire transport to be killed, because the server would send an
- 'exit-status' and 'close' message for a channel that we no longer had a
- record of.
-
-
- modified files:
- paramiko/channel.py
-
-
-2004-11-01 03:43:28 GMT Robey Pointer <robey@lag.net> patch-97
-
- Summary:
- better debugging, improve subsytem handler
- Revision:
- secsh--dev--1.0--patch-97
-
- add a list of ssh packet names for debugging. improve the server-mode
- subsystem handler so it can take extra parameters (list or keyword) and
- pass them to the subsystem constructor. remove a misleading comment
- about rekeying (which was already implemented).
-
-
- modified files:
- paramiko/common.py paramiko/server.py paramiko/transport.py
-
-
-2004-11-01 03:37:42 GMT Robey Pointer <robey@lag.net> patch-96
-
- Summary:
- remove key.valid check
- Revision:
- secsh--dev--1.0--patch-96
-
- oops! 'key.valid' no longer works -- catch the SSHException instead, and log
- it.
-
-
- modified files:
- paramiko/auth_transport.py
-
-
-2004-10-23 07:36:23 GMT Robey Pointer <robey@lag.net> patch-95
-
- Summary:
- ivysaur 0.9
- Revision:
- secsh--dev--1.0--patch-95
-
- update ivysaur release date, and add the list of changes to the README
- file.
-
-
- modified files:
- Makefile README paramiko/__init__.py
-
-
-2004-10-20 16:52:51 GMT Robey Pointer <robey@lag.net> patch-94
-
- Summary:
- start testing Transport
- Revision:
- secsh--dev--1.0--patch-94
-
- the beginnings of tests for Transport. only the bare minimum is there right
- now.
-
- also started doc'ing things up to ivysaur.
-
- new files:
- .arch-ids/demo_windows.py.id demo_windows.py
- tests/.arch-ids/loop.py.id
- tests/.arch-ids/test_transport.py.id tests/loop.py
- tests/test_transport.py
-
- modified files:
- Makefile README paramiko/__init__.py setup.py test.py
-
-
-2004-10-18 04:54:27 GMT Robey Pointer <robey@lag.net> patch-93
-
- Summary:
- switch Transport.connect() to using a Pkey object for the host key
- Revision:
- secsh--dev--1.0--patch-93
-
- i suddenly realized that passing "hostkeytype" and "hostkey" as strings to
- Transport.connect() was pretty silly since i went to all the effort of making
- a class specifically for holding keys. so Transport.connect() now just takes
- host-key argument: "hostkey" as a PKey object.
-
- updated the demos to use PKey objects when reading the host key file, and to
- use the new "hostkey" argument.
-
-
- modified files:
- demo.py demo_simple.py paramiko/pkey.py paramiko/transport.py
-
-
-2004-09-25 22:07:59 GMT Robey Pointer <robey@lag.net> patch-92
-
- Summary:
- add rsa/dss key object unit tests
- Revision:
- secsh--dev--1.0--patch-92
-
- add tests for rsa/dss key objects -- yay!
-
-
- new files:
- tests/.arch-ids/test_dss.key.id
- tests/.arch-ids/test_pkey.py.id
- tests/.arch-ids/test_rsa.key.id tests/test_dss.key
- tests/test_pkey.py tests/test_rsa.key
-
-
-2004-09-25 22:03:48 GMT Robey Pointer <robey@lag.net> patch-91
-
- Summary:
- fix test.py to use options instead of env vars, sftp tests default off
- Revision:
- secsh--dev--1.0--patch-91
-
- fix up the test framework so that the sftp unit tests aren't always run (you
- have to ask for them explicitly) and they take their configuration from
- command-line options. they still require a remote server.
-
- modified files:
- test.py tests/test_sftp.py
-
-
-2004-09-25 21:58:11 GMT Robey Pointer <robey@lag.net> patch-90
-
- Summary:
- fix __init__
- Revision:
- secsh--dev--1.0--patch-90
-
- fix __init__ to export BufferedFile and randpool, and to catch up with the
- changes from a week or 2 ago where sftp_attr & friends were split off.
-
- modified files:
- paramiko/__init__.py
-
-
-2004-09-25 21:47:19 GMT Robey Pointer <robey@lag.net> patch-89
-
- Summary:
- fix some Transport docs
- Revision:
- secsh--dev--1.0--patch-89
-
- document that Transport also would like close() and settimeout() to exist
- on the socket-like object passed to the constructor.
-
- modified files:
- paramiko/transport.py
-
-
-2004-09-25 21:32:53 GMT Robey Pointer <robey@lag.net> patch-88
-
- Summary:
- add Message.rewind()
- Revision:
- secsh--dev--1.0--patch-88
-
- add rewind() method to Message, which just resets the pointer so you can
- start reading from the beginning again. this is useful for some tests.
-
- modified files:
- paramiko/message.py tests/test_message.py
-
-
-2004-09-25 21:28:23 GMT Robey Pointer <robey@lag.net> patch-87
-
- Summary:
- clean up pkey interface
- Revision:
- secsh--dev--1.0--patch-87
-
- change the pkey interface so that it's no longer possible to have a pkey
- that doesn't represent a valid key. (ie: no more "blank" key objects.)
- also add "get_bits" and "can_sign" methods to determine the key bit length
- and whether it can sign things (contains the "private parts") respectively.
-
- modified files:
- paramiko/dsskey.py paramiko/pkey.py paramiko/rsakey.py
-
-
-2004-09-11 21:01:32 GMT Robey Pointer <robey@lag.net> patch-86
-
- Summary:
- unit tests for Message
- Revision:
- secsh--dev--1.0--patch-86
-
- spanking new unit tests for Message. i'm trying to fix the embarrassment
- of having so little of paramiko testable. next up is Transport!
-
- new files:
- tests/.arch-ids/test_message.py.id tests/test_message.py
-
-
-2004-09-11 20:56:01 GMT Robey Pointer <robey@lag.net> patch-85
-
- Summary:
- move SFTPFile and SFTPAttributes into their own files
- Revision:
- secsh--dev--1.0--patch-85
-
- move SFTPFile and SFTPAttributes into their own files.
-
- new files:
- paramiko/.arch-ids/sftp_attr.py.id
- paramiko/.arch-ids/sftp_file.py.id paramiko/sftp_attr.py
- paramiko/sftp_file.py
-
- modified files:
- paramiko/sftp.py paramiko/sftp_client.py
-
-
-2004-09-11 20:50:39 GMT Robey Pointer <robey@lag.net> patch-84
-
- Summary:
- add sftp.normalize
- Revision:
- secsh--dev--1.0--patch-84
-
- kevin c. dorff pointed out that it would be nice to expose a way to
- determine the server's "current working directory", so this new method
- (normalize) directly maps to REALPATH.
-
- modified files:
- paramiko/sftp_client.py
-
-
-2004-09-11 20:43:09 GMT Robey Pointer <robey@lag.net> patch-83
-
- Summary:
- tweak Message.add() in the key exchanges
- Revision:
- secsh--dev--1.0--patch-83
-
- use the new Message.add() behavior to make a little code here much easier
- to read.
-
- modified files:
- paramiko/kex_gex.py paramiko/kex_group1.py
-
-
-2004-09-11 20:40:08 GMT Robey Pointer <robey@lag.net> patch-82
-
- Summary:
- doc fixes
- Revision:
- secsh--dev--1.0--patch-82
-
- fix "string" -> "str" in types when documenting BufferedFile.
-
- modified files:
- paramiko/file.py
-
-
-2004-09-11 20:37:59 GMT Robey Pointer <robey@lag.net> patch-81
-
- Summary:
- more unit tests
- Revision:
- secsh--dev--1.0--patch-81
-
- add test for BufferedFile.read(-1) and sftp.normalize().
-
- modified files:
- tests/test_file.py tests/test_sftp.py
-
-
-2004-09-11 20:36:49 GMT Robey Pointer <robey@lag.net> patch-80
-
- Summary:
- move SubsystemHandler to server.py
- Revision:
- secsh--dev--1.0--patch-80
-
- move SubsystemHandler into server.py where it makes more sense (it's part of
- the server interface).
-
- also fix up paramiko's "version string" used in ssh2 negotiation to stop
- saying "pyssh" and start saying "paramiko". :)
-
- modified files:
- paramiko/server.py paramiko/transport.py
-
-
-2004-09-11 20:35:19 GMT Robey Pointer <robey@lag.net> patch-79
-
- Summary:
- Message.add() can take many args
- Revision:
- secsh--dev--1.0--patch-79
-
- a bit of cleanup to Message: add() can now take any number of params, and
- will add them all in order (using type guessing).
-
- modified files:
- paramiko/message.py
-
-
-2004-09-09 01:36:45 GMT Robey Pointer <robey@lag.net> patch-78
-
- Summary:
- fix rbuffer -> _rbuffer in 3 places i missed
- Revision:
- secsh--dev--1.0--patch-78
-
- fix 3 places where "rbuffer" hadn't been converted to "_rbuffer". thanks to
- kevin c. dorff for the bug report.
-
- modified files:
- paramiko/file.py
-
-
-2004-09-07 06:56:49 GMT Robey Pointer <robey@lag.net> patch-77
-
- Summary:
- docs for SubsystemHandler
- Revision:
- secsh--dev--1.0--patch-77
-
- add documentation to constructor for SubsystemHandler.
-
- modified files:
- paramiko/transport.py
-
-
-2004-09-07 06:54:31 GMT Robey Pointer <robey@lag.net> patch-76
-
- Summary:
- add sftp_client.py
- Revision:
- secsh--dev--1.0--patch-76
-
- i retardedly forgot to import this file a few days ago: it's the split-out
- client mode for sftp. it now also has some changes to adapt it to the
- improved SFTPAttributes object API.
-
- new files:
- paramiko/.arch-ids/sftp_client.py.id paramiko/sftp_client.py
-
-
-2004-09-07 06:51:03 GMT Robey Pointer <robey@lag.net> patch-75
-
- Summary:
- clean up SFTPAttributes
- Revision:
- secsh--dev--1.0--patch-75
-
- add english descriptions to the FX_* error codes of sftp. clean up (and
- document) SFTPAttributes since it's exported now, and make it simple to
- generate one from a python os.stat object. make "_pythonize" the default --
- that is, just use the same field names as python does for os.stat. (i'm not
- sure why i didn't do it that way in the first place; probably ignorance.)
- also add str() method that converts the SFTPAttributes into a string suitable
- for use in ls (used in an obscure way in sftp servers).
-
- modified files:
- paramiko/sftp.py
-
-
-2004-09-07 06:45:53 GMT Robey Pointer <robey@lag.net> patch-74
-
- Summary:
- note pycrypto 2.0 in README
- Revision:
- secsh--dev--1.0--patch-74
-
- update the README to note that pycrypto 2.0 works (i just tried it). also
- fix the name from pyCrypt back to pycrypto -- that project is having trouble
- making up its mind about naming. :)
-
- modified files:
- README
-
-
-2004-09-05 07:44:03 GMT Robey Pointer <robey@lag.net> patch-73
-
- Summary:
- split sftp into sftp, sftp_client; renamed SFTP -> SFTPClient
- Revision:
- secsh--dev--1.0--patch-73
-
- add sftp_client file, and split out the common code (sftp) from stuff specific
- to client mode (sftp_client). renamed SFTP class to SFTPClient, but left an
- alias so old code will still work.
-
- renamed a bunch of sftp constants now that they're better hidden from epydoc.
-
- modified files:
- README paramiko/__init__.py paramiko/sftp.py
-
-
-2004-09-05 07:41:45 GMT Robey Pointer <robey@lag.net> patch-72
-
- Summary:
- some framework for adding subsystem handlers in server mode
- Revision:
- secsh--dev--1.0--patch-72
-
- you can now register a subsystem with a Transport by passing in the name
- (like "sftp") and a class (like a hypothetical SFTPServer). the default
- ServerInterface.check_channel_request_subsystem now checks this table in
- Transport, and if it finds a match, it creates a new thread for the handler
- and calls into it. a new class SubsystemHandler is added for this purpose
- (to be subclassed).
-
- modified files:
- paramiko/server.py paramiko/transport.py
-
-
-2004-09-05 07:37:40 GMT Robey Pointer <robey@lag.net> patch-71
-
- Summary:
- remove redundant 'auth_complete' member
- Revision:
- secsh--dev--1.0--patch-71
-
- remove the redundant 'auth_complete' field and just use 'authenticated' for
- both client and server mode. this makes the repr() string look correct in
- server mode instead of always claiming that the transport is un-auth'd.
-
- modified files:
- paramiko/auth_transport.py
-
-
-2004-09-03 22:39:20 GMT Robey Pointer <robey@lag.net> patch-70
-
- Summary:
- clean up server interface; no longer need to subclass Channel
- Revision:
- secsh--dev--1.0--patch-70
-
- - export AUTH_*, OPEN_FAILED_*, and the new OPEN_SUCCEEDED into the paramiko
- namespace instead of making people dig into paramiko.Transport.AUTH_* etc.
- - move all of the check_* methods from Channel to ServerInterface so apps
- don't need to subclass Channel anymore just to run an ssh server
- - ServerInterface.check_channel_request() returns an error code now, not a
- new Channel object
- - fix demo_server.py to follow all these changes
- - fix a bunch of places where i used "string" in docstrings but meant "str"
- - added Channel.get_id()
-
- modified files:
- README demo_server.py paramiko/__init__.py
- paramiko/auth_transport.py paramiko/channel.py
- paramiko/common.py paramiko/server.py paramiko/sftp.py
- paramiko/transport.py
-
-
-2004-08-31 02:44:56 GMT Robey Pointer <robey@lag.net> patch-69
-
- Summary:
- clean up SecurityOptions
- Revision:
- secsh--dev--1.0--patch-69
-
- the preferences are now tuples in Transport, and passed as tuples out of
- SecurityOptions, so that the options can't be modified without setting them
- back to the options field again. the algorithm lists in Transport are used
- to validate the fields.
-
- modified files:
- paramiko/transport.py
-
-
-2004-08-30 20:22:10 GMT Robey Pointer <robey@lag.net> patch-68
-
- Summary:
- added Transport.get_security_options()
- Revision:
- secsh--dev--1.0--patch-68
-
- just something i wanted to play with:
- added Transport.get_security_options() which returns a SecurityOptions object.
- this object is a kind of proxy for the 4 "preferred_*" fields in Transport,
- and lets me avoid exposing those fields directly in case i change my mind
- later about how they should be stored.
-
- added some docs to Channel explaining that the request methods now return
- True/False, and fixed up docs in a few other places.
-
- modified files:
- paramiko/__init__.py paramiko/channel.py paramiko/server.py
- paramiko/sftp.py paramiko/transport.py
-
-
-2004-08-28 04:21:12 GMT Robey Pointer <robey@lag.net> patch-67
-
- Summary:
- replay patch 63 (missing channel changes)
- Revision:
- secsh--dev--1.0--patch-67
-
- i'm still getting the hang of tla/arch, obviously.
-
- replay patch 63, which was meant to be part of the later mega-patch, but
- apparently when i reversed it, i lost it entirely.
-
- modified files:
- paramiko/channel.py
-
-
-2004-08-27 00:57:40 GMT Robey Pointer <robey@lag.net> patch-66
-
- Summary:
- new ServerInterface class, outbound rekey works, etc.
- Revision:
- secsh--dev--1.0--patch-66
-
- a bunch of changes that i'm too lazy to split out into individual patches:
- * all the server overrides from transport.py have been moved into a separate
- class ServerInterface, so server code doesn't have to subclass the whole
- paramiko library
- * updated demo_server to subclass ServerInterface
- * when re-keying during a session, block other messages until the new keys
- are activated (openssh doensn't like any other traffic during a rekey)
- * re-key when outbound limits are tripped too (was only counting inbound
- traffic)
- * don't log scary things on EOF
-
-
- new files:
- paramiko/.arch-ids/server.py.id paramiko/server.py
-
- modified files:
- README demo_server.py paramiko/__init__.py
- paramiko/auth_transport.py paramiko/transport.py
-
-
-2004-08-27 00:28:33 GMT Robey Pointer <robey@lag.net> patch-65
-
- Summary:
- add settimeout/gettimeout/setblocking, some bugfixes.
- Revision:
- secsh--dev--1.0--patch-65
-
- hide the command and response codes in sftp so they aren't exported.
- add settimeout/gettimeout/setblocking that just wrap calls to the underlying
- socket or channel. fix _read_all to not catch timeout exceptions.
-
-
- modified files:
- paramiko/sftp.py
-
-
-2004-08-27 00:26:35 GMT Robey Pointer <robey@lag.net> patch-64
-
- Summary:
- reverse messed-up patch
- Revision:
- secsh--dev--1.0--patch-64
-
- Patches applied:
-
- * robey@lag.net--2003-public/secsh--dev--1.0--base-0
- initial import
-
- * robey@lag.net--2003-public/secsh--dev--1.0--patch-1
- no changes
-
-
- modified files:
- paramiko/channel.py {arch}/=tagging-method
-
-
-2004-08-27 00:06:42 GMT Robey Pointer <robey@lag.net> patch-63
-
- Summary:
- add settimeout/gettimeout/setblocking, some bugfixes.
- Revision:
- secsh--dev--1.0--patch-63
-
- hide the command and response codes in sftp so they aren't exported.
- add settimeout/gettimeout/setblocking that just wrap calls to the underlying
- socket or channel. fix _read_all to not catch timeout exceptions.
-
- modified files:
- paramiko/channel.py
-
-
-2004-06-27 20:14:15 GMT Robey Pointer <robey@lag.net> patch-62
-
- Summary:
- version -> horsea
- Revision:
- secsh--dev--1.0--patch-62
-
- up version to horsea.
-
- modified files:
- Makefile README paramiko/__init__.py setup.py
- {arch}/secsh/secsh--dev/secsh--dev--1.0/robey@lag.net--2003-public/patch-log/patch-1
-
-
-2004-06-10 18:12:00 GMT Robey Pointer <robey@lag.net> patch-61
-
- Summary:
- no more Foobar
- Revision:
- secsh--dev--1.0--patch-61
-
- fix "Foobar" to be "Paramiko" in the one place i missed it in all the gpl
- headers. sigh. :)
-
- modified files:
- paramiko/__init__.py paramiko/auth_transport.py
- paramiko/ber.py paramiko/common.py paramiko/dsskey.py
- paramiko/kex_gex.py paramiko/kex_group1.py
- paramiko/logging22.py paramiko/message.py paramiko/pkey.py
- paramiko/primes.py paramiko/rsakey.py
- paramiko/ssh_exception.py paramiko/util.py test.py
-
-
-2004-06-10 18:08:50 GMT Robey Pointer <robey@lag.net> patch-60
-
- Summary:
- limit read/write requests to 32KB, advertise 32KB max packet size
- Revision:
- secsh--dev--1.0--patch-60
-
- one of the unit tests was failing because the openssh sftp server was dropping
- the connection without any error. turns out they have a maximum allowed write
- size (possibly around 64KB). the sftp rfcs have a small hint that some servers
- may drop read/write requests of greater than 32KB.
-
- so, all reads are limited to 32KB, and all writes > 32KB are now chopped up
- and sent in 32KB chunks. this seems to keep openssh happy.
-
- also, we now advertise 32KB max packet size instead of 8KB (the speed
- improves a lot), and log when we read/write a packet. and sftp files are
- flushed on seek.
-
- modified files:
- paramiko/sftp.py paramiko/transport.py
-
-
-2004-06-10 18:02:13 GMT Robey Pointer <robey@lag.net> patch-59
-
- Summary:
- speed up parts of BufferedFile
- Revision:
- secsh--dev--1.0--patch-59
-
- BufferedFile uses cStringIO for the write buffer now (i don't actually notice
- any speed difference so this might revert later) and the default buffer size
- has been upped from 1KB to 8KB.
-
- when scanning for linefeeds (when writing to a line-buffered file), only scan
- the newly-written bytes, since we know all the previously buffered data is
- linefeed-free. this was the #1 slowdown on the 1MB-file unit test.
-
- also, limit the buffering on line-buffered files to whatever the default
- buffer size is. there's no reason to buffer 1MB waiting for a linefeed.
-
- modified files:
- paramiko/file.py
-
-
-2004-06-10 17:55:17 GMT Robey Pointer <robey@lag.net> patch-58
-
- Summary:
- some Channel fixes for max packet size & blocking on zero window
- Revision:
- secsh--dev--1.0--patch-58
-
- some clean-ups and fixes to channels:
- * when send() is blocked on a zero-width window, check that the channel is
- still open. this was causing some lockups.
- * set a lower bound to the "maximum packet size" we accept from the remote
- host. if they tell us anything less than 1KB, assume they meant 1KB. (it's
- not reasonable to fragment below that.)
- * leave a little padding instead of cutting right up to the maximum packet
- size: some space will be taken up by protocol overhead.
- * turn off some of the debug log lines unless "ultra_debug" is on (nobody
- cares about the feed info)
-
-
- modified files:
- paramiko/channel.py
-
-
-2004-06-10 17:35:30 GMT Robey Pointer <robey@lag.net> patch-57
-
- Summary:
- more unit tests
- Revision:
- secsh--dev--1.0--patch-57
-
- add a unit test for sending a large (1MB) file with line buffering but no
- linefeeds (this triggered several bugs and inefficiencies), and another test
- to verify that the write buffer is flushed on seek.
-
- modified files:
- tests/test_file.py tests/test_sftp.py
-
-
-2004-05-31 23:48:10 GMT Robey Pointer <robey@lag.net> patch-56
-
- Summary:
- add forward.py demo script; bump to gyarados
- Revision:
- secsh--dev--1.0--patch-56
-
- add a demo script to show how to do local port forwarding.
-
- add gyarados to all the docs and bump the version number everywhere.
-
- new files:
- .arch-ids/forward.py.id forward.py
-
- modified files:
- MANIFEST.in Makefile README paramiko/__init__.py setup.py
-
-
-2004-05-29 18:58:11 GMT Robey Pointer <robey@lag.net> patch-55
-
- Summary:
- add an sftp unit test for making 100 files
- Revision:
- secsh--dev--1.0--patch-55
-
- create 100 files on the remote server, set their mode with chmod, then verify
- that they're all there and contain the right data. valeriy is reporting that
- sometimes he's getting stuck after 20 and though i'm not seeing it, i want to
- add a test to try to pin it down.
-
- modified files:
- tests/test_sftp.py
-
-
-2004-05-29 18:56:10 GMT Robey Pointer <robey@lag.net> patch-54
-
- Summary:
- add direct-tcpip ability to open_channel
- Revision:
- secsh--dev--1.0--patch-54
-
- open_channel can now be given a dest_addr and src_addr, which are filled in
- if the channel type is "forwarded-tcpip" or "direct-tcpip". these channel
- types are used in remote & local port forwarding, respectively. i've only
- tested "direct-tcpip" but i think if one works, they both should work.
-
- also fixed a bug in connect where it was still assuming the old meaning for
- get_remove_server_key() (oops!) and changed the sense of a send() failure
- from <= 0 to < 0 since it may be possible for send() to return 0 and it not
- be an EOF error.
-
- modified files:
- paramiko/transport.py
-
-
-2004-05-29 18:48:23 GMT Robey Pointer <robey@lag.net> patch-53
-
- Summary:
- add note about utf8 encodings
- Revision:
- secsh--dev--1.0--patch-53
-
- add info to the README about what to do if python complains about missing
- encodings. veleriy pogrebitskiy ran into this and had advice.
-
- modified files:
- README
-
-
-2004-05-17 07:41:50 GMT Robey Pointer <robey@lag.net> patch-52
-
- Summary:
- fix deadlock in closing a channel
- Revision:
- secsh--dev--1.0--patch-52
-
- closing a channel would enter an odd codepath where the lock was grabbed,
- some stuff was done, then another function was called where the lock was
- grabbed again. unfortunately python locks aren't monitors so this would
- deadlock. instead, make the smaller function lock-free with an explicit
- notice that you must be holding the lock before calling.
-
- modified files:
- paramiko/channel.py
-
-
-2004-05-17 00:43:43 GMT Robey Pointer <robey@lag.net> patch-51
-
- Summary:
- fix utf8, raise packet size, log exceptions, be more lax with sfp servers
- Revision:
- secsh--dev--1.0--patch-51
-
- explicitly import utf8 encodings for "freezing" (and also because not all
- platforms come with utf8, apparently). raise the max acceptable packet size
- to 8kB, cuz 2kB was too low. log exceptions at error level instead of debug
- level. and don't reject older sftp servers.
-
- modified files:
- paramiko/auth_transport.py paramiko/sftp.py
- paramiko/transport.py
-
-
-2004-04-23 22:55:16 GMT Robey Pointer <robey@lag.net> patch-50
-
- Summary:
- fearow date and last-minute fixes
- Revision:
- secsh--dev--1.0--patch-50
-
- update release date of fearow to 23apr. fix channel._set_closed() to grab
- the lock before notifying the in/out buffers that the channel is closed.
- try roger's trick for finding the home folder on windows.
-
- modified files:
- Makefile README paramiko/__init__.py paramiko/channel.py
- paramiko/common.py
-
-
-2004-04-08 06:31:08 GMT Robey Pointer <robey@lag.net> patch-49
-
- Summary:
- fix doc typos
- Revision:
- secsh--dev--1.0--patch-49
-
-
- modified files:
- paramiko/dsskey.py paramiko/rsakey.py paramiko/transport.py
-
-
-2004-04-08 05:48:16 GMT Robey Pointer <robey@lag.net> patch-48
-
- Summary:
- set version number to fearow
- Revision:
- secsh--dev--1.0--patch-48
-
- set version number to fearow.
-
- modified files:
- Makefile README paramiko/__init__.py setup.py
-
-
-2004-04-08 05:12:20 GMT Robey Pointer <robey@lag.net> patch-47
-
- Summary:
- add socket.timeout for py22
- Revision:
- secsh--dev--1.0--patch-47
-
- oops, forgot this vital part of the py22 patches. roger binns sent me a
- code patch that included this snip.
-
- modified files:
- paramiko/common.py
-
-
-2004-04-07 16:05:48 GMT Robey Pointer <robey@lag.net> patch-46
-
- Summary:
- README update notes
- Revision:
- secsh--dev--1.0--patch-46
-
- added notes on what's new, what to watch out for in py22. added a "since:
- fearow" to all the relevant API calls that are new.
-
- modified files:
- README paramiko/auth_transport.py paramiko/dsskey.py
- paramiko/pkey.py paramiko/rsakey.py paramiko/transport.py
-
-
-2004-04-07 15:52:07 GMT Robey Pointer <robey@lag.net> patch-45
-
- Summary:
- add set_keepalive()
- Revision:
- secsh--dev--1.0--patch-45
-
- add set_keepalive() to set an automatic keepalive mechanism. (while waiting
- for a packet on a connection, we periodically check if it's time to send a
- keepalive packet.)
-
- modified files:
- paramiko/transport.py
-
-
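A usage sketch of the keepalive mechanism above; the host, credentials, and
30-second interval are placeholders:

    import paramiko

    t = paramiko.Transport(('ssh.example.com', 22))
    t.connect(username='user', password='secret')
    # send a keepalive packet whenever the connection has been idle
    # for 30 seconds
    t.set_keepalive(30)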
-2004-04-07 06:07:29 GMT Robey Pointer <robey@lag.net> patch-44
-
- Summary:
- add get_username() method for remembering who you auth'd as
- Revision:
- secsh--dev--1.0--patch-44
-
- add get_username() method for remembering who you auth'd as. also, fix these
- bugs:
- * "continue" auth response counted as a failure (in server mode).
- * try to import 'logging' in py22 before falling back to the fake logger,
- in case they have a backported version of 'logging'
- * raise the right exception when told to read a private key from a file that
- isn't a private key file
- * tell channels to close when the transport dies
-
- modified files:
- paramiko/auth_transport.py paramiko/channel.py
- paramiko/common.py paramiko/pkey.py paramiko/transport.py
-
-
-2004-04-06 22:03:21 GMT Robey Pointer <robey@lag.net> patch-43
-
- Summary:
- fix encrypted private key files
- Revision:
- secsh--dev--1.0--patch-43
-
- the random byte padding on private key files' BER data was confusing openssh,
- so switch to null-byte padding, which is slightly less secure but works with
- crappy old openssh. also, enforce the mode when writing the private key
- file. we really really want it to be 0600. (python seems to ignore the
- mode normally.)
-
- modified files:
- paramiko/pkey.py
-
-
-2004-04-06 08:16:02 GMT Robey Pointer <robey@lag.net> patch-42
-
- Summary:
- support py22, more or less
- Revision:
- secsh--dev--1.0--patch-42
-
- add roger binns' patches for supporting python 2.2. i hedged a bit on the
- logging stuff and just added some trickery to let logging be stubbed out for
- python 2.2. this changed a lot of import statements but i managed to avoid
- hacking at any of the existing logging.
-
- socket timeouts are required for the threads to notice when they've been
- deactivated. worked around it by using the 'select' module on py22.
-
- also fixed the sftp unit tests to cope with a password-protected private key.
-
- new files:
- paramiko/.arch-ids/logging22.py.id paramiko/logging22.py
-
- modified files:
- README demo.py demo_server.py demo_simple.py
- paramiko/__init__.py paramiko/auth_transport.py
- paramiko/channel.py paramiko/common.py paramiko/kex_gex.py
- paramiko/kex_group1.py paramiko/message.py paramiko/sftp.py
- paramiko/transport.py paramiko/util.py tests/test_sftp.py
-
-
-2004-04-05 22:32:03 GMT Robey Pointer <robey@lag.net> patch-41
-
- Summary:
- make get_remote_server_key() return a PKey object
- Revision:
- secsh--dev--1.0--patch-41
-
- a good suggestion from roger binns: make get_remote_server_key() just return
- a pkey object instead of a tuple of strings. all the strings can be extracted
- from the pkey object, as well as other potentially useful things.
-
- modified files:
- demo.py paramiko/transport.py
-
-
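With that change, everything the old tuple carried can be pulled off the
returned PKey; a sketch, assuming a Transport t that has finished negotiation:

    from binascii import hexlify

    key = t.get_remote_server_key()
    print(key.get_name())                  # e.g. 'ssh-rsa'
    print(hexlify(key.get_fingerprint()))  # MD5 fingerprint of the public key
    print(key.get_base64())                # base64 blob, as stored in known_hosts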
-2004-04-05 19:36:40 GMT Robey Pointer <robey@lag.net> patch-40
-
- Summary:
- add dss key generation too, and fix some bugs
- Revision:
- secsh--dev--1.0--patch-40
-
- added the ability to generate dss keys and write private dss key files,
- similar to rsa. in the process, fixed a couple of bugs with ber encoding
- and writing password-encrypted key files. the key has to be padded to the
- block size of the cipher -- it's very difficult to determine how the others
- do this, so i just add random bytes to the end.
-
- fixed the simple demo to use Transport's (host, port) constructor for
- simplicity, and fixed a bug where the standard demo's DSS login wouldn't
- work.
-
- also, move the common logfile setup crap into util so all the demos can just
- call that one.
-
- modified files:
- demo.py demo_simple.py paramiko/ber.py paramiko/dsskey.py
- paramiko/pkey.py paramiko/rsakey.py paramiko/util.py
-
-
-2004-04-05 10:37:18 GMT Robey Pointer <robey@lag.net> patch-39
-
- Summary:
- add global request mechanism
- Revision:
- secsh--dev--1.0--patch-39
-
- add transport.global_request() to make a global-style request (usually an
- extension to the protocol -- like keepalives) and handle requests from the
- remote host. incoming requests are now handled and responded to correctly,
- which should make openssh-style keepalives work. (before, we would silently
- ignore them, which was wrong.)
-
- modified files:
- paramiko/common.py paramiko/message.py paramiko/transport.py
-
-
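The request side of the mechanism is a single call; the request name below is
illustrative, and wait=True blocks until the server replies:

    # returns a Message with the reply on success, or None if the server
    # denied (or ignored) the request
    reply = t.global_request('keepalive@openssh.com', wait=True)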
-2004-04-05 10:24:33 GMT Robey Pointer <robey@lag.net> patch-38
-
- Summary:
- add common.py file
- Revision:
- secsh--dev--1.0--patch-38
-
- missing from previous change because tla doesn't like to add files in some
- situations. (frown)
-
-
- new files:
- paramiko/.arch-ids/common.py.id paramiko/common.py
-
-
-2004-04-05 10:16:31 GMT Robey Pointer <robey@lag.net> patch-37
-
- Summary:
- can now generate rsa keys (not dss yet)
- Revision:
- secsh--dev--1.0--patch-37
-
- added functionality to ber to create ber streams. added some common methods
- to PKey to allow dumping the key to base64 (the format used by openssh for
- public key files and host key lists), and a factory for creating a key from
- a private key file, and a common way to save private keys. RSAKey luckily
- didn't have to change that much.
-
- also added a factory method to RSAKey to generate a new key.
-
-
- modified files:
- paramiko/ber.py paramiko/pkey.py paramiko/rsakey.py
-
-
-2004-04-05 10:12:59 GMT Robey Pointer <robey@lag.net> patch-36
-
- Summary:
- add common.py for commonly used constants and globals
- Revision:
- secsh--dev--1.0--patch-36
-
- common.py now stores the constants and globals.
- lots of renaming because of this.
-
- modified files:
- paramiko/auth_transport.py paramiko/channel.py
- paramiko/kex_gex.py paramiko/kex_group1.py
- paramiko/transport.py paramiko/util.py
-
-
-2004-04-02 02:41:43 GMT Robey Pointer <robey@lag.net> patch-35
-
- Summary:
- add send_ignore
- Revision:
- secsh--dev--1.0--patch-35
-
- add send_ignore() call to allow for sending garbage ignored packets to the
- remote side.
-
- modified files:
- paramiko/transport.py
-
-
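Usage is a one-liner against a live transport; the byte count is optional
(a random length is chosen if omitted):

    t.send_ignore()      # random-length SSH_MSG_IGNORE packet
    t.send_ignore(128)   # or choose the number of junk bytes yourself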
-2004-03-16 07:33:09 GMT Robey Pointer <robey@lag.net> patch-34
-
- Summary:
- fix some arcana in unpacking private keys
- Revision:
- secsh--dev--1.0--patch-34
-
- "!= type([])" is a pretty obscure way to say it. let's try "is not list"
- which is a lot more readable.
-
- (mostly this is a test to make sure tla is working okay on my laptop.)
-
- modified files:
- paramiko/dsskey.py paramiko/rsakey.py
-
-
-2004-03-09 01:09:17 GMT Robey Pointer <robey@lag.net> patch-33
-
- Summary:
- include tests in manifest
- Revision:
- secsh--dev--1.0--patch-33
-
- include the tests in the manifest for dist, and remove some outdated notes in
- NOTES about the exported API (this is doc'd wayyy better in epydoc now).
-
- modified files:
- MANIFEST.in NOTES
-
-
-2004-03-08 17:54:19 GMT Robey Pointer <robey@lag.net> patch-32
-
- Summary:
- add unit tests
- Revision:
- secsh--dev--1.0--patch-32
-
- add unit tests for BufferedFile and SFTP (it's a start). remove the demo sftp
- client because it was 99% copied from the other demos, which makes it kinda
- confusing. the unit tests are a much better example.
-
- new files:
- .arch-ids/test.py.id test.py tests/.arch-ids/=id
- tests/.arch-ids/test_file.py.id
- tests/.arch-ids/test_sftp.py.id tests/test_file.py
- tests/test_sftp.py
-
- removed files:
- .arch-ids/demo_sftp.py.id demo_sftp.py
-
- new directories:
- tests tests/.arch-ids
-
-
-2004-03-08 17:52:25 GMT Robey Pointer <robey@lag.net> patch-31
-
- Summary:
- bump version number to eevee
- Revision:
- secsh--dev--1.0--patch-31
-
- bump the version number to eevee in a few places and talk about the unit
- tests.
-
- modified files:
- Makefile README paramiko/__init__.py setup.py
-
-
-2004-03-08 17:50:49 GMT Robey Pointer <robey@lag.net> patch-30
-
- Summary:
- finish up client sftp support
- Revision:
- secsh--dev--1.0--patch-30
-
- added 'stat' to SFTPFile and SFTP, documented 'open' and 'listdir', and added
- 'rmdir', 'lstat', 'symlink', 'chmod', 'chown', 'utime', 'readlink'.
-
- turned off ultra debugging now that the unit tests are all working.
-
- modified files:
- paramiko/sftp.py
-
-
-2004-03-08 17:45:44 GMT Robey Pointer <robey@lag.net> patch-29
-
- Summary:
- fix some docs and BufferedFile.readline
- Revision:
- secsh--dev--1.0--patch-29
-
- fix some documentation and fix readline()'s universal newline support to
- always return strings ending with '\n', regardless of how they were in the
- original file. (this is an obvious feature of python's universal newline
- support that i somehow missed before.)
-
- modified files:
- paramiko/file.py paramiko/message.py
-
-
-2004-03-08 09:47:47 GMT Robey Pointer <robey@lag.net> patch-28
-
- Summary:
- fix lingering thread bug
- Revision:
- secsh--dev--1.0--patch-28
-
- this bug has been in there forever and i could never figure out a workaround
- till now.
-
- when the python interpreter exits, it doesn't necessarily destroy the
- remaining objects or call __del__ on anything, and it will lock up until all
- threads finish running. how the threads are supposed to notice the exiting
- interpreter has always been sort of a mystery to me.
-
- tonight i figured out how to use the 'atexit' module to register a handler
- that runs when the interpreter exits. now we keep a list of active threads
- and ask them all to exit on shutdown. no more going to another shell to
- kill -9 python! yeah!!
-
- modified files:
- paramiko/transport.py
-
-
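The pattern described above, reduced to a standalone sketch (the names here
are illustrative, not paramiko's internals):

    import atexit
    import threading

    _active_threads = []

    class Worker(threading.Thread):
        def __init__(self):
            threading.Thread.__init__(self)
            self.stop_event = threading.Event()
            _active_threads.append(self)

        def run(self):
            while not self.stop_event.is_set():
                self.stop_event.wait(0.1)  # periodic work goes here

    def _join_all():
        # runs at interpreter exit: ask every worker to stop, then wait,
        # so shutdown no longer hangs on lingering threads
        for thread in _active_threads:
            thread.stop_event.set()
            thread.join()

    atexit.register(_join_all)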
-2004-03-04 08:21:45 GMT Robey Pointer <robey@lag.net> patch-27
-
- Summary:
- add BufferedFile abstraction
- Revision:
- secsh--dev--1.0--patch-27
-
- SFTP client mode is mostly functional. there are probably still some bugs
- but most of the operations on "file" objects have survived my simple tests.
-
- BufferedFile wraps a simpler stream in something that looks like a python
- file (and can even handle seeking if the stream underneath supports it).
- it's meant to be subclassed. most of it is ripped out of what used to be
- ChannelFile so i can reuse it for sftp -- ChannelFile is now tiny.
-
- SFTP and Message are now exported.
-
- fixed util.format_binary_line to not quote spaces.
-
- new files:
- .arch-ids/demo_sftp.py.id demo_sftp.py
- paramiko/.arch-ids/file.py.id paramiko/.arch-ids/sftp.py.id
- paramiko/file.py paramiko/sftp.py
-
- modified files:
- paramiko/__init__.py paramiko/channel.py paramiko/message.py
- paramiko/util.py
-
-
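A minimal subclass sketch of the abstraction described above; it leans on
BufferedFile's private _set_mode/_read hooks, so treat it as illustrative
rather than a stable API:

    from paramiko.file import BufferedFile

    class BytesBackedFile(BufferedFile):
        """Serve reads out of an in-memory bytes object."""

        def __init__(self, data):
            BufferedFile.__init__(self)
            self._data = data
            self._pos = 0
            self._set_mode('r')

        def _read(self, size):
            chunk = self._data[self._pos:self._pos + size]
            self._pos += len(chunk)
            return chunk  # an empty result signals EOF

    f = BytesBackedFile(b'hello\nworld\n')
    print(f.readline())  # 'hello\n' -- universal newlines come for free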
-2004-01-27 02:04:59 GMT Robey Pointer <robey@lag.net> patch-26
-
- Summary:
- Transport constructor can take hostname or address tuple
- Revision:
- secsh--dev--1.0--patch-26
-
- part of an ongoing attempt to make "simple" versions of some of the API calls,
- so you can do common-case operations with just a few calls:
-
- Transport's constructor will now let you pass in a string or tuple instead
- of a socket-like object. if you pass in a string, it assumes the string is
- a hostname (with optional ":port" segment) and turns that into an address
- tuple. if you pass in a tuple, it assumes it's an address tuple. in both
- cases, it then creates a socket, connects to the given address, and then
- continues as if that was the socket passed in.
-
- the idea being that you can call Transport('example.com') and it will do
- the right thing.
-
- modified files:
- paramiko/transport.py
-
-
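So the three forms below should end up equivalent (host and port are
placeholders):

    import socket
    import paramiko

    t1 = paramiko.Transport('ssh.example.com:22')      # hostname, optional ":port"
    t2 = paramiko.Transport(('ssh.example.com', 22))   # address tuple

    sock = socket.create_connection(('ssh.example.com', 22))
    t3 = paramiko.Transport(sock)                       # pre-connected socket, as before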
-2004-01-27 02:00:19 GMT Robey Pointer <robey@lag.net> patch-25
-
- Summary:
- pkey no longer raises binascii.Error
- Revision:
- secsh--dev--1.0--patch-25
-
- catch binascii.Error in the private key decoder and convert it into an
- SSHException. there's no reason people should have to care that it was a
- decoding error vs. any of the other million things that could be wrong in
- a corrupt key file.
-
- modified files:
- paramiko/pkey.py
-
-
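The wrapping amounts to something like this sketch (not paramiko's literal
code):

    import binascii
    from paramiko.ssh_exception import SSHException

    def decode_key_blob(b64_text):
        try:
            return binascii.a2b_base64(b64_text)
        except binascii.Error as e:
            # hide the codec-level detail behind the library's own exception
            raise SSHException('base64 decoding error: %s' % e)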
-2004-01-27 01:45:44 GMT Robey Pointer <robey@lag.net> patch-24
-
- Summary:
- document more of Message; add get_int64
- Revision:
- secsh--dev--1.0--patch-24
-
- all of the get_* methods are now documented, but there's a bit more to do.
- get_int64 added for eventual sftp support.
-
- modified files:
- paramiko/message.py
-
-
-2004-01-04 10:33:05 GMT Robey Pointer <robey@lag.net> patch-23
-
- Summary:
- quick doc fix.
- Revision:
- secsh--dev--1.0--patch-23
-
- fix broken cross-link in kex_gex docs.
-
- modified files:
- paramiko/kex_gex.py
-
-
-2004-01-04 10:26:00 GMT Robey Pointer <robey@lag.net> patch-22
-
- Summary:
- fix MANIFEST.in, change version numbers to 0.9-doduo, fix LGPL notices
- Revision:
- secsh--dev--1.0--patch-22
-
- fixed MANIFEST.in to include the demo scripts, LICENSE, and ChangeLog.
- upped everything to version 0.9-doduo.
-
- fixed the copyright notice, and added the LGPL banner to the top of every
- python file.
-
- modified files:
- MANIFEST.in Makefile NOTES README paramiko/__init__.py
- paramiko/auth_transport.py paramiko/ber.py paramiko/channel.py
- paramiko/dsskey.py paramiko/kex_gex.py paramiko/kex_group1.py
- paramiko/message.py paramiko/pkey.py paramiko/primes.py
- paramiko/rsakey.py paramiko/ssh_exception.py
- paramiko/transport.py paramiko/util.py setup.py
-
-
-2004-01-04 10:07:35 GMT Robey Pointer <robey@lag.net> patch-21
-
- Summary:
- MANIFEST -> MANIFEST.in, fix setup.py.
- Revision:
- secsh--dev--1.0--patch-21
-
- out with MANIFEST, in with MANIFEST.in.
-
- new files:
- .arch-ids/MANIFEST.in.id MANIFEST.in
-
- removed files:
- .arch-ids/MANIFEST.id MANIFEST
-
- modified files:
- setup.py
-
-
-2004-01-04 09:29:13 GMT Robey Pointer <robey@lag.net> patch-20
-
- Summary:
- more docs, and password-protected key files can now be read
- Revision:
- secsh--dev--1.0--patch-20
-
- lots more documentation, some of it moved out of the README file, which is
- now much smaller and less rambling.
-
- repr(Transport) now reports the number of bits used in the cipher.
-
- cleaned up BER to use util functions, and throw a proper exception (the new
- BERException) on error. it doesn't ever have to be a full BER decoder, but
- it can at least comb its hair and tuck in its shirt.
-
- lots of stuff added to PKey.read_private_key_file so it can try to decode
- password-protected key files. right now it only understands "DES-EDE3-CBC"
- format, but this is the only format i've seen openssh make so far. if the
- key is password-protected, but no password was given, a new exception
- (PasswordRequiredException) is raised so an outer layer can ask for a password
- and try again.
-
- modified files:
- README demo.py demo_server.py paramiko/__init__.py
- paramiko/auth_transport.py paramiko/ber.py paramiko/channel.py
- paramiko/dsskey.py paramiko/pkey.py paramiko/primes.py
- paramiko/rsakey.py paramiko/ssh_exception.py
- paramiko/transport.py paramiko/util.py
-
-
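The "ask for a password and try again" flow looks roughly like this; the key
path is a placeholder and from_private_key_file is the modern spelling of the
loader:

    import getpass
    import paramiko

    try:
        key = paramiko.RSAKey.from_private_key_file('/home/user/.ssh/id_rsa')
    except paramiko.PasswordRequiredException:
        # the key is encrypted: prompt for the passphrase and retry
        passphrase = getpass.getpass('Key passphrase: ')
        key = paramiko.RSAKey.from_private_key_file(
            '/home/user/.ssh/id_rsa', password=passphrase)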
-2003-12-31 06:31:43 GMT Robey Pointer <robey@lag.net> patch-19
-
- Summary:
- renamed auth_key -> auth_publickey; more docs.
- Revision:
- secsh--dev--1.0--patch-19
-
- renamed Transport.auth_key to auth_publickey for consistency. and lots more
- documentation.
-
- modified files:
- README demo.py demo_server.py paramiko/__init__.py
- paramiko/auth_transport.py paramiko/transport.py
-
-
-2003-12-30 22:24:21 GMT Robey Pointer <robey@lag.net> patch-18
-
- Summary:
- added public-key support to server mode, more docs
- Revision:
- secsh--dev--1.0--patch-18
-
- added public-key support to server mode (it can now verify a client signature)
- and added a demo of that to the demo_server.py script (user_rsa_key). in the
- process, cleaned up the API of PKey so that now it only has to know about
- signing and verifying ssh2 blobs, and can be hashed and compared with other
- keys (comparing & hashing only the public parts of the key). keys can also
- be created from strings now too.
-
- some more documentation and hiding private methods.
-
- new files:
- .arch-ids/user_rsa_key.id .arch-ids/user_rsa_key.pub.id
- user_rsa_key user_rsa_key.pub
-
- modified files:
- Makefile demo_server.py paramiko/__init__.py
- paramiko/auth_transport.py paramiko/dsskey.py
- paramiko/kex_gex.py paramiko/kex_group1.py paramiko/pkey.py
- paramiko/rsakey.py paramiko/transport.py
-
-
-2003-12-30 07:18:20 GMT Robey Pointer <robey@lag.net> patch-17
-
- Summary:
- lots more documentation, and added Transport.connect()
- Revision:
- secsh--dev--1.0--patch-17
-
- renamed demo_host_key to demo_rsa_key. moved changelog to a separate file,
- and indicated that future changelog entries should be fetched from tla.
- tried to clean up "__all__" in a way that makes epydoc still work.
-
- added lots more documentation, and renamed many methods and vars to hide
- them as private non-exported API.
-
- Transport's ModulusPack is now a static member, so it only has to be loaded
- once, and can then be used by any future Transport object.
-
- added Transport.connect(), which tries to wrap all the SSH2 negotiation and
- authentication into one method. you should be able to create a Transport,
- call connect(), and then create channels.
-
- new files:
- .arch-ids/ChangeLog.id .arch-ids/demo_simple.py.id ChangeLog
- demo_simple.py paramiko/.arch-ids/pkey.py.id paramiko/pkey.py
-
- removed files:
- .arch-ids/paramiko.py.id paramiko.py
-
- modified files:
- Makefile NOTES README demo.py demo_server.py
- paramiko/__init__.py paramiko/auth_transport.py
- paramiko/channel.py paramiko/dsskey.py paramiko/kex_gex.py
- paramiko/kex_group1.py paramiko/rsakey.py
- paramiko/transport.py setup.py {arch}/=tagging-method
-
- renamed files:
- .arch-ids/demo_host_key.id
- ==> .arch-ids/demo_rsa_key.id
- demo_host_key
- ==> demo_rsa_key
-
-
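In other words, the short path becomes (host and credentials are placeholders;
passing no hostkey skips server-key verification):

    import paramiko

    t = paramiko.Transport(('ssh.example.com', 22))
    t.connect(username='user', password='secret')  # negotiate + authenticate
    chan = t.open_session()
    chan.exec_command('uptime')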
-2003-12-28 03:20:42 GMT Robey Pointer <robey@lag.net> patch-16
-
- Summary:
- hook up server-side kex-gex; add more documentation
- Revision:
- secsh--dev--1.0--patch-16
-
- group-exchange kex should work now on the server side. it will only be
- advertised if a "moduli" file has been loaded (see the -gasp- docs) so we
- don't spend hours (literally. hours.) computing primes. some of the logic
- was previously wrong, too, since it had never been tested.
-
- fixed repr() string for Transport/BaseTransport. moved is_authenticated to
- Transport where it belongs.
-
- added lots of documentation (but still only about 10% documented). lots of
- methods were made private finally.
-
- new files:
- paramiko/.arch-ids/primes.py.id paramiko/primes.py
-
- modified files:
- NOTES demo.py demo_server.py paramiko/__init__.py
- paramiko/auth_transport.py paramiko/channel.py
- paramiko/kex_gex.py paramiko/kex_group1.py
- paramiko/transport.py paramiko/util.py
-
-
-2003-12-27 02:03:44 GMT Robey Pointer <robey@lag.net> patch-15
-
- Summary:
- fix up new paramiko/ folder.
- Revision:
- secsh--dev--1.0--patch-15
-
- moved SSHException to a new file (ssh_exception.py) and turned paramiko.py
- into an __init__.py file. i'm still not entirely sure how this normally
- works, so i may have done something wrong, but it's supposed to work the
- same as before.
-
- new files:
- paramiko/.arch-ids/__init__.py.id
- paramiko/.arch-ids/ssh_exception.py.id paramiko/__init__.py
- paramiko/ssh_exception.py
-
- modified files:
- paramiko/auth_transport.py paramiko/channel.py
- paramiko/dsskey.py paramiko/kex_gex.py paramiko/kex_group1.py
- paramiko/transport.py
-
-
-2003-12-27 01:49:19 GMT Robey Pointer <robey@lag.net> patch-14
-
- Summary:
- move the paramiko files into a paramiko/ folder.
- Revision:
- secsh--dev--1.0--patch-14
-
- just moving the files into a folder. it won't build this way yet.
-
- new files:
- paramiko/.arch-ids/=id
-
- renamed files:
- .arch-ids/auth_transport.py.id
- ==> paramiko/.arch-ids/auth_transport.py.id
- .arch-ids/ber.py.id
- ==> paramiko/.arch-ids/ber.py.id
- .arch-ids/channel.py.id
- ==> paramiko/.arch-ids/channel.py.id
- .arch-ids/dsskey.py.id
- ==> paramiko/.arch-ids/dsskey.py.id
- .arch-ids/kex_gex.py.id
- ==> paramiko/.arch-ids/kex_gex.py.id
- .arch-ids/kex_group1.py.id
- ==> paramiko/.arch-ids/kex_group1.py.id
- .arch-ids/message.py.id
- ==> paramiko/.arch-ids/message.py.id
- .arch-ids/rsakey.py.id
- ==> paramiko/.arch-ids/rsakey.py.id
- .arch-ids/transport.py.id
- ==> paramiko/.arch-ids/transport.py.id
- .arch-ids/util.py.id
- ==> paramiko/.arch-ids/util.py.id
- auth_transport.py
- ==> paramiko/auth_transport.py
- ber.py
- ==> paramiko/ber.py
- channel.py
- ==> paramiko/channel.py
- dsskey.py
- ==> paramiko/dsskey.py
- kex_gex.py
- ==> paramiko/kex_gex.py
- kex_group1.py
- ==> paramiko/kex_group1.py
- message.py
- ==> paramiko/message.py
- rsakey.py
- ==> paramiko/rsakey.py
- transport.py
- ==> paramiko/transport.py
- util.py
- ==> paramiko/util.py
-
- new directories:
- paramiko paramiko/.arch-ids
-
-
-2003-12-24 22:09:43 GMT Robey Pointer <robey@lag.net> patch-13
-
- Summary:
- fix a deadlock/race in handle_eof & close
- Revision:
- secsh--dev--1.0--patch-13
-
- (patch from fred gansevles)
- add locking around the eof handler and the close() call, so we can't be in
- both simultaneously.
-
- modified files:
- channel.py
-
-
-2003-12-24 20:49:38 GMT Robey Pointer <robey@lag.net> patch-12
-
- Summary:
- fix dss key signing
- Revision:
- secsh--dev--1.0--patch-12
-
- (expanded on a patch from fred gansevles)
- add a demo dss key for server mode, and fix some bugs that had caused the dss
- signing stuff to never work before. the demo_server is a bit more verbose
- now, too. both key types (RSAKey & DSSKey) now have a function to return the
- fingerprint of the key, and both versions of read_private_key_file() now raise
- exceptions on failure, instead of just silently setting "valid" to false.
-
- new files:
- .arch-ids/demo_dss_key.id demo_dss_key
-
- modified files:
- demo_server.py dsskey.py kex_gex.py kex_group1.py paramiko.py
- rsakey.py transport.py
-
-
-2003-12-23 06:44:56 GMT Robey Pointer <robey@lag.net> patch-11
-
- Summary:
- in server mode, don't offer keys we don't have
- Revision:
- secsh--dev--1.0--patch-11
-
- (from Paolo Losi) in server mode, when advertising which key methods we
- support, don't list methods that we don't have any existing keys for.
-
- modified files:
- transport.py
-
-
-2003-12-23 06:36:27 GMT Robey Pointer <robey@lag.net> patch-10
-
- Summary:
- add logfiles and .pyc files to the "junk" list
- Revision:
- secsh--dev--1.0--patch-10
-
- add *.log and *.pyc to the explicit junk list.
-
- modified files:
- {arch}/=tagging-method
-
-
-2003-11-10 08:49:50 GMT Robey Pointer <robey@lag.net> patch-9
-
- Summary:
- rename secsh -> paramiko
- Revision:
- secsh--dev--1.0--patch-9
-
- also, rename SecshException back to SSHException. sigh. :)
-
- modified files:
- ./MANIFEST ./Makefile ./NOTES ./README ./auth_transport.py
- ./channel.py ./demo.py ./demo_server.py ./kex_gex.py
- ./kex_group1.py ./message.py ./paramiko.py ./setup.py
- ./transport.py
-
- renamed files:
- ./.arch-ids/secsh.py.id
- ==> ./.arch-ids/paramiko.py.id
- ./secsh.py
- ==> ./paramiko.py
-
-
-2003-11-10 06:52:35 GMT Robey Pointer <robey@lag.net> patch-8
-
- Summary:
- doc changes
- Revision:
- secsh--dev--1.0--patch-8
-
-
- modified files:
- ./README ./demo_server.py ./secsh.py
-
-
-2003-11-10 04:54:02 GMT Robey Pointer <robey@lag.net> patch-7
-
- Summary:
- cleaned up server code, renamed some files & classes
- Revision:
- secsh--dev--1.0--patch-7
-
- renamed demo-server.py and demo-host-key to demo_server.py and
- demo_host_key, just to be consistent.
-
- renamed SSHException -> SecshException.
-
- generalized the mechanism where Channel decides whether to allow
- different channel requests: 4 of the main ones (pty, window-change,
- shell, and subsystem) go through easily override-able methods now.
- you could probably make an actual ssh shell server.
-
- gave ChannelFile a repr().
-
- turned off ultra debugging in the demos. demo_server creates a
- subclass of Channel to allow pty/shell and sets an event when the
- shell request is made, so that it knows when it can start sending
- the fake bbs.
-
- renamed to charmander and updated some of the distutils files.
-
- modified files:
- ./MANIFEST ./NOTES ./auth_transport.py ./channel.py ./demo.py
- ./demo_server.py ./kex_gex.py ./kex_group1.py ./secsh.py
- ./setup.py ./transport.py
-
- renamed files:
- ./.arch-ids/demo-host-key.id
- ==> ./.arch-ids/demo_host_key.id
- ./.arch-ids/demo-server.py.id
- ==> ./.arch-ids/demo_server.py.id
- ./demo-host-key
- ==> ./demo_host_key
- ./demo-server.py
- ==> ./demo_server.py
-
-
-2003-11-09 21:16:35 GMT Robey Pointer <robey@lag.net> patch-6
-
- Summary:
- notes about the exported api
- Revision:
- secsh--dev--1.0--patch-6
-
- just wrote some quick notes (for a few of the classes) about which
- methods are intended to be the exported API. python has no decent
- way of distinguishing private vs public.
-
-
- modified files:
- ./NOTES
-
-
-2003-11-09 21:14:21 GMT Robey Pointer <robey@lag.net> patch-5
-
- Summary:
- big chunk of work which makes server code 95% done
- Revision:
- secsh--dev--1.0--patch-5
-
- fixed auth check methods to return just a result (failed, succeeded,
- partially succeeded) and always use get_allowed_auths to determine the
- list of allowed auth methods to return.
-
- channel's internal API changed a bit to allow for client-side vs.
- server-side channels. we now honor the "want-reply" bit from channel
- requests. in server mode (for now), we automatically allow pty-req
- and shell requests without doing anything.
-
- ChannelFile was fixed up a bit to support universal newlines. readline
- got rewritten: the old way used the "greedy" read call from ChannelFile,
- which won't work if the socket doesn't have that much data buffered and
- ready. now it uses recv directly, and tracks the different newlines.
-
- demo-server.py now answers to a single shell request (like a CLI ssh
- tool will make) and does a very simple demo pretending to be a BBS.
-
- transport: fixed a bug with parsing the remote side's banner. channel
- requests are passed to another method in server mode, to determine if
- we should allow it. new allowed channels are added to an accept queue,
- and a new method 'accept' (with timeout) will block until the next
- incoming channel is ready.
-
-
- modified files:
- ./auth_transport.py ./channel.py ./demo-server.py ./demo.py
- ./transport.py
-
-
-2003-11-09 20:59:51 GMT Robey Pointer <robey@lag.net> patch-4
-
- Summary:
- change kex-gex server code to generate primes by hand
- Revision:
- secsh--dev--1.0--patch-4
-
- added a util function "generate_prime" to compare to the incredibly slow C
- version, but it's no faster of course. i think kex-gex from the server is
- just not going to be feasible without having a separate thread generate some
- primes in the background to have handy when a request comes in. so in short,
- this still doesn't work.
-
- also i put bit_length into util and a tb_strings function which gets stack
- traceback info and splits it into a list of strings.
-
-
- modified files:
- ./kex_gex.py ./util.py
-
-
-2003-11-07 10:36:42 GMT Robey Pointer <robey@lag.net> patch-3
-
- Summary:
- remove some leftover garbage from dsskey
- Revision:
- secsh--dev--1.0--patch-3
-
- leftover from a cut & paste i was doing a few days ago. bad robey.
-
- modified files:
- ./dsskey.py
-
-
-2003-11-06 07:34:27 GMT Robey Pointer <robey@lag.net> patch-2
-
- Summary:
- add a demo host key and point demo-server at it.
- Revision:
- secsh--dev--1.0--patch-2
-
- also, temporarily comment out the nonfunctional kex-gex method.
-
- new files:
- ./.arch-ids/demo-host-key.id ./demo-host-key
-
- modified files:
- ./demo-server.py ./transport.py
-
-
-2003-11-04 08:50:22 GMT Robey Pointer <robey@lag.net> patch-1
-
- Summary:
- no changes
- Revision:
- secsh--dev--1.0--patch-1
-
- why aren't my log messages kept?
-
- modified files:
- ./kex_gex.py
-
- new patches:
- robey@lag.net--2003/secsh--dev--1.0--patch-1
-
-
-2003-11-04 08:34:24 GMT Robey Pointer <robey@lag.net> base-0
-
- Summary:
- initial import
- Revision:
- secsh--dev--1.0--base-0
-
-
- (automatically generated log message)
-
- new files:
- ./LICENSE ./MANIFEST ./Makefile ./NOTES ./README
- ./auth_transport.py ./ber.py ./channel.py ./demo-server.py
- ./demo.py ./dsskey.py ./kex_gex.py ./kex_group1.py
- ./message.py ./rsakey.py ./secsh.py ./setup.py ./transport.py
- ./util.py
-
- new patches:
- robey@lag.net--2003/secsh--dev--1.0--base-0
-
-
diff --git a/README.rst b/README.rst
index 3ed9e7bc..e267f69a 100644
--- a/README.rst
+++ b/README.rst
@@ -11,7 +11,7 @@ Paramiko
:Paramiko: Python SSH module
:Copyright: Copyright (c) 2003-2009 Robey Pointer <robeypointer@gmail.com>
-:Copyright: Copyright (c) 2013-2016 Jeff Forcier <jeff@bitprophet.org>
+:Copyright: Copyright (c) 2013-2017 Jeff Forcier <jeff@bitprophet.org>
:License: `LGPL <https://www.gnu.org/copyleft/lesser.html>`_
:Homepage: http://www.paramiko.org/
:API docs: http://docs.paramiko.org
@@ -21,30 +21,22 @@ Paramiko
What
----
-"Paramiko" is a combination of the esperanto words for "paranoid" and
-"friend". It's a module for Python 2.6+ that implements the SSH2 protocol
-for secure (encrypted and authenticated) connections to remote machines.
-Unlike SSL (aka TLS), SSH2 protocol does not require hierarchical
-certificates signed by a powerful central authority. You may know SSH2 as
-the protocol that replaced Telnet and rsh for secure access to remote
-shells, but the protocol also includes the ability to open arbitrary
-channels to remote services across the encrypted tunnel (this is how SFTP
-works, for example).
-
-It is written entirely in Python (no C or platform-dependent code) and is
-released under the GNU Lesser General Public License (`LGPL
+"Paramiko" is a combination of the Esperanto words for "paranoid" and
+"friend". It's a module for Python 2.6+/3.3+ that implements the SSH2 protocol
+for secure (encrypted and authenticated) connections to remote machines. Unlike
+SSL (aka TLS), SSH2 protocol does not require hierarchical certificates signed
+by a powerful central authority. You may know SSH2 as the protocol that
+replaced Telnet and rsh for secure access to remote shells, but the protocol
+also includes the ability to open arbitrary channels to remote services across
+the encrypted tunnel (this is how SFTP works, for example).
+
+It is written entirely in Python (though it depends on third-party C wrappers
+for low level crypto; these are often available precompiled) and is released
+under the GNU Lesser General Public License (`LGPL
<https://www.gnu.org/copyleft/lesser.html>`_).
-The package and its API is fairly well documented in the "doc/" folder
-that should have come with this archive.
-
-
-Requirements
-------------
-
-- `Python <http://www.python.org/>`_ 2.6, 2.7, or 3.3+
-- `Cryptography <https://cryptography.io>`_ 0.8 or better
-- `pyasn1 <https://pypi.python.org/pypi/pyasn1>`_ 0.1.7 or better
+The package and its API is fairly well documented in the ``docs`` folder that
+should have come with this repository.
Installation
@@ -87,20 +79,21 @@ Demo
----
Several demo scripts come with Paramiko to demonstrate how to use it.
-Probably the simplest demo of all is this::
+Probably the simplest demo is this::
- import paramiko, base64
- key = paramiko.RSAKey(data=base64.decodestring('AAA...'))
+ import base64
+ import paramiko
+ key = paramiko.RSAKey(data=base64.b64decode(b'AAA...'))
client = paramiko.SSHClient()
client.get_host_keys().add('ssh.example.com', 'ssh-rsa', key)
client.connect('ssh.example.com', username='strongbad', password='thecheat')
stdin, stdout, stderr = client.exec_command('ls')
for line in stdout:
- print '... ' + line.strip('\n')
+ print('... ' + line.strip('\n'))
client.close()
This prints out the results of executing ``ls`` on a remote server. The host
-key 'AAA...' should of course be replaced by the actual base64 encoding of the
+key ``b'AAA...'`` should of course be replaced by the actual base64 encoding of the
host key. If you skip host key verification, the connection is not secure!
The following example scripts (in demos/) get progressively more detailed:
@@ -134,7 +127,7 @@ Use
---
The demo scripts are probably the best example of how to use this package.
-There is also a lot of documentation, generated with Sphinx autodoc, in the
+Also a lot of documentation is generated by Sphinx autodoc, in the
doc/ folder.
There are also unit tests here::
diff --git a/demos/demo_server.py b/demos/demo_server.py
index c4af9b10..3a7ec854 100644
--- a/demos/demo_server.py
+++ b/demos/demo_server.py
@@ -40,7 +40,7 @@ print('Read key: ' + u(hexlify(host_key.get_fingerprint())))
class Server (paramiko.ServerInterface):
- # 'data' is the output of base64.encodestring(str(key))
+ # 'data' is the output of base64.b64encode(key)
# (using the "user_rsa_key" files)
data = (b'AAAAB3NzaC1yc2EAAAABIwAAAIEAyO4it3fHlmGZWJaGrfeHOVY7RWO3P9M7hp'
b'fAu7jJ2d7eothvfeuoRFtJwhUmZDluRdFyhFY/hFAh76PJKGAusIqIQKlkJxMC'
@@ -96,9 +96,7 @@ class Server (paramiko.ServerInterface):
return paramiko.AUTH_FAILED
def enable_auth_gssapi(self):
- UseGSSAPI = True
- GSSAPICleanupCredentials = False
- return UseGSSAPI
+ return True
def get_allowed_auths(self, username):
return 'gssapi-keyex,gssapi-with-mic,password,publickey'
diff --git a/dev-requirements.txt b/dev-requirements.txt
index 9e4564a5..716f432d 100644
--- a/dev-requirements.txt
+++ b/dev-requirements.txt
@@ -1,11 +1,12 @@
# Older junk
tox>=1.4,<1.5
# For newer tasks like building Sphinx docs.
-invoke>=0.11.1
-invocations>=0.11.0
-sphinx>=1.1.3
-alabaster>=0.7.5
-releases>=1.0.0
-semantic_version>=2.4,<2.5
+invoke>=0.13,<2.0
+invocations>=0.13,<2.0
+sphinx>=1.1.3,<1.5
+alabaster>=0.7.5,<2.0
+releases>=1.1.0,<2.0
+semantic_version<3.0
wheel==0.24
twine==1.5
+flake8==2.6.2
diff --git a/paramiko/__init__.py b/paramiko/__init__.py
index 9e2ba013..197f519a 100644
--- a/paramiko/__init__.py
+++ b/paramiko/__init__.py
@@ -16,6 +16,7 @@
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+# flake8: noqa
import sys
from paramiko._version import __version__, __version_info__
@@ -28,13 +29,18 @@ __license__ = "GNU Lesser General Public License (LGPL)"
from paramiko.transport import SecurityOptions, Transport
-from paramiko.client import SSHClient, MissingHostKeyPolicy, AutoAddPolicy, RejectPolicy, WarningPolicy
+from paramiko.client import (
+ SSHClient, MissingHostKeyPolicy, AutoAddPolicy, RejectPolicy,
+ WarningPolicy,
+)
from paramiko.auth_handler import AuthHandler
from paramiko.ssh_gss import GSSAuth, GSS_AUTH_AVAILABLE
from paramiko.channel import Channel, ChannelFile
-from paramiko.ssh_exception import SSHException, PasswordRequiredException, \
- BadAuthenticationType, ChannelException, BadHostKeyException, \
- AuthenticationException, ProxyCommandFailure
+from paramiko.ssh_exception import (
+ SSHException, PasswordRequiredException, BadAuthenticationType,
+ ChannelException, BadHostKeyException, AuthenticationException,
+ ProxyCommandFailure,
+)
from paramiko.server import ServerInterface, SubsystemHandler, InteractiveQuery
from paramiko.rsakey import RSAKey
from paramiko.dsskey import DSSKey
@@ -55,49 +61,56 @@ from paramiko.hostkeys import HostKeys
from paramiko.config import SSHConfig
from paramiko.proxy import ProxyCommand
-from paramiko.common import AUTH_SUCCESSFUL, AUTH_PARTIALLY_SUCCESSFUL, AUTH_FAILED, \
- OPEN_SUCCEEDED, OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED, OPEN_FAILED_CONNECT_FAILED, \
- OPEN_FAILED_UNKNOWN_CHANNEL_TYPE, OPEN_FAILED_RESOURCE_SHORTAGE
+from paramiko.common import (
+ AUTH_SUCCESSFUL, AUTH_PARTIALLY_SUCCESSFUL, AUTH_FAILED, OPEN_SUCCEEDED,
+ OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED, OPEN_FAILED_CONNECT_FAILED,
+ OPEN_FAILED_UNKNOWN_CHANNEL_TYPE, OPEN_FAILED_RESOURCE_SHORTAGE,
+)
-from paramiko.sftp import SFTP_OK, SFTP_EOF, SFTP_NO_SUCH_FILE, SFTP_PERMISSION_DENIED, SFTP_FAILURE, \
- SFTP_BAD_MESSAGE, SFTP_NO_CONNECTION, SFTP_CONNECTION_LOST, SFTP_OP_UNSUPPORTED
+from paramiko.sftp import (
+ SFTP_OK, SFTP_EOF, SFTP_NO_SUCH_FILE, SFTP_PERMISSION_DENIED, SFTP_FAILURE,
+ SFTP_BAD_MESSAGE, SFTP_NO_CONNECTION, SFTP_CONNECTION_LOST,
+ SFTP_OP_UNSUPPORTED,
+)
from paramiko.common import io_sleep
-__all__ = [ 'Transport',
- 'SSHClient',
- 'MissingHostKeyPolicy',
- 'AutoAddPolicy',
- 'RejectPolicy',
- 'WarningPolicy',
- 'SecurityOptions',
- 'SubsystemHandler',
- 'Channel',
- 'PKey',
- 'RSAKey',
- 'DSSKey',
- 'Message',
- 'SSHException',
- 'AuthenticationException',
- 'PasswordRequiredException',
- 'BadAuthenticationType',
- 'ChannelException',
- 'BadHostKeyException',
- 'ProxyCommand',
- 'ProxyCommandFailure',
- 'SFTP',
- 'SFTPFile',
- 'SFTPHandle',
- 'SFTPClient',
- 'SFTPServer',
- 'SFTPError',
- 'SFTPAttributes',
- 'SFTPServerInterface',
- 'ServerInterface',
- 'BufferedFile',
- 'Agent',
- 'AgentKey',
- 'HostKeys',
- 'SSHConfig',
- 'util',
- 'io_sleep' ]
+__all__ = [
+ 'Transport',
+ 'SSHClient',
+ 'MissingHostKeyPolicy',
+ 'AutoAddPolicy',
+ 'RejectPolicy',
+ 'WarningPolicy',
+ 'SecurityOptions',
+ 'SubsystemHandler',
+ 'Channel',
+ 'PKey',
+ 'RSAKey',
+ 'DSSKey',
+ 'Message',
+ 'SSHException',
+ 'AuthenticationException',
+ 'PasswordRequiredException',
+ 'BadAuthenticationType',
+ 'ChannelException',
+ 'BadHostKeyException',
+ 'ProxyCommand',
+ 'ProxyCommandFailure',
+ 'SFTP',
+ 'SFTPFile',
+ 'SFTPHandle',
+ 'SFTPClient',
+ 'SFTPServer',
+ 'SFTPError',
+ 'SFTPAttributes',
+ 'SFTPServerInterface',
+ 'ServerInterface',
+ 'BufferedFile',
+ 'Agent',
+ 'AgentKey',
+ 'HostKeys',
+ 'SSHConfig',
+ 'util',
+ 'io_sleep',
+]
diff --git a/paramiko/_version.py b/paramiko/_version.py
index 4b78efad..4e7cf19d 100644
--- a/paramiko/_version.py
+++ b/paramiko/_version.py
@@ -1,2 +1,2 @@
-__version_info__ = (1, 17, 0)
+__version_info__ = (2, 0, 5)
__version__ = '.'.join(map(str, __version_info__))
diff --git a/paramiko/_winapi.py b/paramiko/_winapi.py
index 77e0129c..a13d7e87 100644
--- a/paramiko/_winapi.py
+++ b/paramiko/_winapi.py
@@ -41,7 +41,7 @@ def format_system_message(errno):
ctypes.byref(result_buffer),
buffer_size,
arguments,
- )
+ )
# note the following will cause an infinite loop if GetLastError
# repeatedly returns an error that cannot be formatted, although
# this should not happen.
@@ -52,13 +52,14 @@ def format_system_message(errno):
class WindowsError(builtins.WindowsError):
- "more info about errors at http://msdn.microsoft.com/en-us/library/ms681381(VS.85).aspx"
+ """more info about errors at
+ http://msdn.microsoft.com/en-us/library/ms681381(VS.85).aspx"""
def __init__(self, value=None):
if value is None:
value = ctypes.windll.kernel32.GetLastError()
strerror = format_system_message(value)
- if sys.version_info > (3,3):
+ if sys.version_info > (3, 3):
args = 0, strerror, None, value
else:
args = value, strerror
@@ -78,6 +79,7 @@ class WindowsError(builtins.WindowsError):
def __repr__(self):
return '{self.__class__.__name__}({self.winerror})'.format(**vars())
+
def handle_nonzero_success(result):
if result == 0:
raise WindowsError()
@@ -89,7 +91,7 @@ def handle_nonzero_success(result):
GMEM_MOVEABLE = 0x2
GlobalAlloc = ctypes.windll.kernel32.GlobalAlloc
-GlobalAlloc.argtypes = ctypes.wintypes.UINT, ctypes.c_ssize_t
+GlobalAlloc.argtypes = ctypes.wintypes.UINT, ctypes.c_size_t
GlobalAlloc.restype = ctypes.wintypes.HANDLE
GlobalLock = ctypes.windll.kernel32.GlobalLock
@@ -118,9 +120,22 @@ CreateFileMapping.restype = ctypes.wintypes.HANDLE
MapViewOfFile = ctypes.windll.kernel32.MapViewOfFile
MapViewOfFile.restype = ctypes.wintypes.HANDLE
+UnmapViewOfFile = ctypes.windll.kernel32.UnmapViewOfFile
+UnmapViewOfFile.argtypes = ctypes.wintypes.HANDLE,
+
+RtlMoveMemory = ctypes.windll.kernel32.RtlMoveMemory
+RtlMoveMemory.argtypes = (
+ ctypes.c_void_p,
+ ctypes.c_void_p,
+ ctypes.c_size_t,
+)
+
+ctypes.windll.kernel32.LocalFree.argtypes = ctypes.wintypes.HLOCAL,
+
#####################
# jaraco.windows.mmap
+
class MemoryMap(object):
"""
A memory map object which can have security attributes overridden.
@@ -177,6 +192,7 @@ class MemoryMap(object):
ctypes.windll.kernel32.UnmapViewOfFile(self.view)
ctypes.windll.kernel32.CloseHandle(self.filemap)
+
#############################
# jaraco.windows.api.security
@@ -240,12 +256,15 @@ POLICY_EXECUTE = (
POLICY_VIEW_LOCAL_INFORMATION |
POLICY_LOOKUP_NAMES)
+
class TokenAccess:
TOKEN_QUERY = 0x8
+
class TokenInformationClass:
TokenUser = 1
+
class TOKEN_USER(ctypes.Structure):
num = 1
_fields_ = [
@@ -280,6 +299,7 @@ class SECURITY_DESCRIPTOR(ctypes.Structure):
('Dacl', ctypes.c_void_p),
]
+
class SECURITY_ATTRIBUTES(ctypes.Structure):
"""
typedef struct _SECURITY_ATTRIBUTES {
@@ -307,9 +327,17 @@ class SECURITY_ATTRIBUTES(ctypes.Structure):
self._descriptor = value
self.lpSecurityDescriptor = ctypes.addressof(value)
+
+ctypes.windll.advapi32.SetSecurityDescriptorOwner.argtypes = (
+ ctypes.POINTER(SECURITY_DESCRIPTOR),
+ ctypes.c_void_p,
+ ctypes.wintypes.BOOL,
+)
+
#########################
# jaraco.windows.security
+
def GetTokenInformation(token, information_class):
"""
Given a token, get the token information for it.
@@ -324,6 +352,7 @@ def GetTokenInformation(token, information_class):
ctypes.byref(data_size)))
return ctypes.cast(data, ctypes.POINTER(TOKEN_USER)).contents
+
def OpenProcessToken(proc_handle, access):
result = ctypes.wintypes.HANDLE()
proc_handle = ctypes.wintypes.HANDLE(proc_handle)
@@ -331,6 +360,7 @@ def OpenProcessToken(proc_handle, access):
proc_handle, access, ctypes.byref(result)))
return result
+
def get_current_user():
"""
Return a TOKEN_USER for the owner of this process.
@@ -341,6 +371,7 @@ def get_current_user():
)
return GetTokenInformation(process, TOKEN_USER)
+
def get_security_attributes_for_user(user=None):
"""
Return a SECURITY_ATTRIBUTES structure with the SID set to the
diff --git a/paramiko/agent.py b/paramiko/agent.py
index 6a8e7fb4..bc857efa 100644
--- a/paramiko/agent.py
+++ b/paramiko/agent.py
@@ -109,15 +109,23 @@ class AgentProxyThread(threading.Thread):
def run(self):
try:
(r, addr) = self.get_connection()
- # Found that r should be either a socket from the socket library or None
+ # Found that r should be either
+ # a socket from the socket library or None
self.__inr = r
- self.__addr = addr # This should be an IP address as a string? or None
+ # The address should be an IP address as a string? or None
+ self.__addr = addr
self._agent.connect()
- if not isinstance(self._agent, int) and (self._agent._conn is None or not hasattr(self._agent._conn, 'fileno')):
+ if (
+ not isinstance(self._agent, int) and
+ (
+ self._agent._conn is None or
+ not hasattr(self._agent._conn, 'fileno')
+ )
+ ):
raise AuthenticationException("Unable to connect to SSH agent")
self._communicate()
except:
- #XXX Not sure what to do here ... raise or pass ?
+ # XXX Not sure what to do here ... raise or pass ?
raise
def _communicate(self):
@@ -213,7 +221,8 @@ class AgentClientProxy(object):
if ('SSH_AUTH_SOCK' in os.environ) and (sys.platform != 'win32'):
conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
- retry_on_signal(lambda: conn.connect(os.environ['SSH_AUTH_SOCK']))
+ retry_on_signal(
+ lambda: conn.connect(os.environ['SSH_AUTH_SOCK']))
except:
# probably a dangling env var: the ssh agent is gone
return
@@ -244,7 +253,7 @@ class AgentServerProxy(AgentSSH):
"""
:param .Transport t: Transport used for SSH Agent communication forwarding
- :raises SSHException: mostly if we lost the agent
+ :raises: `.SSHException` -- mostly if we lost the agent
"""
def __init__(self, t):
AgentSSH.__init__(self)
@@ -331,14 +340,14 @@ class Agent(AgentSSH):
"""
Client interface for using private keys from an SSH agent running on the
local machine. If an SSH agent is running, this class can be used to
- connect to it and retreive `.PKey` objects which can be used when
+ connect to it and retrieve `.PKey` objects which can be used when
attempting to authenticate to remote SSH servers.
Upon initialization, a session with the local machine's SSH agent is
opened, if one is running. If no agent is running, initialization will
succeed, but `get_keys` will return an empty tuple.
- :raises SSHException:
+ :raises: `.SSHException` --
if an SSH agent is found, but speaks an incompatible protocol
"""
def __init__(self):
diff --git a/paramiko/auth_handler.py b/paramiko/auth_handler.py
index 38b23729..33f01da6 100644
--- a/paramiko/auth_handler.py
+++ b/paramiko/auth_handler.py
@@ -21,25 +21,27 @@
"""
import weakref
-from paramiko.common import cMSG_SERVICE_REQUEST, cMSG_DISCONNECT, \
- DISCONNECT_SERVICE_NOT_AVAILABLE, DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE, \
- cMSG_USERAUTH_REQUEST, cMSG_SERVICE_ACCEPT, DEBUG, AUTH_SUCCESSFUL, INFO, \
- cMSG_USERAUTH_SUCCESS, cMSG_USERAUTH_FAILURE, AUTH_PARTIALLY_SUCCESSFUL, \
- cMSG_USERAUTH_INFO_REQUEST, WARNING, AUTH_FAILED, cMSG_USERAUTH_PK_OK, \
- cMSG_USERAUTH_INFO_RESPONSE, MSG_SERVICE_REQUEST, MSG_SERVICE_ACCEPT, \
- MSG_USERAUTH_REQUEST, MSG_USERAUTH_SUCCESS, MSG_USERAUTH_FAILURE, \
- MSG_USERAUTH_BANNER, MSG_USERAUTH_INFO_REQUEST, MSG_USERAUTH_INFO_RESPONSE, \
- cMSG_USERAUTH_GSSAPI_RESPONSE, cMSG_USERAUTH_GSSAPI_TOKEN, \
- cMSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE, cMSG_USERAUTH_GSSAPI_ERROR, \
- cMSG_USERAUTH_GSSAPI_ERRTOK, cMSG_USERAUTH_GSSAPI_MIC,\
- MSG_USERAUTH_GSSAPI_RESPONSE, MSG_USERAUTH_GSSAPI_TOKEN, \
- MSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE, MSG_USERAUTH_GSSAPI_ERROR, \
- MSG_USERAUTH_GSSAPI_ERRTOK, MSG_USERAUTH_GSSAPI_MIC, MSG_NAMES
+from paramiko.common import (
+ cMSG_SERVICE_REQUEST, cMSG_DISCONNECT, DISCONNECT_SERVICE_NOT_AVAILABLE,
+ DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE, cMSG_USERAUTH_REQUEST,
+ cMSG_SERVICE_ACCEPT, DEBUG, AUTH_SUCCESSFUL, INFO, cMSG_USERAUTH_SUCCESS,
+ cMSG_USERAUTH_FAILURE, AUTH_PARTIALLY_SUCCESSFUL,
+ cMSG_USERAUTH_INFO_REQUEST, WARNING, AUTH_FAILED, cMSG_USERAUTH_PK_OK,
+ cMSG_USERAUTH_INFO_RESPONSE, MSG_SERVICE_REQUEST, MSG_SERVICE_ACCEPT,
+ MSG_USERAUTH_REQUEST, MSG_USERAUTH_SUCCESS, MSG_USERAUTH_FAILURE,
+ MSG_USERAUTH_BANNER, MSG_USERAUTH_INFO_REQUEST, MSG_USERAUTH_INFO_RESPONSE,
+ cMSG_USERAUTH_GSSAPI_RESPONSE, cMSG_USERAUTH_GSSAPI_TOKEN,
+ cMSG_USERAUTH_GSSAPI_MIC, MSG_USERAUTH_GSSAPI_RESPONSE,
+ MSG_USERAUTH_GSSAPI_TOKEN, MSG_USERAUTH_GSSAPI_ERROR,
+ MSG_USERAUTH_GSSAPI_ERRTOK, MSG_USERAUTH_GSSAPI_MIC, MSG_NAMES,
+)
from paramiko.message import Message
from paramiko.py3compat import bytestring
-from paramiko.ssh_exception import SSHException, AuthenticationException, \
- BadAuthenticationType, PartialAuthentication
+from paramiko.ssh_exception import (
+ SSHException, AuthenticationException, BadAuthenticationType,
+ PartialAuthentication,
+)
from paramiko.server import InteractiveQuery
from paramiko.ssh_gss import GSSAuth
@@ -149,7 +151,7 @@ class AuthHandler (object):
if self.auth_event is not None:
self.auth_event.set()
- ### internals...
+ # ...internals...
def _request_auth(self):
m = Message()
@@ -237,7 +239,8 @@ class AuthHandler (object):
m.add_boolean(True)
m.add_string(self.private_key.get_name())
m.add_string(self.private_key)
- blob = self._get_session_blob(self.private_key, 'ssh-connection', self.username)
+ blob = self._get_session_blob(
+ self.private_key, 'ssh-connection', self.username)
sig = self.private_key.sign_ssh_data(blob)
m.add_string(sig)
elif self.auth_method == 'keyboard-interactive':
@@ -267,10 +270,11 @@ class AuthHandler (object):
ptype, m = self.transport.packetizer.read_message()
if ptype == MSG_USERAUTH_GSSAPI_TOKEN:
srv_token = m.get_string()
- next_token = sshgss.ssh_init_sec_context(self.gss_host,
- mech,
- self.username,
- srv_token)
+ next_token = sshgss.ssh_init_sec_context(
+ self.gss_host,
+ mech,
+ self.username,
+ srv_token)
# After this step the GSSAPI should not return any
# token. If it does, we keep sending the token to
# the server until no more token is returned.
@@ -282,7 +286,8 @@ class AuthHandler (object):
m.add_string(next_token)
self.transport.send_message(m)
else:
- raise SSHException("Received Package: %s" % MSG_NAMES[ptype])
+ raise SSHException(
+ "Received Package: %s" % MSG_NAMES[ptype])
m = Message()
m.add_byte(cMSG_USERAUTH_GSSAPI_MIC)
# send the MIC to the server
@@ -297,7 +302,7 @@ class AuthHandler (object):
maj_status = m.get_int()
min_status = m.get_int()
err_msg = m.get_string()
- lang_tag = m.get_string() # we don't care!
+ m.get_string() # Lang tag - discarded
raise SSHException("GSS-API Error:\nMajor Status: %s\n\
Minor Status: %s\ \nError Message:\
%s\n") % (str(maj_status),
@@ -307,9 +312,12 @@ class AuthHandler (object):
self._parse_userauth_failure(m)
return
else:
- raise SSHException("Received Package: %s" % MSG_NAMES[ptype])
- elif self.auth_method == 'gssapi-keyex' and\
- self.transport.gss_kex_used:
+ raise SSHException(
+ "Received Package: %s" % MSG_NAMES[ptype])
+ elif (
+ self.auth_method == 'gssapi-keyex' and
+ self.transport.gss_kex_used
+ ):
kexgss = self.transport.kexgss_ctxt
kexgss.set_username(self.username)
mic_token = kexgss.ssh_get_mic(self.transport.session_id)
@@ -317,10 +325,13 @@ class AuthHandler (object):
elif self.auth_method == 'none':
pass
else:
- raise SSHException('Unknown auth method "%s"' % self.auth_method)
+ raise SSHException(
+ 'Unknown auth method "%s"' % self.auth_method)
self.transport._send_message(m)
else:
- self.transport._log(DEBUG, 'Service request "%s" accepted (?)' % service)
+ self.transport._log(
+ DEBUG,
+ 'Service request "%s" accepted (?)' % service)
def _send_auth_result(self, username, method, result):
# okay, send result
@@ -332,7 +343,8 @@ class AuthHandler (object):
else:
self.transport._log(INFO, 'Auth rejected (%s).' % method)
m.add_byte(cMSG_USERAUTH_FAILURE)
- m.add_string(self.transport.server_object.get_allowed_auths(username))
+ m.add_string(
+ self.transport.server_object.get_allowed_auths(username))
if result == AUTH_PARTIALLY_SUCCESSFUL:
m.add_boolean(True)
else:
@@ -372,12 +384,19 @@ class AuthHandler (object):
username = m.get_text()
service = m.get_text()
method = m.get_text()
- self.transport._log(DEBUG, 'Auth request (type=%s) service=%s, username=%s' % (method, service, username))
+ self.transport._log(
+ DEBUG,
+ 'Auth request (type=%s) service=%s, username=%s' % (
+ method, service, username))
if service != 'ssh-connection':
self._disconnect_service_not_available()
return
- if (self.auth_username is not None) and (self.auth_username != username):
- self.transport._log(WARNING, 'Auth rejected because the client attempted to change username in mid-flight')
+ if ((self.auth_username is not None) and
+ (self.auth_username != username)):
+ self.transport._log(
+ WARNING,
+ 'Auth rejected because the client attempted to change username in mid-flight' # noqa
+ )
self._disconnect_no_more_auth()
return
self.auth_username = username
@@ -396,9 +415,12 @@ class AuthHandler (object):
# in this case, just return the raw byte string.
pass
if changereq:
- # always treated as failure, since we don't support changing passwords, but collect
- # the list of valid auth types from the callback anyway
- self.transport._log(DEBUG, 'Auth request to change passwords (rejected)')
+ # always treated as failure, since we don't support changing
+ # passwords, but collect the list of valid auth types from
+ # the callback anyway
+ self.transport._log(
+ DEBUG,
+ 'Auth request to change passwords (rejected)')
newpassword = m.get_binary()
try:
newpassword = newpassword.decode('UTF-8', 'replace')
@@ -406,7 +428,8 @@ class AuthHandler (object):
pass
result = AUTH_FAILED
else:
- result = self.transport.server_object.check_auth_password(username, password)
+ result = self.transport.server_object.check_auth_password(
+ username, password)
elif method == 'publickey':
sig_attached = m.get_boolean()
keytype = m.get_text()
@@ -414,16 +437,21 @@ class AuthHandler (object):
try:
key = self.transport._key_info[keytype](Message(keyblob))
except SSHException as e:
- self.transport._log(INFO, 'Auth rejected: public key: %s' % str(e))
+ self.transport._log(
+ INFO,
+ 'Auth rejected: public key: %s' % str(e))
key = None
except:
- self.transport._log(INFO, 'Auth rejected: unsupported or mangled public key')
+ self.transport._log(
+ INFO,
+ 'Auth rejected: unsupported or mangled public key')
key = None
if key is None:
self._disconnect_no_more_auth()
return
# first check if this key is okay... if not, we can skip the verify
- result = self.transport.server_object.check_auth_publickey(username, key)
+ result = self.transport.server_object.check_auth_publickey(
+ username, key)
if result != AUTH_FAILED:
# key is okay, verify it
if not sig_attached:
@@ -438,12 +466,14 @@ class AuthHandler (object):
sig = Message(m.get_binary())
blob = self._get_session_blob(key, service, username)
if not key.verify_ssh_sig(blob, sig):
- self.transport._log(INFO, 'Auth rejected: invalid signature')
+ self.transport._log(
+ INFO,
+ 'Auth rejected: invalid signature')
result = AUTH_FAILED
elif method == 'keyboard-interactive':
- lang = m.get_string()
submethods = m.get_string()
- result = self.transport.server_object.check_auth_interactive(username, submethods)
+ result = self.transport.server_object.check_auth_interactive(
+ username, submethods)
if isinstance(result, InteractiveQuery):
# make interactive query instead of response
self._interactive_query(result)
@@ -457,15 +487,17 @@ class AuthHandler (object):
# We can't accept more than one OID, so if the SSH client sends
# more than one, disconnect.
if mechs > 1:
- self.transport._log(INFO,
- 'Disconnect: Received more than one GSS-API OID mechanism')
+ self.transport._log(
+ INFO,
+ 'Disconnect: Received more than one GSS-API OID mechanism')
self._disconnect_no_more_auth()
desired_mech = m.get_string()
mech_ok = sshgss.ssh_check_mech(desired_mech)
# if we don't support the mechanism, disconnect.
if not mech_ok:
- self.transport._log(INFO,
- 'Disconnect: Received an invalid GSS-API OID mechanism')
+ self.transport._log(
+ INFO,
+ 'Disconnect: Received an invalid GSS-API OID mechanism')
self._disconnect_no_more_auth()
# send the Kerberos V5 GSSAPI OID to the client
supported_mech = sshgss.ssh_gss_oids("server")
@@ -515,7 +547,8 @@ class AuthHandler (object):
# The OpenSSH server is able to create a TGT with the delegated
# client credentials, but this is not supported by GSS-API.
result = AUTH_SUCCESSFUL
- self.transport.server_object.check_auth_gssapi_with_mic(username, result)
+ self.transport.server_object.check_auth_gssapi_with_mic(
+ username, result)
elif method == "gssapi-keyex" and gss_auth:
mic_token = m.get_string()
sshgss = self.transport.kexgss_ctxt
@@ -532,14 +565,17 @@ class AuthHandler (object):
self._send_auth_result(username, method, result)
raise
result = AUTH_SUCCESSFUL
- self.transport.server_object.check_auth_gssapi_keyex(username, result)
+ self.transport.server_object.check_auth_gssapi_keyex(
+ username, result)
else:
result = self.transport.server_object.check_auth_none(username)
# okay, send result
self._send_auth_result(username, method, result)
def _parse_userauth_success(self, m):
- self.transport._log(INFO, 'Authentication (%s) successful!' % self.auth_method)
+ self.transport._log(
+ INFO,
+ 'Authentication (%s) successful!' % self.auth_method)
self.authenticated = True
self.transport._auth_trigger()
if self.auth_event is not None:
@@ -553,11 +589,18 @@ class AuthHandler (object):
self.transport._log(DEBUG, 'Methods: ' + str(authlist))
self.transport.saved_exception = PartialAuthentication(authlist)
elif self.auth_method not in authlist:
- self.transport._log(DEBUG, 'Authentication type (%s) not permitted.' % self.auth_method)
- self.transport._log(DEBUG, 'Allowed methods: ' + str(authlist))
- self.transport.saved_exception = BadAuthenticationType('Bad authentication type', authlist)
+ self.transport._log(
+ DEBUG,
+ 'Authentication type (%s) not permitted.' % self.auth_method)
+ self.transport._log(
+ DEBUG,
+ 'Allowed methods: ' + str(authlist))
+ self.transport.saved_exception = BadAuthenticationType(
+ 'Bad authentication type', authlist)
else:
- self.transport._log(INFO, 'Authentication (%s) failed.' % self.auth_method)
+ self.transport._log(
+ INFO,
+ 'Authentication (%s) failed.' % self.auth_method)
self.authenticated = False
self.username = None
if self.auth_event is not None:
@@ -566,7 +609,6 @@ class AuthHandler (object):
def _parse_userauth_banner(self, m):
banner = m.get_string()
self.banner = banner
- lang = m.get_string()
self.transport._log(INFO, 'Auth banner: %s' % banner)
# who cares.
@@ -580,7 +622,8 @@ class AuthHandler (object):
prompt_list = []
for i in range(prompts):
prompt_list.append((m.get_text(), m.get_boolean()))
- response_list = self.interactive_handler(title, instructions, prompt_list)
+ response_list = self.interactive_handler(
+ title, instructions, prompt_list)
m = Message()
m.add_byte(cMSG_USERAUTH_INFO_RESPONSE)
@@ -596,12 +639,14 @@ class AuthHandler (object):
responses = []
for i in range(n):
responses.append(m.get_text())
- result = self.transport.server_object.check_auth_interactive_response(responses)
- if isinstance(type(result), InteractiveQuery):
+ result = self.transport.server_object.check_auth_interactive_response(
+ responses)
+ if isinstance(result, InteractiveQuery):
# make interactive query instead of response
self._interactive_query(result)
return
- self._send_auth_result(self.auth_username, 'keyboard-interactive', result)
+ self._send_auth_result(
+ self.auth_username, 'keyboard-interactive', result)
_handler_table = {
MSG_SERVICE_REQUEST: _parse_service_request,
diff --git a/paramiko/ber.py b/paramiko/ber.py
index a388df07..7725f944 100644
--- a/paramiko/ber.py
+++ b/paramiko/ber.py
@@ -71,7 +71,8 @@ class BER(object):
t = size & 0x7f
if self.idx + t > len(self.content):
return None
- size = util.inflate_long(self.content[self.idx: self.idx + t], True)
+ size = util.inflate_long(
+ self.content[self.idx: self.idx + t], True)
self.idx += t
if self.idx + size > len(self.content):
# can't fit
@@ -87,7 +88,8 @@ class BER(object):
return util.inflate_long(data)
else:
# 1: boolean (00 false, otherwise true)
- raise BERException('Unknown ber encoding type %d (robey is lazy)' % ident)
+ raise BERException(
+ 'Unknown ber encoding type %d (robey is lazy)' % ident)
@staticmethod
def decode_sequence(data):
diff --git a/paramiko/buffered_pipe.py b/paramiko/buffered_pipe.py
index d5fe164e..d9f5149d 100644
--- a/paramiko/buffered_pipe.py
+++ b/paramiko/buffered_pipe.py
@@ -41,7 +41,7 @@ class BufferedPipe (object):
file or socket, but is fed data from another thread. This is used by
`.Channel`.
"""
-
+
def __init__(self):
self._lock = threading.Lock()
self._cv = threading.Condition(self._lock)
@@ -67,21 +67,30 @@ class BufferedPipe (object):
Set an event on this buffer. When data is ready to be read (or the
buffer has been closed), the event will be set. When no data is
ready, the event will be cleared.
-
+
:param threading.Event event: the event to set/clear
"""
- self._event = event
- if len(self._buffer) > 0:
- event.set()
- else:
- event.clear()
-
+ self._lock.acquire()
+ try:
+ self._event = event
+ # Make sure the event starts in `set` state if we appear to already
+ # be closed; otherwise, if we start in `clear` state & are closed,
+ # nothing will ever call `.feed` and the event (& OS pipe, if we're
+ # wrapping one - see `Channel.fileno`) will permanently stay in
+ # `clear`, causing deadlock if e.g. `select`ed upon.
+ if self._closed or len(self._buffer) > 0:
+ event.set()
+ else:
+ event.clear()
+ finally:
+ self._lock.release()
+
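A minimal sketch of the behavior the new locking and closed-state check guarantee, using only the ``BufferedPipe`` API shown in this hunk plus the standard-library ``threading`` module:

    import threading
    from paramiko.buffered_pipe import BufferedPipe

    pipe = BufferedPipe()
    pipe.close()                 # nothing will ever call feed() on this pipe

    ready = threading.Event()
    pipe.set_event(ready)        # with this patch the event comes back "set",
    assert ready.is_set()        # so a select()-style wait wakes up instead of
                                 # blocking forever on an already-closed pipe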
def feed(self, data):
"""
Feed new data into this pipe. This method is assumed to be called
from a separate thread, so synchronization is done.
-
- :param data: the data to add, as a `str` or `bytes`
+
+ :param data: the data to add, as a ``str`` or ``bytes``
"""
self._lock.acquire()
try:
@@ -97,7 +106,7 @@ class BufferedPipe (object):
Returns true if data is buffered and ready to be read from this
feeder. A ``False`` result does not mean that the feeder has closed;
it means you may need to wait before more data arrives.
-
+
:return:
``True`` if a `read` call would immediately return at least one
byte; ``False`` otherwise.
@@ -125,11 +134,11 @@ class BufferedPipe (object):
:param int nbytes: maximum number of bytes to read
:param float timeout:
maximum seconds to wait (or ``None``, the default, to wait forever)
- :return: the read data, as a `bytes`
-
- :raises PipeTimeout:
- if a timeout was specified and no data was ready before that
- timeout
+ :return: the read data, as a ``str`` or ``bytes``
+
+ :raises:
+ `.PipeTimeout` -- if a timeout was specified and no data was ready
+ before that timeout
"""
out = bytes()
self._lock.acquire()
@@ -163,11 +172,11 @@ class BufferedPipe (object):
self._lock.release()
return out
-
+
def empty(self):
"""
Clear out the buffer and return all data that was in it.
-
+
:return:
any data that was in the buffer prior to clearing it out, as a
`str`
@@ -181,7 +190,7 @@ class BufferedPipe (object):
return out
finally:
self._lock.release()
-
+
def close(self):
"""
Close this pipe object. Future calls to `read` after the buffer
@@ -199,7 +208,7 @@ class BufferedPipe (object):
def __len__(self):
"""
Return the number of bytes buffered.
-
+
:return: number (`int`) of bytes buffered
"""
self._lock.acquire()
diff --git a/paramiko/channel.py b/paramiko/channel.py
index 3c43eb10..1f603cf0 100644
--- a/paramiko/channel.py
+++ b/paramiko/channel.py
@@ -28,10 +28,11 @@ import threading
from functools import wraps
from paramiko import util
-from paramiko.common import cMSG_CHANNEL_REQUEST, cMSG_CHANNEL_WINDOW_ADJUST, \
- cMSG_CHANNEL_DATA, cMSG_CHANNEL_EXTENDED_DATA, DEBUG, ERROR, \
- cMSG_CHANNEL_SUCCESS, cMSG_CHANNEL_FAILURE, cMSG_CHANNEL_EOF, \
- cMSG_CHANNEL_CLOSE
+from paramiko.common import (
+ cMSG_CHANNEL_REQUEST, cMSG_CHANNEL_WINDOW_ADJUST, cMSG_CHANNEL_DATA,
+ cMSG_CHANNEL_EXTENDED_DATA, DEBUG, ERROR, cMSG_CHANNEL_SUCCESS,
+ cMSG_CHANNEL_FAILURE, cMSG_CHANNEL_EOF, cMSG_CHANNEL_CLOSE,
+)
from paramiko.message import Message
from paramiko.py3compat import bytes_types
from paramiko.ssh_exception import SSHException
@@ -45,16 +46,17 @@ def open_only(func):
"""
Decorator for `.Channel` methods which performs an openness check.
- :raises SSHException:
- If the wrapped method is called on an unopened `.Channel`.
+ :raises:
+ `.SSHException` -- If the wrapped method is called on an unopened
+ `.Channel`.
"""
@wraps(func)
def _check(self, *args, **kwds):
if (
- self.closed
- or self.eof_received
- or self.eof_sent
- or not self.active
+ self.closed or
+ self.eof_received or
+ self.eof_sent or
+ not self.active
):
raise SSHException('Channel is not open')
return func(self, *args, **kwds)
@@ -74,7 +76,7 @@ class Channel (ClosingContextManager):
flow-controlled independently.) Similarly, if the server isn't reading
data you send, calls to `send` may block, unless you set a timeout. This
is exactly like a normal network socket, so it shouldn't be too surprising.
-
+
Instances of this class may be used as context managers.
"""
@@ -155,16 +157,18 @@ class Channel (ClosingContextManager):
after creating a client channel, to ask the server to provide some
basic terminal semantics for a shell invoked with `invoke_shell`.
It isn't necessary (or desirable) to call this method if you're going
- to exectue a single command with `exec_command`.
+ to execute a single command with `exec_command`.
- :param str term: the terminal type to emulate (for example, ``'vt100'``)
+ :param str term: the terminal type to emulate
+ (for example, ``'vt100'``)
:param int width: width (in characters) of the terminal screen
:param int height: height (in characters) of the terminal screen
:param int width_pixels: width (in pixels) of the terminal screen
:param int height_pixels: height (in pixels) of the terminal screen
- :raises SSHException:
- if the request was rejected or the channel was closed
+ :raises:
+ `.SSHException` -- if the request was rejected or the channel was
+ closed
"""
m = Message()
m.add_byte(cMSG_CHANNEL_REQUEST)
@@ -195,7 +199,8 @@ class Channel (ClosingContextManager):
When the shell exits, the channel will be closed and can't be reused.
You must open a new channel if you wish to open another shell.
- :raises SSHException: if the request was rejected or the channel was
+ :raises:
+ `.SSHException` -- if the request was rejected or the channel was
closed
"""
m = Message()
@@ -220,7 +225,8 @@ class Channel (ClosingContextManager):
:param str command: a shell command to execute.
- :raises SSHException: if the request was rejected or the channel was
+ :raises:
+ `.SSHException` -- if the request was rejected or the channel was
closed
"""
m = Message()
@@ -245,8 +251,9 @@ class Channel (ClosingContextManager):
:param str subsystem: name of the subsystem being requested.
- :raises SSHException:
- if the request was rejected or the channel was closed
+ :raises:
+ `.SSHException` -- if the request was rejected or the channel was
+ closed
"""
m = Message()
m.add_byte(cMSG_CHANNEL_REQUEST)
@@ -269,8 +276,9 @@ class Channel (ClosingContextManager):
:param int width_pixels: new width (in pixels) of the terminal screen
:param int height_pixels: new height (in pixels) of the terminal screen
- :raises SSHException:
- if the request was rejected or the channel was closed
+ :raises:
+ `.SSHException` -- if the request was rejected or the channel was
+ closed
"""
m = Message()
m.add_byte(cMSG_CHANNEL_REQUEST)
@@ -311,11 +319,11 @@ class Channel (ClosingContextManager):
`.Transport` or session's ``window_size`` (e.g. that set by the
``default_window_size`` kwarg for `.Transport.__init__`) will cause
`.recv_exit_status` to hang indefinitely if it is called prior to a
- sufficiently large `~Channel..read` (or if there are no threads
- calling `~Channel.read` in the background).
+ sufficiently large `.Channel.recv` (or if there are no threads
+ calling `.Channel.recv` in the background).
In these cases, ensuring that `.recv_exit_status` is called *after*
- `~Channel.read` (or, again, using threads) can avoid the hang.
+ `.Channel.recv` (or, again, using threads) can avoid the hang.
:return: the exit code (as an `int`) of the process on the server.
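A hedged illustration of the workaround described in the note above: drain the channel before blocking on the exit status. Here ``client`` is assumed to be an already-connected ``SSHClient`` and the command is a placeholder:

    stdin, stdout, stderr = client.exec_command('cat /var/log/syslog')
    data = stdout.read()                         # consume output first, so the
                                                 # remote side never stalls on a
                                                 # full SSH window
    status = stdout.channel.recv_exit_status()   # safe to block now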
@@ -347,8 +355,14 @@ class Channel (ClosingContextManager):
self.transport._send_user_message(m)
@open_only
- def request_x11(self, screen_number=0, auth_protocol=None, auth_cookie=None,
- single_connection=False, handler=None):
+ def request_x11(
+ self,
+ screen_number=0,
+ auth_protocol=None,
+ auth_cookie=None,
+ single_connection=False,
+ handler=None
+ ):
"""
Request an x11 session on this channel. If the server allows it,
further x11 requests can be made from the server to the client,
@@ -364,7 +378,7 @@ class Channel (ClosingContextManager):
generated, used, and returned. You will need to use this value to
verify incoming x11 requests and replace them with the actual local
x11 cookie (which requires some knowledge of the x11 protocol).
-
+
If a handler is passed in, the handler is called from another thread
whenever a new x11 connection arrives. The default handler queues up
incoming x11 connections, which may be retrieved using
@@ -383,8 +397,8 @@ class Channel (ClosingContextManager):
if True, only a single x11 connection will be forwarded (by
default, any number of x11 connections can arrive over this
session)
- :param function handler:
- an optional handler to use for incoming X11 connections
+ :param handler:
+ an optional callable handler to use for incoming X11 connections
:return: the auth_cookie used
"""
if auth_protocol is None:
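A sketch of the default-handler flow the docstring describes, assuming ``transport`` is an authenticated ``Transport``: with no handler passed in, new X11 channels are queued and picked up via ``Transport.accept``:

    chan = transport.open_session()
    chan.get_pty()
    fake_cookie = chan.request_x11()            # random fake cookie generated for us
    chan.exec_command('xterm')
    x11_chan = transport.accept(timeout=30)     # next queued X11 connection, or None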
@@ -413,10 +427,12 @@ class Channel (ClosingContextManager):
Request for a forward SSH Agent on this channel.
This is only valid for an ssh-agent from OpenSSH !!!
- :param function handler:
- a required handler to use for incoming SSH Agent connections
+ :param handler:
+ a required callable handler to use for incoming SSH Agent
+ connections
- :return: True if we are ok, else False (at that time we always return ok)
+ :return: True if we are ok, else False
+            (at present this method always returns True)

:raises: SSHException in case of channel problem.
"""
@@ -497,16 +513,16 @@ class Channel (ClosingContextManager):
self._feed(data)
return old
- ### socket API
+ # ...socket API...
def settimeout(self, timeout):
"""
Set a timeout on blocking read/write operations. The ``timeout``
- argument can be a nonnegative float expressing seconds, or ``None``. If
- a float is given, subsequent channel read/write operations will raise
- a timeout exception if the timeout period value has elapsed before the
- operation has completed. Setting a timeout of ``None`` disables
- timeouts on socket operations.
+ argument can be a nonnegative float expressing seconds, or ``None``.
+ If a float is given, subsequent channel read/write operations will
+ raise a timeout exception if the timeout period value has elapsed
+ before the operation has completed. Setting a timeout of ``None``
+ disables timeouts on socket operations.
``chan.settimeout(0.0)`` is equivalent to ``chan.setblocking(0)``;
``chan.settimeout(None)`` is equivalent to ``chan.setblocking(1)``.
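The documented equivalences, spelled out for an open ``Channel`` named ``chan`` (assumed already obtained from a session):

    chan.settimeout(5.0)    # recv()/send() raise socket.timeout after 5 seconds
    chan.settimeout(0.0)    # same as chan.setblocking(0): never block
    chan.settimeout(None)   # same as chan.setblocking(1): block indefinitely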
@@ -600,11 +616,11 @@ class Channel (ClosingContextManager):
"""
Receive data from the channel. The return value is a string
representing the data received. The maximum amount of data to be
- received at once is specified by ``nbytes``. If a string of length zero
- is returned, the channel stream has closed.
+ received at once is specified by ``nbytes``. If a string of
+ length zero is returned, the channel stream has closed.
:param int nbytes: maximum number of bytes to read.
- :return: received data, as a `bytes`
+ :return: received data, as a ``str``/``bytes``.
:raises socket.timeout:
if no data is ready before the timeout set by `settimeout`.
@@ -751,7 +767,7 @@ class Channel (ClosingContextManager):
if sending stalled for longer than the timeout set by `settimeout`.
:raises socket.error:
if an error occurred before the entire string was sent.
-
+
.. note::
If the channel is closed while only part of the data has been
sent, there is no way to determine how much data (if any) was sent.
@@ -775,7 +791,7 @@ class Channel (ClosingContextManager):
if sending stalled for longer than the timeout set by `settimeout`.
:raises socket.error:
if an error occurred before the entire string was sent.
-
+
.. versionadded:: 1.1
"""
while s:
@@ -887,7 +903,13 @@ class Channel (ClosingContextManager):
"""
self.shutdown(1)
- ### calls from Transport
+ @property
+ def _closed(self):
+ # Concession to Python 3's socket API, which has a private ._closed
+ # attribute instead of a semipublic .closed attribute.
+ return self.closed
+
+ # ...calls from Transport
def _set_transport(self, transport):
self.transport = transport
@@ -896,7 +918,8 @@ class Channel (ClosingContextManager):
def _set_window(self, window_size, max_packet_size):
self.in_window_size = window_size
self.in_max_packet_size = max_packet_size
- # threshold of bytes we receive before we bother to send a window update
+ # threshold of bytes we receive before we bother to send
+ # a window update
self.in_window_threshold = window_size // 10
self.in_window_sofar = 0
self._log(DEBUG, 'Max packet in: %d bytes' % max_packet_size)
@@ -904,8 +927,9 @@ class Channel (ClosingContextManager):
def _set_remote_channel(self, chanid, window_size, max_packet_size):
self.remote_chanid = chanid
self.out_window_size = window_size
- self.out_max_packet_size = self.transport. \
- _sanitize_packet_size(max_packet_size)
+ self.out_max_packet_size = self.transport._sanitize_packet_size(
+ max_packet_size
+ )
self.active = 1
self._log(DEBUG, 'Max packet out: %d bytes' % self.out_max_packet_size)
@@ -937,7 +961,10 @@ class Channel (ClosingContextManager):
code = m.get_int()
s = m.get_binary()
if code != 1:
- self._log(ERROR, 'unknown extended_data type %d; discarding' % code)
+ self._log(
+ ERROR,
+ 'unknown extended_data type %d; discarding' % code
+ )
return
if self.combine_stderr:
self._feed(s)
@@ -977,8 +1004,15 @@ class Channel (ClosingContextManager):
if server is None:
ok = False
else:
- ok = server.check_channel_pty_request(self, term, width, height, pixelwidth,
- pixelheight, modes)
+ ok = server.check_channel_pty_request(
+ self,
+ term,
+ width,
+ height,
+ pixelwidth,
+ pixelheight,
+ modes
+ )
elif key == 'shell':
if server is None:
ok = False
@@ -1011,8 +1045,8 @@ class Channel (ClosingContextManager):
if server is None:
ok = False
else:
- ok = server.check_channel_window_change_request(self, width, height, pixelwidth,
- pixelheight)
+ ok = server.check_channel_window_change_request(
+ self, width, height, pixelwidth, pixelheight)
elif key == 'x11-req':
single_connection = m.get_boolean()
auth_proto = m.get_text()
@@ -1021,8 +1055,13 @@ class Channel (ClosingContextManager):
if server is None:
ok = False
else:
- ok = server.check_channel_x11_request(self, single_connection,
- auth_proto, auth_cookie, screen_number)
+ ok = server.check_channel_x11_request(
+ self,
+ single_connection,
+ auth_proto,
+ auth_cookie,
+ screen_number
+ )
elif key == 'auth-agent-req@openssh.com':
if server is None:
ok = False
@@ -1064,14 +1103,15 @@ class Channel (ClosingContextManager):
if m is not None:
self.transport._send_user_message(m)
- ### internals...
+ # ...internals...
def _send(self, s, m):
size = len(s)
self.lock.acquire()
try:
if self.closed:
- # this doesn't seem useful, but it is the documented behavior of Socket
+ # this doesn't seem useful, but it is the documented behavior
+ # of Socket
raise socket.error('Socket is closed')
size = self._wait_for_send_window(size)
if size == 0:
@@ -1139,7 +1179,8 @@ class Channel (ClosingContextManager):
return m1, m2
def _unlink(self):
- # server connection could die before we become active: still signal the close!
+ # server connection could die before we become active:
+ # still signal the close!
if self.closed:
return
self.lock.acquire()
@@ -1182,7 +1223,8 @@ class Channel (ClosingContextManager):
# should we block?
if self.timeout == 0.0:
raise socket.timeout()
- # loop here in case we get woken up but a different thread has filled the buffer
+ # loop here in case we get woken up but a different thread has
+ # filled the buffer
timeout = self.timeout
while self.out_window_size == 0:
if self.closed or self.eof_sent:
diff --git a/paramiko/client.py b/paramiko/client.py
index e3d3780e..ef1dc43e 100644
--- a/paramiko/client.py
+++ b/paramiko/client.py
@@ -91,7 +91,7 @@ class SSHClient (ClosingContextManager):
:param str filename: the filename to read, or ``None``
- :raises IOError:
+ :raises: ``IOError`` --
if a filename was provided and the file could not be read
"""
if filename is None:
@@ -118,7 +118,7 @@ class SSHClient (ClosingContextManager):
:param str filename: the filename to read
- :raises IOError: if the filename could not be read
+ :raises: ``IOError`` -- if the filename could not be read
"""
self._host_keys_filename = filename
self._host_keys.load(filename)
@@ -131,7 +131,7 @@ class SSHClient (ClosingContextManager):
:param str filename: the filename to save to
- :raises IOError: if the file could not be written
+ :raises: ``IOError`` -- if the file could not be written
"""
# update local host keys from file (in case other SSH clients
@@ -142,7 +142,8 @@ class SSHClient (ClosingContextManager):
with open(filename, 'w') as f:
for hostname, keys in self._host_keys.items():
for keytype, key in keys.items():
- f.write('%s %s %s\n' % (hostname, keytype, key.get_base64()))
+ f.write('%s %s %s\n' % (
+ hostname, keytype, key.get_base64()))
def get_host_keys(self):
"""
@@ -197,14 +198,16 @@ class SSHClient (ClosingContextManager):
:returns: Yields an iterable of ``(family, address)`` tuples
"""
guess = True
- addrinfos = socket.getaddrinfo(hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM)
+ addrinfos = socket.getaddrinfo(
+ hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM)
for (family, socktype, proto, canonname, sockaddr) in addrinfos:
if socktype == socket.SOCK_STREAM:
yield family, sockaddr
guess = False
- # some OS like AIX don't indicate SOCK_STREAM support, so just guess. :(
- # We only do this if we did not get a single result marked as socktype == SOCK_STREAM.
+ # some OS like AIX don't indicate SOCK_STREAM support, so just
+ # guess. :( We only do this if we did not get a single result marked
+ # as socktype == SOCK_STREAM.
if guess:
for family, _, _, _, sockaddr in addrinfos:
yield family, sockaddr
@@ -279,10 +282,12 @@ class SSHClient (ClosingContextManager):
:param float banner_timeout: an optional timeout (in seconds) to wait
for the SSH banner to be presented.
- :raises BadHostKeyException: if the server's host key could not be
+ :raises:
+ `.BadHostKeyException` -- if the server's host key could not be
verified
- :raises AuthenticationException: if authentication failed
- :raises SSHException: if there was any other error connecting or
+ :raises: `.AuthenticationException` -- if authentication failed
+ :raises:
+ `.SSHException` -- if there was any other error connecting or
establishing an SSH session
:raises socket.error: if a socket error occurred while connecting
@@ -323,7 +328,8 @@ class SSHClient (ClosingContextManager):
if len(errors) == len(to_try):
raise NoValidConnectionsError(errors)
- t = self._transport = Transport(sock, gss_kex=gss_kex, gss_deleg_creds=gss_deleg_creds)
+ t = self._transport = Transport(
+ sock, gss_kex=gss_kex, gss_deleg_creds=gss_deleg_creds)
t.use_compression(compress=compress)
if gss_kex and gss_host is None:
t.set_gss_host(hostname)
@@ -336,6 +342,7 @@ class SSHClient (ClosingContextManager):
if banner_timeout is not None:
t.banner_timeout = banner_timeout
t.start_client()
+ t.set_sshclient(self)
ResourceManager.register(self, t)
server_key = t.get_remote_server_key()
@@ -350,13 +357,14 @@ class SSHClient (ClosingContextManager):
# host key, because the host is authenticated via GSS-API / SSPI as
# well as our client.
if not self._transport.use_gss_kex:
- our_server_key = self._system_host_keys.get(server_hostkey_name,
- {}).get(keytype, None)
+ our_server_key = self._system_host_keys.get(
+ server_hostkey_name, {}).get(keytype)
if our_server_key is None:
our_server_key = self._host_keys.get(server_hostkey_name,
{}).get(keytype, None)
if our_server_key is None:
- # will raise exception if the key is rejected; let that fall out
+ # will raise exception if the key is rejected;
+ # let that fall out
self._policy.missing_host_key(self, server_hostkey_name,
server_key)
# if the callback returns, assume the key is ok
@@ -382,6 +390,12 @@ class SSHClient (ClosingContextManager):
def close(self):
"""
Close this SSHClient and its underlying `.Transport`.
+
+ .. warning::
+ Failure to do this may, in some situations, cause your Python
+ interpreter to hang at shutdown (often due to race conditions).
+ It's good practice to `close` your client objects anytime you're
+ done using them, instead of relying on garbage collection.
"""
if self._transport is None:
return
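One way to honor the new warning without remembering to call ``close`` by hand is the context-manager support ``SSHClient`` already has; a sketch in which the host, user, and policy are placeholders:

    from paramiko import AutoAddPolicy, SSHClient

    with SSHClient() as client:                     # __exit__ calls close()
        client.set_missing_host_key_policy(AutoAddPolicy())
        client.connect('host.example.com', username='deploy')
        stdin, stdout, stderr = client.exec_command('uptime')
        print(stdout.read().decode())
    # client.close() has already run here, so interpreter shutdown can't hang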
@@ -404,12 +418,12 @@ class SSHClient (ClosingContextManager):
interpreted the same way as by the built-in ``file()`` function in
Python
:param int timeout:
- set command's channel timeout. See `Channel.settimeout`.settimeout
+ set command's channel timeout. See `.Channel.settimeout`
:return:
the stdin, stdout, and stderr of the executing command, as a
3-tuple
- :raises SSHException: if the server fails to execute the command
+ :raises: `.SSHException` -- if the server fails to execute the command
"""
chan = self._transport.open_session(timeout=timeout)
if get_pty:
@@ -436,7 +450,7 @@ class SSHClient (ClosingContextManager):
:param int height_pixels: the height (in pixels) of the terminal window
:return: a new `.Channel` connected to the remote shell
- :raises SSHException: if the server fails to invoke a shell
+ :raises: `.SSHException` -- if the server fails to invoke a shell
"""
chan = self._transport.open_session()
chan.get_pty(term, width, height, width_pixels, height_pixels)
@@ -478,7 +492,7 @@ class SSHClient (ClosingContextManager):
saved_exception = None
two_factor = False
allowed_types = set()
- two_factor_types = set(['keyboard-interactive','password'])
+ two_factor_types = set(['keyboard-interactive', 'password'])
# If GSS-API support and GSS-PI Key Exchange was performed, we attempt
# authentication with gssapi-keyex.
@@ -503,8 +517,11 @@ class SSHClient (ClosingContextManager):
if pkey is not None:
try:
- self._log(DEBUG, 'Trying SSH key %s' % hexlify(pkey.get_fingerprint()))
- allowed_types = set(self._transport.auth_publickey(username, pkey))
+ self._log(
+ DEBUG,
+ 'Trying SSH key %s' % hexlify(pkey.get_fingerprint()))
+ allowed_types = set(
+ self._transport.auth_publickey(username, pkey))
two_factor = (allowed_types & two_factor_types)
if not two_factor:
return
@@ -515,9 +532,14 @@ class SSHClient (ClosingContextManager):
for key_filename in key_filenames:
for pkey_class in (RSAKey, DSSKey, ECDSAKey):
try:
- key = pkey_class.from_private_key_file(key_filename, password)
- self._log(DEBUG, 'Trying key %s from %s' % (hexlify(key.get_fingerprint()), key_filename))
- allowed_types = set(self._transport.auth_publickey(username, key))
+ key = pkey_class.from_private_key_file(
+ key_filename, password)
+ self._log(
+ DEBUG,
+ 'Trying key %s from %s' % (
+ hexlify(key.get_fingerprint()), key_filename))
+ allowed_types = set(
+ self._transport.auth_publickey(username, key))
two_factor = (allowed_types & two_factor_types)
if not two_factor:
return
@@ -531,9 +553,14 @@ class SSHClient (ClosingContextManager):
for key in self._agent.get_keys():
try:
- self._log(DEBUG, 'Trying SSH agent key %s' % hexlify(key.get_fingerprint()))
- # for 2-factor auth a successfully auth'd key password will return an allowed 2fac auth method
- allowed_types = set(self._transport.auth_publickey(username, key))
+ self._log(
+ DEBUG,
+ 'Trying SSH agent key %s' % hexlify(
+ key.get_fingerprint()))
+ # for 2-factor auth a successfully auth'd key password
+ # will return an allowed 2fac auth method
+ allowed_types = set(
+ self._transport.auth_publickey(username, key))
two_factor = (allowed_types & two_factor_types)
if not two_factor:
return
@@ -569,9 +596,15 @@ class SSHClient (ClosingContextManager):
for pkey_class, filename in keyfiles:
try:
key = pkey_class.from_private_key_file(filename, password)
- self._log(DEBUG, 'Trying discovered key %s in %s' % (hexlify(key.get_fingerprint()), filename))
- # for 2-factor auth a successfully auth'd key will result in ['password']
- allowed_types = set(self._transport.auth_publickey(username, key))
+ self._log(
+ DEBUG,
+ 'Trying discovered key %s in %s' % (
+ hexlify(key.get_fingerprint()), filename))
+
+ # for 2-factor auth a successfully auth'd key will result
+ # in ['password']
+ allowed_types = set(
+ self._transport.auth_publickey(username, key))
two_factor = (allowed_types & two_factor_types)
if not two_factor:
return
@@ -655,4 +688,5 @@ class WarningPolicy (MissingHostKeyPolicy):
"""
def missing_host_key(self, client, hostname, key):
warnings.warn('Unknown %s host key for %s: %s' %
- (key.get_name(), hostname, hexlify(key.get_fingerprint())))
+ (key.get_name(), hostname, hexlify(
+ key.get_fingerprint())))
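The same ``missing_host_key`` hook that ``WarningPolicy`` implements above can back a custom policy; a sketch (the class name is illustrative only):

    from binascii import hexlify
    from paramiko.client import MissingHostKeyPolicy

    class LogThenAcceptPolicy(MissingHostKeyPolicy):
        def missing_host_key(self, client, hostname, key):
            # Returning (rather than raising) tells SSHClient to trust the key.
            print('Accepting %s key for %s: %s' % (
                key.get_name(), hostname, hexlify(key.get_fingerprint())))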
diff --git a/paramiko/common.py b/paramiko/common.py
index 0b0cc2a7..556f046a 100644
--- a/paramiko/common.py
+++ b/paramiko/common.py
@@ -20,10 +20,12 @@
Common constants and global variables.
"""
import logging
-from paramiko.py3compat import byte_chr, PY2, bytes_types, string_types, b, long
+from paramiko.py3compat import (
+ byte_chr, PY2, bytes_types, string_types, b, long,
+)
-MSG_DISCONNECT, MSG_IGNORE, MSG_UNIMPLEMENTED, MSG_DEBUG, MSG_SERVICE_REQUEST, \
- MSG_SERVICE_ACCEPT = range(1, 7)
+MSG_DISCONNECT, MSG_IGNORE, MSG_UNIMPLEMENTED, MSG_DEBUG, \
+ MSG_SERVICE_REQUEST, MSG_SERVICE_ACCEPT = range(1, 7)
MSG_KEXINIT, MSG_NEWKEYS = range(20, 22)
MSG_USERAUTH_REQUEST, MSG_USERAUTH_FAILURE, MSG_USERAUTH_SUCCESS, \
MSG_USERAUTH_BANNER = range(50, 54)
@@ -31,7 +33,7 @@ MSG_USERAUTH_PK_OK = 60
MSG_USERAUTH_INFO_REQUEST, MSG_USERAUTH_INFO_RESPONSE = range(60, 62)
MSG_USERAUTH_GSSAPI_RESPONSE, MSG_USERAUTH_GSSAPI_TOKEN = range(60, 62)
MSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE, MSG_USERAUTH_GSSAPI_ERROR,\
-MSG_USERAUTH_GSSAPI_ERRTOK, MSG_USERAUTH_GSSAPI_MIC = range(63, 67)
+ MSG_USERAUTH_GSSAPI_ERRTOK, MSG_USERAUTH_GSSAPI_MIC = range(63, 67)
MSG_GLOBAL_REQUEST, MSG_REQUEST_SUCCESS, MSG_REQUEST_FAILURE = range(80, 83)
MSG_CHANNEL_OPEN, MSG_CHANNEL_OPEN_SUCCESS, MSG_CHANNEL_OPEN_FAILURE, \
MSG_CHANNEL_WINDOW_ADJUST, MSG_CHANNEL_DATA, MSG_CHANNEL_EXTENDED_DATA, \
@@ -55,7 +57,8 @@ cMSG_USERAUTH_INFO_REQUEST = byte_chr(MSG_USERAUTH_INFO_REQUEST)
cMSG_USERAUTH_INFO_RESPONSE = byte_chr(MSG_USERAUTH_INFO_RESPONSE)
cMSG_USERAUTH_GSSAPI_RESPONSE = byte_chr(MSG_USERAUTH_GSSAPI_RESPONSE)
cMSG_USERAUTH_GSSAPI_TOKEN = byte_chr(MSG_USERAUTH_GSSAPI_TOKEN)
-cMSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE = byte_chr(MSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE)
+cMSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE = \
+ byte_chr(MSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE)
cMSG_USERAUTH_GSSAPI_ERROR = byte_chr(MSG_USERAUTH_GSSAPI_ERROR)
cMSG_USERAUTH_GSSAPI_ERRTOK = byte_chr(MSG_USERAUTH_GSSAPI_ERRTOK)
cMSG_USERAUTH_GSSAPI_MIC = byte_chr(MSG_USERAUTH_GSSAPI_MIC)
@@ -170,6 +173,7 @@ def asbytes(s):
raise Exception('Unknown type')
return s
+
xffffffff = long(0xffffffff)
x80000000 = long(0x80000000)
o666 = 438
@@ -202,4 +206,4 @@ MIN_WINDOW_SIZE = 2 ** 15
MIN_PACKET_SIZE = 2 ** 12
# Max windows size according to http://www.ietf.org/rfc/rfc4254.txt
-MAX_WINDOW_SIZE = 2**32 -1
+MAX_WINDOW_SIZE = 2 ** 32 - 1
diff --git a/paramiko/config.py b/paramiko/config.py
index e18fa4bf..073abb36 100644
--- a/paramiko/config.py
+++ b/paramiko/config.py
@@ -58,7 +58,7 @@ class SSHConfig (object):
host = {"host": ['*'], "config": {}}
for line in file_obj:
# Strip any leading or trailing whitespace from the line.
- # See https://github.com/paramiko/paramiko/issues/499 for more info.
+ # Refer to https://github.com/paramiko/paramiko/issues/499
line = line.strip()
if not line or line.startswith('#'):
continue
@@ -68,7 +68,7 @@ class SSHConfig (object):
raise Exception("Unparsable line %s" % line)
key = match.group(1).lower()
value = match.group(2)
-
+
if key == 'host':
self._config.append(host)
host = {
@@ -76,15 +76,17 @@ class SSHConfig (object):
'config': {}
}
elif key == 'proxycommand' and value.lower() == 'none':
- # Proxycommands of none should not be added as an actual value. (Issue #415)
- continue
+ # Store 'none' as None; prior to 3.x, it will get stripped out
+ # at the end (for compatibility with issue #415). After 3.x, it
+ # will simply not get stripped, leaving a nice explicit marker.
+ host['config'][key] = None
else:
if value.startswith('"') and value.endswith('"'):
value = value[1:-1]
- #identityfile, localforward, remoteforward keys are special cases, since they are allowed to be
- # specified multiple times and they should be tried in order
- # of specification.
+ # identityfile, localforward, remoteforward keys are special
+ # cases, since they are allowed to be specified multiple times
+ # and they should be tried in order of specification.
if key in ['identityfile', 'localforward', 'remoteforward']:
if key in host['config']:
host['config'][key].append(value)
@@ -127,10 +129,13 @@ class SSHConfig (object):
# else it will reference the original list
# in self._config and update that value too
# when the extend() is being called.
- ret[key] = value[:]
+ ret[key] = value[:] if value is not None else value
elif key == 'identityfile':
ret[key].extend(value)
ret = self._expand_variables(ret, hostname)
+ # TODO: remove in 3.x re #670
+ if 'proxycommand' in ret and ret['proxycommand'] is None:
+ del ret['proxycommand']
return ret
def get_hostnames(self):
@@ -204,6 +209,7 @@ class SSHConfig (object):
],
'proxycommand':
[
+ ('~', homedir),
('%h', config['hostname']),
('%p', port),
('%r', remoteuser)
@@ -211,13 +217,16 @@ class SSHConfig (object):
}
for k in config:
+ if config[k] is None:
+ continue
if k in replacements:
for find, replace in replacements[k]:
if isinstance(config[k], list):
for item in range(len(config[k])):
if find in config[k][item]:
- config[k][item] = config[k][item].\
- replace(find, str(replace))
+ config[k][item] = config[k][item].replace(
+ find, str(replace)
+ )
else:
if find in config[k]:
config[k] = config[k].replace(find, str(replace))
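A worked example of the substitution table above (``%h``, ``%p``, ``%r`` and now ``~`` inside ``proxycommand``), using the public ``SSHConfig`` API; the host names are made up:

    from io import StringIO
    from paramiko.config import SSHConfig

    config = SSHConfig()
    config.parse(StringIO(
        "Host gateway\n"
        "    HostName gw.example.com\n"
        "    ProxyCommand ssh -W %h:%p jump.example.com\n"
    ))
    print(config.lookup('gateway')['proxycommand'])
    # expected: 'ssh -W gw.example.com:22 jump.example.com'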
@@ -259,8 +268,9 @@ class LazyFqdn(object):
address_family = self.config.get('addressfamily', 'any').lower()
if address_family != 'any':
try:
- family = socket.AF_INET if address_family == 'inet' \
- else socket.AF_INET6
+ family = socket.AF_INET6
+ if address_family == 'inet':
+                    family = socket.AF_INET
results = socket.getaddrinfo(
self.host,
None,
diff --git a/paramiko/dsskey.py b/paramiko/dsskey.py
index 7e14422c..55ef1e9b 100644
--- a/paramiko/dsskey.py
+++ b/paramiko/dsskey.py
@@ -25,7 +25,7 @@ from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import dsa
from cryptography.hazmat.primitives.asymmetric.utils import (
- decode_rfc6979_signature, encode_rfc6979_signature
+ decode_dss_signature, encode_dss_signature
)
from paramiko import util
@@ -42,7 +42,8 @@ class DSSKey(PKey):
data.
"""
- def __init__(self, msg=None, data=None, filename=None, password=None, vals=None, file_obj=None):
+ def __init__(self, msg=None, data=None, filename=None, password=None,
+ vals=None, file_obj=None):
self.p = None
self.q = None
self.g = None
@@ -113,7 +114,7 @@ class DSSKey(PKey):
).private_key(backend=default_backend())
signer = key.signer(hashes.SHA1())
signer.update(data)
- r, s = decode_rfc6979_signature(signer.finalize())
+ r, s = decode_dss_signature(signer.finalize())
m = Message()
m.add_string('ssh-dss')
@@ -141,7 +142,7 @@ class DSSKey(PKey):
sigR = util.inflate_long(sig[:20], 1)
sigS = util.inflate_long(sig[20:], 1)
- signature = encode_rfc6979_signature(sigR, sigS)
+ signature = encode_dss_signature(sigR, sigS)
key = dsa.DSAPublicNumbers(
y=self.y,
@@ -207,7 +208,7 @@ class DSSKey(PKey):
generate a new host key or authentication key.
:param int bits: number of bits the generated key should be.
- :param function progress_func: Unused
+ :param progress_func: Unused
:return: new `.DSSKey` private key
"""
numbers = dsa.generate_private_key(
@@ -222,7 +223,7 @@ class DSSKey(PKey):
key.x = numbers.x
return key
- ### internals...
+ # ...internals...
def _from_private_key_file(self, filename, password):
data = self._read_private_key_file('DSA', filename, password)
@@ -239,8 +240,13 @@ class DSSKey(PKey):
keylist = BER(data).decode()
except BERException as e:
raise SSHException('Unable to parse key file: ' + str(e))
- if (type(keylist) is not list) or (len(keylist) < 6) or (keylist[0] != 0):
- raise SSHException('not a valid DSA private key file (bad ber encoding)')
+ if (
+ type(keylist) is not list or
+ len(keylist) < 6 or
+ keylist[0] != 0
+ ):
+ raise SSHException(
+ 'not a valid DSA private key file (bad ber encoding)')
self.p = keylist[1]
self.q = keylist[2]
self.g = keylist[3]
diff --git a/paramiko/ecdsakey.py b/paramiko/ecdsakey.py
index c69bef73..f5dacac8 100644
--- a/paramiko/ecdsakey.py
+++ b/paramiko/ecdsakey.py
@@ -20,21 +20,73 @@
ECDSA keys
"""
-import binascii
-
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.asymmetric.utils import (
- decode_rfc6979_signature, encode_rfc6979_signature
+ decode_dss_signature, encode_dss_signature
)
from paramiko.common import four_byte
from paramiko.message import Message
from paramiko.pkey import PKey
from paramiko.ssh_exception import SSHException
-from paramiko.util import deflate_long, inflate_long
+from paramiko.util import deflate_long
+
+
+class _ECDSACurve(object):
+ """
+ Represents a specific ECDSA Curve (nistp256, nistp384, etc).
+
+ Handles the generation of the key format identifier and the selection of
+ the proper hash function. Also grabs the proper curve from the 'ecdsa'
+    the proper hash function. Also records the backing curve class from the
+    'cryptography' library.
+ def __init__(self, curve_class, nist_name):
+ self.nist_name = nist_name
+ self.key_length = curve_class.key_size
+
+ # Defined in RFC 5656 6.2
+ self.key_format_identifier = "ecdsa-sha2-" + self.nist_name
+
+ # Defined in RFC 5656 6.2.1
+ if self.key_length <= 256:
+ self.hash_object = hashes.SHA256
+ elif self.key_length <= 384:
+ self.hash_object = hashes.SHA384
+ else:
+ self.hash_object = hashes.SHA512
+
+ self.curve_class = curve_class
+
+
+class _ECDSACurveSet(object):
+ """
+ A collection to hold the ECDSA curves. Allows querying by oid and by key
+ format identifier. The two ways in which ECDSAKey needs to be able to look
+ up curves.
+ """
+ def __init__(self, ecdsa_curves):
+ self.ecdsa_curves = ecdsa_curves
+
+ def get_key_format_identifier_list(self):
+ return [curve.key_format_identifier for curve in self.ecdsa_curves]
+
+ def get_by_curve_class(self, curve_class):
+ for curve in self.ecdsa_curves:
+ if curve.curve_class == curve_class:
+ return curve
+
+ def get_by_key_format_identifier(self, key_format_identifier):
+ for curve in self.ecdsa_curves:
+ if curve.key_format_identifier == key_format_identifier:
+ return curve
+
+ def get_by_key_length(self, key_length):
+ for curve in self.ecdsa_curves:
+ if curve.key_length == key_length:
+ return curve
class ECDSAKey(PKey):
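How the new registry is intended to be queried (these are internal helpers added in this diff, so treat the calls as illustrative rather than public API):

    from paramiko.ecdsakey import ECDSAKey

    print(ECDSAKey.supported_key_format_identifiers())
    # ['ecdsa-sha2-nistp256', 'ecdsa-sha2-nistp384', 'ecdsa-sha2-nistp521']

    curve = ECDSAKey._ECDSA_CURVES.get_by_key_length(384)
    print(curve.key_format_identifier)   # 'ecdsa-sha2-nistp384'
    print(curve.hash_object)             # hashes.SHA384, per RFC 5656 6.2.1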
@@ -43,6 +95,12 @@ class ECDSAKey(PKey):
data.
"""
+ _ECDSA_CURVES = _ECDSACurveSet([
+ _ECDSACurve(ec.SECP256R1, 'nistp256'),
+ _ECDSACurve(ec.SECP384R1, 'nistp384'),
+ _ECDSACurve(ec.SECP521R1, 'nistp521'),
+ ])
+
def __init__(self, msg=None, data=None, filename=None, password=None,
vals=None, file_obj=None, validate_point=True):
self.verifying_key = None
@@ -57,41 +115,47 @@ class ECDSAKey(PKey):
msg = Message(data)
if vals is not None:
self.signing_key, self.verifying_key = vals
+ c_class = self.signing_key.curve.__class__
+ self.ecdsa_curve = self._ECDSA_CURVES.get_by_curve_class(c_class)
else:
if msg is None:
raise SSHException('Key object may not be empty')
- if msg.get_text() != 'ecdsa-sha2-nistp256':
+ self.ecdsa_curve = self._ECDSA_CURVES.get_by_key_format_identifier(
+ msg.get_text())
+ if self.ecdsa_curve is None:
raise SSHException('Invalid key')
curvename = msg.get_text()
- if curvename != 'nistp256':
+ if curvename != self.ecdsa_curve.nist_name:
raise SSHException("Can't handle curve of type %s" % curvename)
pointinfo = msg.get_binary()
- if pointinfo[0:1] != four_byte:
- raise SSHException('Point compression is being used: %s' %
- binascii.hexlify(pointinfo))
- curve = ec.SECP256R1()
- numbers = ec.EllipticCurvePublicNumbers(
- x=inflate_long(pointinfo[1:1 + curve.key_size // 8], always_positive=True),
- y=inflate_long(pointinfo[1 + curve.key_size // 8:], always_positive=True),
- curve=curve
- )
+ try:
+ numbers = ec.EllipticCurvePublicNumbers.from_encoded_point(
+ self.ecdsa_curve.curve_class(), pointinfo
+ )
+ except ValueError:
+ raise SSHException("Invalid public key")
self.verifying_key = numbers.public_key(backend=default_backend())
- self.size = 256
+
+ @classmethod
+ def supported_key_format_identifiers(cls):
+ return cls._ECDSA_CURVES.get_key_format_identifier_list()
def asbytes(self):
key = self.verifying_key
m = Message()
- m.add_string('ecdsa-sha2-nistp256')
- m.add_string('nistp256')
+ m.add_string(self.ecdsa_curve.key_format_identifier)
+ m.add_string(self.ecdsa_curve.nist_name)
numbers = key.public_numbers()
+ key_size_bytes = (key.curve.key_size + 7) // 8
+
x_bytes = deflate_long(numbers.x, add_sign_padding=False)
- x_bytes = b'\x00' * (len(x_bytes) - key.curve.key_size // 8) + x_bytes
+ x_bytes = b'\x00' * (key_size_bytes - len(x_bytes)) + x_bytes
y_bytes = deflate_long(numbers.y, add_sign_padding=False)
- y_bytes = b'\x00' * (len(y_bytes) - key.curve.key_size // 8) + y_bytes
+ y_bytes = b'\x00' * (key_size_bytes - len(y_bytes)) + y_bytes
point_str = four_byte + x_bytes + y_bytes
m.add_string(point_str)
@@ -107,34 +171,35 @@ class ECDSAKey(PKey):
return hash(h)
def get_name(self):
- return 'ecdsa-sha2-nistp256'
+ return self.ecdsa_curve.key_format_identifier
def get_bits(self):
- return self.size
+ return self.ecdsa_curve.key_length
def can_sign(self):
return self.signing_key is not None
def sign_ssh_data(self, data):
- signer = self.signing_key.signer(ec.ECDSA(hashes.SHA256()))
+ ecdsa = ec.ECDSA(self.ecdsa_curve.hash_object())
+ signer = self.signing_key.signer(ecdsa)
signer.update(data)
sig = signer.finalize()
- r, s = decode_rfc6979_signature(sig)
+ r, s = decode_dss_signature(sig)
m = Message()
- m.add_string('ecdsa-sha2-nistp256')
+ m.add_string(self.ecdsa_curve.key_format_identifier)
m.add_string(self._sigencode(r, s))
return m
def verify_ssh_sig(self, data, msg):
- if msg.get_text() != 'ecdsa-sha2-nistp256':
+ if msg.get_text() != self.ecdsa_curve.key_format_identifier:
return False
sig = msg.get_binary()
sigR, sigS = self._sigdecode(sig)
- signature = encode_rfc6979_signature(sigR, sigS)
+ signature = encode_dss_signature(sigR, sigS)
verifier = self.verifying_key.verifier(
- signature, ec.ECDSA(hashes.SHA256())
+ signature, ec.ECDSA(self.ecdsa_curve.hash_object())
)
verifier.update(data)
try:
@@ -160,19 +225,25 @@ class ECDSAKey(PKey):
password=password
)
- @staticmethod
- def generate(curve=ec.SECP256R1(), progress_func=None):
+ @classmethod
+ def generate(cls, curve=ec.SECP256R1(), progress_func=None, bits=None):
"""
Generate a new private ECDSA key. This factory function can be used to
generate a new host key or authentication key.
- :param function progress_func: Not used for this type of key.
+ :param progress_func: Not used for this type of key.
:returns: A new private key (`.ECDSAKey`) object
"""
+ if bits is not None:
+ curve = cls._ECDSA_CURVES.get_by_key_length(bits)
+ if curve is None:
+ raise ValueError("Unsupported key length: %d" % bits)
+ curve = curve.curve_class()
+
private_key = ec.generate_private_key(curve, backend=default_backend())
return ECDSAKey(vals=(private_key, private_key.public_key()))
- ### internals...
+ # ...internals...
def _from_private_key_file(self, filename, password):
data = self._read_private_key_file('EC', filename, password)
@@ -187,12 +258,13 @@ class ECDSAKey(PKey):
key = serialization.load_der_private_key(
data, password=None, backend=default_backend()
)
- except ValueError as e:
+ except (ValueError, AssertionError) as e:
raise SSHException(str(e))
self.signing_key = key
self.verifying_key = key.public_key()
- self.size = key.curve.key_size
+ curve_class = key.curve.__class__
+ self.ecdsa_curve = self._ECDSA_CURVES.get_by_curve_class(curve_class)
def _sigencode(self, r, s):
msg = Message()
diff --git a/paramiko/file.py b/paramiko/file.py
index 05f2d6e6..5212091a 100644
--- a/paramiko/file.py
+++ b/paramiko/file.py
@@ -15,8 +15,9 @@
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
-from paramiko.common import linefeed_byte_value, crlf, cr_byte, linefeed_byte, \
- cr_byte_value
+from paramiko.common import (
+ linefeed_byte_value, crlf, cr_byte, linefeed_byte, cr_byte_value,
+)
from paramiko.py3compat import BytesIO, PY2, u, b, bytes_types
from paramiko.util import ClosingContextManager
@@ -66,7 +67,7 @@ class BufferedFile (ClosingContextManager):
file. This iterator happens to return the file itself, since a file is
its own iterator.
- :raises ValueError: if the file is closed.
+ :raises: ``ValueError`` -- if the file is closed.
"""
if self._closed:
raise ValueError('I/O operation on closed file')
@@ -92,10 +93,10 @@ class BufferedFile (ClosingContextManager):
def next(self):
"""
Returns the next line from the input, or raises
- `~exceptions.StopIteration` when EOF is hit. Unlike Python file
+ ``StopIteration`` when EOF is hit. Unlike Python file
objects, it's okay to mix calls to `next` and `readline`.
- :raises StopIteration: when the end of the file is reached.
+ :raises: ``StopIteration`` -- when the end of the file is reached.
:returns: a line (`str`) read from the file.
"""
@@ -106,11 +107,11 @@ class BufferedFile (ClosingContextManager):
else:
def __next__(self):
"""
- Returns the next line from the input, or raises `.StopIteration` when
- EOF is hit. Unlike python file objects, it's okay to mix calls to
- `.next` and `.readline`.
+ Returns the next line from the input, or raises ``StopIteration``
+ when EOF is hit. Unlike python file objects, it's okay to mix
+ calls to `.next` and `.readline`.
- :raises StopIteration: when the end of the file is reached.
+ :raises: ``StopIteration`` -- when the end of the file is reached.
:returns: a line (`str`) read from the file.
"""
@@ -151,8 +152,8 @@ class BufferedFile (ClosingContextManager):
def readinto(self, buff):
"""
- Read up to ``len(buff)`` bytes into :class:`bytearray` *buff* and
- return the number of bytes read.
+ Read up to ``len(buff)`` bytes into ``bytearray`` *buff* and return the
+ number of bytes read.
:returns:
The number of bytes read.
@@ -163,9 +164,9 @@ class BufferedFile (ClosingContextManager):
def read(self, size=None):
"""
- Read at most ``size`` bytes from the file (less if we hit the end of the
- file first). If the ``size`` argument is negative or omitted, read all
- the remaining data in the file.
+ Read at most ``size`` bytes from the file (less if we hit the end of
+ the file first). If the ``size`` argument is negative or omitted,
+ read all the remaining data in the file.
.. note::
``'b'`` mode flag is ignored (``self.FLAG_BINARY`` in
@@ -250,7 +251,11 @@ class BufferedFile (ClosingContextManager):
line = self._rbuffer
truncated = False
while True:
- if self._at_trailing_cr and (self._flags & self.FLAG_UNIVERSAL_NEWLINE) and (len(line) > 0):
+ if (
+ self._at_trailing_cr and
+ self._flags & self.FLAG_UNIVERSAL_NEWLINE and
+ len(line) > 0
+ ):
# edge case: the newline may be '\r\n' and we may have read
# only the first '\r' last time.
if line[0] == linefeed_byte_value:
@@ -271,7 +276,13 @@ class BufferedFile (ClosingContextManager):
n = size - len(line)
else:
n = self._bufsize
- if (linefeed_byte in line) or ((self._flags & self.FLAG_UNIVERSAL_NEWLINE) and (cr_byte in line)):
+ if (
+ linefeed_byte in line or
+ (
+ self._flags & self.FLAG_UNIVERSAL_NEWLINE and
+ cr_byte in line
+ )
+ ):
break
try:
new_data = self._read(n)
@@ -294,12 +305,20 @@ class BufferedFile (ClosingContextManager):
self._pos += len(line)
return line if self._flags & self.FLAG_BINARY else u(line)
xpos = pos + 1
- if (line[pos] == cr_byte_value) and (xpos < len(line)) and (line[xpos] == linefeed_byte_value):
+ if (
+ line[pos] == cr_byte_value and
+ xpos < len(line) and
+ line[xpos] == linefeed_byte_value
+ ):
xpos += 1
# if the string was truncated, _rbuffer needs to have the string after
# the newline character plus the truncated part of the line we stored
# earlier in _rbuffer
- self._rbuffer = line[xpos:] + self._rbuffer if truncated else line[xpos:]
+ if truncated:
+ self._rbuffer = line[xpos:] + self._rbuffer
+ else:
+ self._rbuffer = line[xpos:]
+
lf = line[pos:xpos]
line = line[:pos] + linefeed_byte
if (len(self._rbuffer) == 0) and (lf == cr_byte):
@@ -349,7 +368,7 @@ class BufferedFile (ClosingContextManager):
type of movement: 0 = absolute; 1 = relative to the current
position; 2 = relative to the end of the file.
- :raises IOError: if the file doesn't support random access.
+ :raises: ``IOError`` -- if the file doesn't support random access.
"""
raise IOError('File does not support seeking.')
@@ -370,7 +389,7 @@ class BufferedFile (ClosingContextManager):
written yet. (Use `flush` or `close` to force buffered data to be
written out.)
- :param str data: data to write
+ :param data: ``str``/``bytes`` data to write
"""
data = b(data)
if self._closed:
@@ -404,7 +423,7 @@ class BufferedFile (ClosingContextManager):
name is intended to match `readlines`; `writelines` does not add line
separators.)
- :param iterable sequence: an iterable sequence of strings.
+ :param sequence: an iterable sequence of strings.
"""
for line in sequence:
self.write(line)
@@ -421,7 +440,7 @@ class BufferedFile (ClosingContextManager):
def closed(self):
return self._closed
- ### overrides...
+ # ...overrides...
def _read(self, size):
"""
@@ -449,7 +468,7 @@ class BufferedFile (ClosingContextManager):
"""
return 0
- ### internals...
+ # ...internals...
def _set_mode(self, mode='r', bufsize=-1):
"""
@@ -513,7 +532,10 @@ class BufferedFile (ClosingContextManager):
return
if self.newlines is None:
self.newlines = newline
- elif self.newlines != newline and isinstance(self.newlines, bytes_types):
+ elif (
+ self.newlines != newline and
+ isinstance(self.newlines, bytes_types)
+ ):
self.newlines = (self.newlines, newline)
elif newline not in self.newlines:
self.newlines += (newline,)
diff --git a/paramiko/hostkeys.py b/paramiko/hostkeys.py
index 38ac866b..008ba592 100644
--- a/paramiko/hostkeys.py
+++ b/paramiko/hostkeys.py
@@ -90,7 +90,7 @@ class HostKeys (MutableMapping):
:param str filename: name of the file to read host keys from
- :raises IOError: if there was an error reading the file
+ :raises: ``IOError`` -- if there was an error reading the file
"""
with open(filename, 'r') as f:
for lineno, line in enumerate(f, 1):
@@ -111,14 +111,14 @@ class HostKeys (MutableMapping):
def save(self, filename):
"""
- Save host keys into a file, in the format used by OpenSSH. The order of
- keys in the file will be preserved when possible (if these keys were
+ Save host keys into a file, in the format used by OpenSSH. The order
+ of keys in the file will be preserved when possible (if these keys were
loaded from a file originally). The single exception is that combined
lines will be split into individual key lines, which is arguably a bug.
:param str filename: name of the file to write
- :raises IOError: if there was an error writing the file
+ :raises: ``IOError`` -- if there was an error writing the file
.. versionadded:: 1.6.1
"""
@@ -135,7 +135,8 @@ class HostKeys (MutableMapping):
returned. The keytype will be either ``"ssh-rsa"`` or ``"ssh-dss"``.
:param str hostname: the hostname (or IP) to lookup
- :return: dict of `str` -> `.PKey` keys associated with this host (or ``None``)
+ :return: dict of `str` -> `.PKey` keys associated with this host
+ (or ``None``)
"""
class SubDict (MutableMapping):
def __init__(self, hostname, entries, hostkeys):
@@ -178,17 +179,35 @@ class HostKeys (MutableMapping):
self._hostkeys._entries.append(e)
def keys(self):
- return [e.key.get_name() for e in self._entries if e.key is not None]
+ return [
+ e.key.get_name() for e in self._entries
+ if e.key is not None
+ ]
entries = []
for e in self._entries:
- for h in e.hostnames:
- if h.startswith('|1|') and not hostname.startswith('|1|') and constant_time_bytes_eq(self.hash_host(hostname, h), h) or h == hostname:
- entries.append(e)
+ if self._hostname_matches(hostname, e):
+ entries.append(e)
if len(entries) == 0:
return None
return SubDict(hostname, entries, self)
+ def _hostname_matches(self, hostname, entry):
+ """
+        Tests whether the ``hostname`` string matches the given host-key ``entry``.
+
+ :returns bool:
+ """
+ for h in entry.hostnames:
+ if (
+ h == hostname or
+ h.startswith('|1|') and
+ not hostname.startswith('|1|') and
+ constant_time_bytes_eq(self.hash_host(hostname, h), h)
+ ):
+ return True
+ return False
+
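With ``__delitem__`` implemented and hostname matching factored into ``_hostname_matches``, ``HostKeys`` behaves like a complete ``MutableMapping``; a small sketch in which the known_hosts path is a placeholder:

    from paramiko.hostkeys import HostKeys

    hk = HostKeys('/home/alice/.ssh/known_hosts')
    if 'example.com' in hk:                 # matches hashed |1| entries too
        print(hk['example.com'].keys())     # e.g. ['ssh-rsa', 'ecdsa-sha2-nistp256']
        del hk['example.com']               # raises KeyError when nothing matches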
def check(self, hostname, key):
"""
Return True if the given key is associated with the given hostname
@@ -220,15 +239,22 @@ class HostKeys (MutableMapping):
def __len__(self):
return len(self.keys())
- def __delitem__(self, key):
- k = self[key]
-
def __getitem__(self, key):
ret = self.lookup(key)
if ret is None:
raise KeyError(key)
return ret
+ def __delitem__(self, key):
+ index = None
+ for i, entry in enumerate(self._entries):
+ if self._hostname_matches(key, entry):
+ index = i
+ break
+ if index is None:
+ raise KeyError(key)
+ self._entries.pop(index)
+
def __setitem__(self, hostname, entry):
# don't use this please.
if len(entry) == 0:
@@ -237,7 +263,7 @@ class HostKeys (MutableMapping):
for key_type in entry.keys():
found = False
for e in self._entries:
- if (hostname in e.hostnames) and (e.key.get_name() == key_type):
+ if (hostname in e.hostnames) and e.key.get_name() == key_type:
# replace
e.key = entry[key_type]
found = True
@@ -266,7 +292,8 @@ class HostKeys (MutableMapping):
hashed hostnames in the known_hosts file.
:param str hostname: the hostname to hash
- :param str salt: optional salt to use when hashing (must be 20 bytes long)
+ :param str salt: optional salt to use when hashing
+ (must be 20 bytes long)
:return: the hashed hostname as a `str`
"""
if salt is None:
@@ -331,7 +358,7 @@ class HostKeyEntry:
key = RSAKey(data=decodebytes(key))
elif keytype == 'ssh-dss':
key = DSSKey(data=decodebytes(key))
- elif keytype == 'ecdsa-sha2-nistp256':
+ elif keytype in ECDSAKey.supported_key_format_identifiers():
key = ECDSAKey(data=decodebytes(key), validate_point=False)
else:
log.info("Unable to handle key of type %s" % (keytype,))
@@ -349,8 +376,10 @@ class HostKeyEntry:
included.
"""
if self.valid:
- return '%s %s %s\n' % (','.join(self.hostnames), self.key.get_name(),
- self.key.get_base64())
+ return '%s %s %s\n' % (
+ ','.join(self.hostnames),
+ self.key.get_name(),
+ self.key.get_base64())
return None
def __repr__(self):
diff --git a/paramiko/kex_gex.py b/paramiko/kex_gex.py
index c980b690..ba45da18 100644
--- a/paramiko/kex_gex.py
+++ b/paramiko/kex_gex.py
@@ -34,8 +34,10 @@ from paramiko.ssh_exception import SSHException
_MSG_KEXDH_GEX_REQUEST_OLD, _MSG_KEXDH_GEX_GROUP, _MSG_KEXDH_GEX_INIT, \
_MSG_KEXDH_GEX_REPLY, _MSG_KEXDH_GEX_REQUEST = range(30, 35)
+
c_MSG_KEXDH_GEX_REQUEST_OLD, c_MSG_KEXDH_GEX_GROUP, c_MSG_KEXDH_GEX_INIT, \
- c_MSG_KEXDH_GEX_REPLY, c_MSG_KEXDH_GEX_REQUEST = [byte_chr(c) for c in range(30, 35)]
+ c_MSG_KEXDH_GEX_REPLY, c_MSG_KEXDH_GEX_REQUEST = \
+ [byte_chr(c) for c in range(30, 35)]
class KexGex (object):
@@ -58,7 +60,8 @@ class KexGex (object):
def start_kex(self, _test_old_style=False):
if self.transport.server_mode:
- self.transport._expect_packet(_MSG_KEXDH_GEX_REQUEST, _MSG_KEXDH_GEX_REQUEST_OLD)
+ self.transport._expect_packet(
+ _MSG_KEXDH_GEX_REQUEST, _MSG_KEXDH_GEX_REQUEST_OLD)
return
# request a bit range: we accept (min_bits) to (max_bits), but prefer
# (preferred_bits). according to the spec, we shouldn't pull the
@@ -88,9 +91,10 @@ class KexGex (object):
return self._parse_kexdh_gex_reply(m)
elif ptype == _MSG_KEXDH_GEX_REQUEST_OLD:
return self._parse_kexdh_gex_request_old(m)
- raise SSHException('KexGex %s asked to handle packet type %d' % self.name, ptype)
+ raise SSHException(
+ 'KexGex %s asked to handle packet type %d' % self.name, ptype)
- ### internals...
+ # ...internals...
def _generate_x(self):
# generate an "x" (1 < x < (p-1)/2).
@@ -133,8 +137,12 @@ class KexGex (object):
# generate prime
pack = self.transport._get_modulus_pack()
if pack is None:
- raise SSHException('Can\'t do server-side gex with no modulus pack')
- self.transport._log(DEBUG, 'Picking p (%d <= %d <= %d bits)' % (minbits, preferredbits, maxbits))
+ raise SSHException(
+ 'Can\'t do server-side gex with no modulus pack')
+ self.transport._log(
+ DEBUG,
+ 'Picking p (%d <= %d <= %d bits)' % (
+ minbits, preferredbits, maxbits))
self.g, self.p = pack.get_modulus(minbits, preferredbits, maxbits)
m = Message()
m.add_byte(c_MSG_KEXDH_GEX_GROUP)
@@ -144,7 +152,8 @@ class KexGex (object):
self.transport._expect_packet(_MSG_KEXDH_GEX_INIT)
def _parse_kexdh_gex_request_old(self, m):
- # same as above, but without min_bits or max_bits (used by older clients like putty)
+ # same as above, but without min_bits or max_bits (used by older
+ # clients like putty)
self.preferred_bits = m.get_int()
# smoosh the user's preferred size into our own limits
if self.preferred_bits > self.max_bits:
@@ -154,9 +163,12 @@ class KexGex (object):
# generate prime
pack = self.transport._get_modulus_pack()
if pack is None:
- raise SSHException('Can\'t do server-side gex with no modulus pack')
- self.transport._log(DEBUG, 'Picking p (~ %d bits)' % (self.preferred_bits,))
- self.g, self.p = pack.get_modulus(self.min_bits, self.preferred_bits, self.max_bits)
+ raise SSHException(
+ 'Can\'t do server-side gex with no modulus pack')
+ self.transport._log(
+ DEBUG, 'Picking p (~ %d bits)' % (self.preferred_bits,))
+ self.g, self.p = pack.get_modulus(
+ self.min_bits, self.preferred_bits, self.max_bits)
m = Message()
m.add_byte(c_MSG_KEXDH_GEX_GROUP)
m.add_mpint(self.p)
@@ -171,7 +183,9 @@ class KexGex (object):
# reject if p's bit length < 1024 or > 8192
bitlen = util.bit_length(self.p)
if (bitlen < 1024) or (bitlen > 8192):
- raise SSHException('Server-generated gex p (don\'t ask) is out of range (%d bits)' % bitlen)
+ raise SSHException(
+ 'Server-generated gex p (don\'t ask) is out of range '
+ '(%d bits)' % bitlen)
self.transport._log(DEBUG, 'Got server p (%d bits)' % bitlen)
self._generate_x()
# now compute e = g^x mod p
@@ -190,7 +204,8 @@ class KexGex (object):
self.f = pow(self.g, self.x, self.p)
K = pow(self.e, self.x, self.p)
key = self.transport.get_server_key().asbytes()
- # okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K)
+ # okay, build up the hash H of
+ # (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K) # noqa
hm = Message()
hm.add(self.transport.remote_version, self.transport.local_version,
self.transport.remote_kex_init, self.transport.local_kex_init,
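Spelled out, the hash both of these abbreviated comments refer to is the RFC 4419 group-exchange hash, with the shared secret coming from the modular exponentiations shown a few lines earlier:

    H = HASH(V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K)
    K = pow(e, x, p)   # server side, x being its own secret exponent
      = pow(f, x, p)   # client side, as in the following hunk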
@@ -225,7 +240,8 @@ class KexGex (object):
if (self.f < 1) or (self.f > self.p - 1):
raise SSHException('Server kex "f" is out of range')
K = pow(self.f, self.x, self.p)
- # okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K)
+ # okay, build up the hash H of
+ # (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K) # noqa
hm = Message()
hm.add(self.transport.local_version, self.transport.remote_version,
self.transport.local_kex_init, self.transport.remote_kex_init,
@@ -244,6 +260,7 @@ class KexGex (object):
self.transport._verify_key(host_key, sig)
self.transport._activate_outbound()
+
class KexGexSHA256(KexGex):
name = 'diffie-hellman-group-exchange-sha256'
hash_algo = sha256
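
The kex_gex hunks above are mostly cosmetic re-wrapping, but they touch every step of the group exchange, so here is a toy sketch of the arithmetic they shuffle around. It uses a stand-in 64-bit prime and ad-hoc hashing; the real code negotiates a 1024-8192 bit group from the modulus pack and hashes Message-packed fields with sha1 or sha256::

    import os
    from hashlib import sha1

    p = 2 ** 64 - 59                 # stand-in prime (real groups are 1024-8192 bits)
    g = 2

    x = 2 + int.from_bytes(os.urandom(8), 'big') % (p - 3)   # client secret
    y = 2 + int.from_bytes(os.urandom(8), 'big') % (p - 3)   # server secret
    e = pow(g, x, p)                 # client -> server
    f = pow(g, y, p)                 # server -> client

    K = pow(f, x, p)                 # client's view of the shared secret
    assert K == pow(e, y, p)         # server derives the same K

    # H = hash(V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K)
    H = sha1(repr((p, g, e, f, K)).encode()).hexdigest()     # ad-hoc packing, not SSH's
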
diff --git a/paramiko/kex_group1.py b/paramiko/kex_group1.py
index 9eee066c..e8f042b1 100644
--- a/paramiko/kex_group1.py
+++ b/paramiko/kex_group1.py
@@ -41,7 +41,7 @@ b0000000000000000 = zero_byte * 8
class KexGroup1(object):
# draft-ietf-secsh-transport-09.txt, page 17
- P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
+ P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF # noqa
G = 2
name = 'diffie-hellman-group1-sha1'
@@ -75,14 +75,15 @@ class KexGroup1(object):
return self._parse_kexdh_reply(m)
raise SSHException('KexGroup1 asked to handle packet type %d' % ptype)
- ### internals...
+ # ...internals...
def _generate_x(self):
# generate an "x" (1 < x < q), where q is (p-1)/2.
- # p is a 128-byte (1024-bit) number, where the first 64 bits are 1.
+ # p is a 128-byte (1024-bit) number, where the first 64 bits are 1.
# therefore q can be approximated as a 2^1023. we drop the subset of
- # potential x where the first 63 bits are 1, because some of those will be
- # larger than q (but this is a tiny tiny subset of potential x).
+ # potential x where the first 63 bits are 1, because some of those
+ # will be larger than q (but this is a tiny tiny subset of
+ # potential x).
while 1:
x_bytes = os.urandom(128)
x_bytes = byte_mask(x_bytes[0], 0x7f) + x_bytes[1:]
@@ -99,7 +100,8 @@ class KexGroup1(object):
raise SSHException('Server kex "f" is out of range')
sig = m.get_binary()
K = pow(self.f, self.x, self.P)
- # okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || e || f || K)
+ # okay, build up the hash H of
+ # (V_C || V_S || I_C || I_S || K_S || e || f || K)
hm = Message()
hm.add(self.transport.local_version, self.transport.remote_version,
self.transport.local_kex_init, self.transport.remote_kex_init)
@@ -118,7 +120,8 @@ class KexGroup1(object):
raise SSHException('Client kex "e" is out of range')
K = pow(self.e, self.x, self.P)
key = self.transport.get_server_key().asbytes()
- # okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || e || f || K)
+ # okay, build up the hash H of
+ # (V_C || V_S || I_C || I_S || K_S || e || f || K)
hm = Message()
hm.add(self.transport.remote_version, self.transport.local_version,
self.transport.remote_kex_init, self.transport.local_kex_init)
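
The re-wrapped comment in _generate_x describes a concrete sampling rule: draw 128 random bytes, mask the top bit so x < 2**1023, and reject the two eight-byte prefixes treated as degenerate. A standalone sketch of that rule using only the standard library (the real code goes through byte_mask and util.inflate_long)::

    import os

    TOP_PREFIX = b'\x7f' + b'\xff' * 7    # "first 63 bits are 1" after masking
    ZERO_PREFIX = b'\x00' * 8

    def generate_x():
        while True:
            x_bytes = os.urandom(128)                            # 1024 random bits
            x_bytes = bytes([x_bytes[0] & 0x7f]) + x_bytes[1:]   # force x < 2**1023
            if x_bytes[:8] not in (TOP_PREFIX, ZERO_PREFIX):
                return int.from_bytes(x_bytes, 'big')

    x = generate_x()
    assert 0 < x < 2 ** 1023
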
diff --git a/paramiko/kex_group14.py b/paramiko/kex_group14.py
index 9f7dd216..22955e34 100644
--- a/paramiko/kex_group14.py
+++ b/paramiko/kex_group14.py
@@ -28,7 +28,7 @@ from hashlib import sha1
class KexGroup14(KexGroup1):
# http://tools.ietf.org/html/rfc3526#section-3
- P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF
+ P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF # noqa
G = 2
name = 'diffie-hellman-group14-sha1'
diff --git a/paramiko/kex_gss.py b/paramiko/kex_gss.py
index 69969f8a..ba24c0a0 100644
--- a/paramiko/kex_gss.py
+++ b/paramiko/kex_gss.py
@@ -40,19 +40,23 @@ This module provides GSS-API / SSPI Key Exchange as defined in :rfc:`4462`.
import os
from hashlib import sha1
-from paramiko.common import *
+from paramiko.common import * # noqa
from paramiko import util
from paramiko.message import Message
-from paramiko.py3compat import byte_chr, long, byte_mask, byte_ord
+from paramiko.py3compat import byte_chr, byte_mask, byte_ord
from paramiko.ssh_exception import SSHException
MSG_KEXGSS_INIT, MSG_KEXGSS_CONTINUE, MSG_KEXGSS_COMPLETE, MSG_KEXGSS_HOSTKEY,\
-MSG_KEXGSS_ERROR = range(30, 35)
+ MSG_KEXGSS_ERROR = range(30, 35)
MSG_KEXGSS_GROUPREQ, MSG_KEXGSS_GROUP = range(40, 42)
c_MSG_KEXGSS_INIT, c_MSG_KEXGSS_CONTINUE, c_MSG_KEXGSS_COMPLETE,\
-c_MSG_KEXGSS_HOSTKEY, c_MSG_KEXGSS_ERROR = [byte_chr(c) for c in range(30, 35)]
-c_MSG_KEXGSS_GROUPREQ, c_MSG_KEXGSS_GROUP = [byte_chr(c) for c in range(40, 42)]
+ c_MSG_KEXGSS_HOSTKEY, c_MSG_KEXGSS_ERROR = [
+ byte_chr(c) for c in range(30, 35)
+ ]
+c_MSG_KEXGSS_GROUPREQ, c_MSG_KEXGSS_GROUP = [
+ byte_chr(c) for c in range(40, 42)
+]
class KexGSSGroup1(object):
@@ -61,10 +65,10 @@ class KexGSSGroup1(object):
4462 Section 2 <https://tools.ietf.org/html/rfc4462.html#section-2>`_
"""
# draft-ietf-secsh-transport-09.txt, page 17
- P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
+ P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF # noqa
G = 2
- b7fffffffffffffff = byte_chr(0x7f) + max_byte * 7
- b0000000000000000 = zero_byte * 8
+ b7fffffffffffffff = byte_chr(0x7f) + max_byte * 7 # noqa
+ b0000000000000000 = zero_byte * 8 # noqa
NAME = "gss-group1-sha1-toWM5Slw5Ew8Mqkay+al2g=="
def __init__(self, transport):
@@ -104,7 +108,7 @@ class KexGSSGroup1(object):
"""
Parse the next packet.
- :param char ptype: The type of the incomming packet
+ :param ptype: The (string) type of the incoming packet
:param `.Message` m: The packet content
"""
if self.transport.server_mode and (ptype == MSG_KEXGSS_INIT):
@@ -127,14 +131,14 @@ class KexGSSGroup1(object):
generate an "x" (1 < x < q), where q is (p-1)/2.
p is a 128-byte (1024-bit) number, where the first 64 bits are 1.
therefore q can be approximated as a 2^1023. we drop the subset of
- potential x where the first 63 bits are 1, because some of those will be
- larger than q (but this is a tiny tiny subset of potential x).
+ potential x where the first 63 bits are 1, because some of those will
+ be larger than q (but this is a tiny tiny subset of potential x).
"""
while 1:
x_bytes = os.urandom(128)
x_bytes = byte_mask(x_bytes[0], 0x7f) + x_bytes[1:]
- if (x_bytes[:8] != self.b7fffffffffffffff) and \
- (x_bytes[:8] != self.b0000000000000000):
+ first = x_bytes[:8]
+ if first not in (self.b7fffffffffffffff, self.b0000000000000000):
break
self.x = util.inflate_long(x_bytes)
@@ -156,18 +160,21 @@ class KexGSSGroup1(object):
"""
Parse the SSH2_MSG_KEXGSS_CONTINUE message.
- :param `.Message` m: The content of the SSH2_MSG_KEXGSS_CONTINUE message
+ :param `.Message` m: The content of the SSH2_MSG_KEXGSS_CONTINUE
+ message
"""
if not self.transport.server_mode:
srv_token = m.get_string()
m = Message()
m.add_byte(c_MSG_KEXGSS_CONTINUE)
- m.add_string(self.kexgss.ssh_init_sec_context(target=self.gss_host,
- recv_token=srv_token))
+ m.add_string(self.kexgss.ssh_init_sec_context(
+ target=self.gss_host, recv_token=srv_token))
self.transport.send_message(m)
- self.transport._expect_packet(MSG_KEXGSS_CONTINUE,
- MSG_KEXGSS_COMPLETE,
- MSG_KEXGSS_ERROR)
+ self.transport._expect_packet(
+ MSG_KEXGSS_CONTINUE,
+ MSG_KEXGSS_COMPLETE,
+ MSG_KEXGSS_ERROR
+ )
else:
pass
@@ -175,7 +182,8 @@ class KexGSSGroup1(object):
"""
Parse the SSH2_MSG_KEXGSS_COMPLETE message (client mode).
- :param `.Message` m: The content of the SSH2_MSG_KEXGSS_COMPLETE message
+ :param `.Message` m: The content of the
+ SSH2_MSG_KEXGSS_COMPLETE message
"""
# client mode
if self.transport.host_key is None:
@@ -190,7 +198,8 @@ class KexGSSGroup1(object):
if bool:
srv_token = m.get_string()
K = pow(self.f, self.x, self.P)
- # okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || e || f || K)
+ # okay, build up the hash H of
+ # (V_C || V_S || I_C || I_S || K_S || e || f || K)
hm = Message()
hm.add(self.transport.local_version, self.transport.remote_version,
self.transport.local_kex_init, self.transport.remote_kex_init)
@@ -223,7 +232,8 @@ class KexGSSGroup1(object):
K = pow(self.e, self.x, self.P)
self.transport.host_key = NullHostKey()
key = self.transport.host_key.__str__()
- # okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || e || f || K)
+ # okay, build up the hash H of
+ # (V_C || V_S || I_C || I_S || K_S || e || f || K)
hm = Message()
hm.add(self.transport.remote_version, self.transport.local_version,
self.transport.remote_kex_init, self.transport.local_kex_init)
@@ -271,7 +281,7 @@ class KexGSSGroup1(object):
maj_status = m.get_int()
min_status = m.get_int()
err_msg = m.get_string()
- lang_tag = m.get_string() # we don't care about the language!
+ m.get_string() # we don't care about the language!
raise SSHException("GSS-API Error:\nMajor Status: %s\nMinor Status: %s\
\nError Message: %s\n") % (str(maj_status),
str(min_status),
@@ -284,7 +294,7 @@ class KexGSSGroup14(KexGSSGroup1):
in `RFC 4462 Section 2
<https://tools.ietf.org/html/rfc4462.html#section-2>`_
"""
- P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF
+ P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF # noqa
G = 2
NAME = "gss-group14-sha1-toWM5Slw5Ew8Mqkay+al2g=="
@@ -335,7 +345,7 @@ class KexGSSGex(object):
"""
Parse the next packet.
- :param char ptype: The type of the incomming packet
+ :param ptype: The (string) type of the incoming packet
:param `.Message` m: The packet content
"""
if ptype == MSG_KEXGSS_GROUPREQ:
@@ -378,7 +388,8 @@ class KexGSSGex(object):
"""
Parse the SSH2_MSG_KEXGSS_GROUPREQ message (server mode).
- :param `.Message` m: The content of the SSH2_MSG_KEXGSS_GROUPREQ message
+ :param `.Message` m: The content of the
+ SSH2_MSG_KEXGSS_GROUPREQ message
"""
minbits = m.get_int()
preferredbits = m.get_int()
@@ -402,8 +413,12 @@ class KexGSSGex(object):
# generate prime
pack = self.transport._get_modulus_pack()
if pack is None:
- raise SSHException('Can\'t do server-side gex with no modulus pack')
- self.transport._log(DEBUG, 'Picking p (%d <= %d <= %d bits)' % (minbits, preferredbits, maxbits))
+ raise SSHException(
+ 'Can\'t do server-side gex with no modulus pack')
+ self.transport._log(
+ DEBUG, # noqa
+ 'Picking p (%d <= %d <= %d bits)' % (
+ minbits, preferredbits, maxbits))
self.g, self.p = pack.get_modulus(minbits, preferredbits, maxbits)
m = Message()
m.add_byte(c_MSG_KEXGSS_GROUP)
@@ -423,8 +438,10 @@ class KexGSSGex(object):
# reject if p's bit length < 1024 or > 8192
bitlen = util.bit_length(self.p)
if (bitlen < 1024) or (bitlen > 8192):
- raise SSHException('Server-generated gex p (don\'t ask) is out of range (%d bits)' % bitlen)
- self.transport._log(DEBUG, 'Got server p (%d bits)' % bitlen)
+ raise SSHException(
+ 'Server-generated gex p (don\'t ask) is out of range '
+ '(%d bits)' % bitlen)
+ self.transport._log(DEBUG, 'Got server p (%d bits)' % bitlen) # noqa
self._generate_x()
# now compute e = g^x mod p
self.e = pow(self.g, self.x, self.p)
@@ -453,7 +470,8 @@ class KexGSSGex(object):
K = pow(self.e, self.x, self.p)
self.transport.host_key = NullHostKey()
key = self.transport.host_key.__str__()
- # okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K)
+ # okay, build up the hash H of
+ # (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K) # noqa
hm = Message()
hm.add(self.transport.remote_version, self.transport.local_version,
self.transport.remote_kex_init, self.transport.local_kex_init,
@@ -543,7 +561,8 @@ class KexGSSGex(object):
if (self.f < 1) or (self.f > self.p - 1):
raise SSHException('Server kex "f" is out of range')
K = pow(self.f, self.x, self.p)
- # okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K)
+ # okay, build up the hash H of
+ # (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K) # noqa
hm = Message()
hm.add(self.transport.local_version, self.transport.remote_version,
self.transport.local_kex_init, self.transport.remote_kex_init,
@@ -584,7 +603,7 @@ class KexGSSGex(object):
maj_status = m.get_int()
min_status = m.get_int()
err_msg = m.get_string()
- lang_tag = m.get_string() # we don't care about the language!
+ m.get_string() # we don't care about the language (lang_tag)!
raise SSHException("GSS-API Error:\nMajor Status: %s\nMinor Status: %s\
\nError Message: %s\n") % (str(maj_status),
str(min_status),
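
The reflowed constant definitions near the top of kex_gss.py pair each integer message number with a one-byte form suitable for Message.add_byte. A self-contained illustration, with byte_chr stubbed out locally (paramiko's real helper lives in py3compat)::

    def byte_chr(c):                       # local stand-in for py3compat.byte_chr
        return bytes([c])

    MSG_KEXGSS_INIT, MSG_KEXGSS_CONTINUE, MSG_KEXGSS_COMPLETE, \
        MSG_KEXGSS_HOSTKEY, MSG_KEXGSS_ERROR = range(30, 35)
    c_MSG_KEXGSS_INIT, c_MSG_KEXGSS_CONTINUE, c_MSG_KEXGSS_COMPLETE, \
        c_MSG_KEXGSS_HOSTKEY, c_MSG_KEXGSS_ERROR = [
            byte_chr(c) for c in range(30, 35)
        ]

    assert MSG_KEXGSS_ERROR == 34
    assert c_MSG_KEXGSS_ERROR == b'\x22'   # same number, as a single byte
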
diff --git a/paramiko/message.py b/paramiko/message.py
index bf4c6b95..02af2526 100644
--- a/paramiko/message.py
+++ b/paramiko/message.py
@@ -32,7 +32,7 @@ class Message (object):
An SSH2 message is a stream of bytes that encodes some combination of
strings, integers, bools, and infinite-precision integers (known in Python
as longs). This class builds or breaks down such a byte stream.
-
+
Normally you don't need to deal with anything this low-level, but it's
exposed for people implementing custom extensions, or features that
paramiko doesn't support yet.
@@ -184,7 +184,6 @@ class Message (object):
@rtype: string
"""
return u(self.get_bytes(self.get_int()))
- #return self.get_bytes(self.get_size())
def get_binary(self):
"""
@@ -200,7 +199,7 @@ class Message (object):
def get_list(self):
"""
Fetch a `list` of `strings <str>` from the stream.
-
+
These are trivially encoded as comma-separated values in a string.
"""
return self.get_text().split(',')
@@ -208,7 +207,7 @@ class Message (object):
def add_bytes(self, b):
"""
Write bytes to the stream, without any formatting.
-
+
:param str b: bytes to add
"""
self.packet.write(b)
@@ -217,7 +216,7 @@ class Message (object):
def add_byte(self, b):
"""
Write a single byte to the stream, without any formatting.
-
+
:param str b: byte to add
"""
self.packet.write(b)
@@ -226,7 +225,7 @@ class Message (object):
def add_boolean(self, b):
"""
Add a boolean value to the stream.
-
+
:param bool b: boolean value to add
"""
if b:
@@ -234,20 +233,20 @@ class Message (object):
else:
self.packet.write(zero_byte)
return self
-
+
def add_int(self, n):
"""
Add an integer to the stream.
-
+
:param int n: integer to add
"""
self.packet.write(struct.pack('>I', n))
return self
-
+
def add_adaptive_int(self, n):
"""
Add an integer to the stream.
-
+
:param int n: integer to add
"""
if n >= Message.big_int:
@@ -270,7 +269,7 @@ class Message (object):
"""
Add a long int to the stream, encoded as an infinite-precision
integer. This method only works on positive numbers.
-
+
:param long z: long int to add
"""
self.add_string(util.deflate_long(z))
@@ -279,7 +278,7 @@ class Message (object):
def add_string(self, s):
"""
Add a string to the stream.
-
+
:param str s: string to add
"""
s = asbytes(s)
@@ -292,12 +291,12 @@ class Message (object):
Add a list of strings to the stream. They are encoded identically to
a single string of values separated by commas. (Yes, really, that's
how SSH2 does it.)
-
+
:param list l: list of strings to add
"""
self.add_string(','.join(l))
return self
-
+
def _add(self, i):
if type(i) is bool:
return self.add_boolean(i)
@@ -315,7 +314,7 @@ class Message (object):
.. warning::
Longs are encoded non-deterministically. Don't use this method.
-
+
:param seq: the sequence of items
"""
for item in seq:
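
Since the message.py hunks are mostly whitespace cleanup, a quick round-trip shows what the class actually does. This assumes a paramiko install at roughly this version and uses only the add_*/get_* methods visible in the diff::

    from paramiko.message import Message

    m = Message()
    m.add_byte(b'\x14')                    # a message-type byte
    m.add_boolean(True)
    m.add_int(1024)
    m.add_string('ssh-rsa')
    m.add_list(['aes128-ctr', 'aes256-ctr'])
    blob = m.asbytes()                     # wire encoding

    r = Message(blob)                      # break it back down
    assert r.get_byte() == b'\x14'
    assert r.get_boolean()
    assert r.get_int() == 1024
    assert r.get_text() == 'ssh-rsa'
    assert r.get_list() == ['aes128-ctr', 'aes256-ctr']
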
diff --git a/paramiko/packet.py b/paramiko/packet.py
index 00cf5657..95a26c6e 100644
--- a/paramiko/packet.py
+++ b/paramiko/packet.py
@@ -29,8 +29,10 @@ import time
from hmac import HMAC
from paramiko import util
-from paramiko.common import linefeed_byte, cr_byte_value, asbytes, MSG_NAMES, \
- DEBUG, xffffffff, zero_byte
+from paramiko.common import (
+ linefeed_byte, cr_byte_value, asbytes, MSG_NAMES, DEBUG, xffffffff,
+ zero_byte,
+)
from paramiko.py3compat import u, byte_ord
from paramiko.ssh_exception import SSHException, ProxyCommandFailure
from paramiko.message import Message
@@ -41,9 +43,19 @@ def compute_hmac(key, message, digest_class):
class NeedRekeyException (Exception):
+ """
+ Exception indicating a rekey is needed.
+ """
pass
+def first_arg(e):
+ arg = None
+ if type(e.args) is tuple and len(e.args) > 0:
+ arg = e.args[0]
+ return arg
+
+
class Packetizer (object):
"""
Implementation of the base SSH packet protocol.
@@ -54,8 +66,11 @@ class Packetizer (object):
REKEY_PACKETS = pow(2, 29)
REKEY_BYTES = pow(2, 29)
- REKEY_PACKETS_OVERFLOW_MAX = pow(2, 29) # Allow receiving this many packets after a re-key request before terminating
- REKEY_BYTES_OVERFLOW_MAX = pow(2, 29) # Allow receiving this many bytes after a re-key request before terminating
+ # Allow receiving this many packets after a re-key request before
+ # terminating
+ REKEY_PACKETS_OVERFLOW_MAX = pow(2, 29)
+ # Allow receiving this many bytes after a re-key request before terminating
+ REKEY_BYTES_OVERFLOW_MAX = pow(2, 29)
def __init__(self, socket):
self.__socket = socket
@@ -103,13 +118,18 @@ class Packetizer (object):
self.__handshake_complete = False
self.__timer_expired = False
+ @property
+ def closed(self):
+ return self.__closed
+
def set_log(self, log):
"""
Set the Python log object to use for logging.
"""
self.__logger = log
- def set_outbound_cipher(self, block_engine, block_size, mac_engine, mac_size, mac_key, sdctr=False):
+ def set_outbound_cipher(self, block_engine, block_size, mac_engine,
+ mac_size, mac_key, sdctr=False):
"""
Switch outbound data cipher.
"""
@@ -121,13 +141,15 @@ class Packetizer (object):
self.__mac_key_out = mac_key
self.__sent_bytes = 0
self.__sent_packets = 0
- # wait until the reset happens in both directions before clearing rekey flag
+ # wait until the reset happens in both directions before clearing
+ # rekey flag
self.__init_count |= 1
if self.__init_count == 3:
self.__init_count = 0
self.__need_rekey = False
- def set_inbound_cipher(self, block_engine, block_size, mac_engine, mac_size, mac_key):
+ def set_inbound_cipher(
+ self, block_engine, block_size, mac_engine, mac_size, mac_key):
"""
Switch inbound data cipher.
"""
@@ -140,7 +162,8 @@ class Packetizer (object):
self.__received_packets = 0
self.__received_bytes_overflow = 0
self.__received_packets_overflow = 0
- # wait until the reset happens in both directions before clearing rekey flag
+ # wait until the reset happens in both directions before clearing
+ # rekey flag
self.__init_count |= 2
if self.__init_count == 3:
self.__init_count = 0
@@ -233,8 +256,9 @@ class Packetizer (object):
:param int n: number of bytes to read
:return: the data read, as a `str`
- :raises EOFError:
- if the socket was closed before all the bytes could be read
+ :raises:
+ ``EOFError`` -- if the socket was closed before all the bytes could
+ be read
"""
out = bytes()
# handle over-reading from reading the banner line
@@ -258,9 +282,10 @@ class Packetizer (object):
# on Linux, sometimes instead of socket.timeout, we get
# EAGAIN. this is a bug in recent (> 2.6.9) kernels but
# we need to work around it.
- if (type(e.args) is tuple) and (len(e.args) > 0) and (e.args[0] == errno.EAGAIN):
+ arg = first_arg(e)
+ if arg == errno.EAGAIN:
got_timeout = True
- elif (type(e.args) is tuple) and (len(e.args) > 0) and (e.args[0] == errno.EINTR):
+ elif arg == errno.EINTR:
# syscall interrupted; try again
pass
elif self.__closed:
@@ -285,9 +310,10 @@ class Packetizer (object):
except socket.timeout:
retry_write = True
except socket.error as e:
- if (type(e.args) is tuple) and (len(e.args) > 0) and (e.args[0] == errno.EAGAIN):
+ arg = first_arg(e)
+ if arg == errno.EAGAIN:
retry_write = True
- elif (type(e.args) is tuple) and (len(e.args) > 0) and (e.args[0] == errno.EINTR):
+ elif arg == errno.EINTR:
# syscall interrupted; try again
retry_write = True
else:
@@ -303,11 +329,11 @@ class Packetizer (object):
n = -1
else:
if n == 0 and iteration_with_zero_as_return_value > 10:
- # We shouldn't retry the write, but we didn't
- # manage to send anything over the socket. This might be an
- # indication that we have lost contact with the remote side,
- # but are yet to receive an EOFError or other socket errors.
- # Let's give it some iteration to try and catch up.
+ # We shouldn't retry the write, but we didn't
+ # manage to send anything over the socket. This might be an
+ # indication that we have lost contact with the remote
+ # side, but are yet to receive an EOFError or other socket
+ # errors. Let's give it some iteration to try and catch up.
n = -1
iteration_with_zero_as_return_value += 1
if n < 0:
@@ -323,7 +349,7 @@ class Packetizer (object):
line, so it's okay to attempt large reads.
"""
buf = self.__remainder
- while not linefeed_byte in buf:
+ while linefeed_byte not in buf:
buf += self._read_timeout(timeout)
n = buf.index(linefeed_byte)
self.__remainder = buf[n + 1:]
@@ -350,7 +376,9 @@ class Packetizer (object):
data = self.__compress_engine_out(data)
packet = self._build_packet(data)
if self.__dump_packets:
- self._log(DEBUG, 'Write packet <%s>, length %d' % (cmd_name, orig_len))
+ self._log(
+ DEBUG,
+ 'Write packet <%s>, length %d' % (cmd_name, orig_len))
self._log(DEBUG, util.format_binary(packet, 'OUT: '))
if self.__block_engine_out is not None:
out = self.__block_engine_out.update(packet)
@@ -358,15 +386,23 @@ class Packetizer (object):
out = packet
# + mac
if self.__block_engine_out is not None:
- payload = struct.pack('>I', self.__sequence_number_out) + packet
- out += compute_hmac(self.__mac_key_out, payload, self.__mac_engine_out)[:self.__mac_size_out]
- self.__sequence_number_out = (self.__sequence_number_out + 1) & xffffffff
+ payload = struct.pack(
+ '>I', self.__sequence_number_out) + packet
+ out += compute_hmac(
+ self.__mac_key_out,
+ payload,
+ self.__mac_engine_out)[:self.__mac_size_out]
+ self.__sequence_number_out = \
+ (self.__sequence_number_out + 1) & xffffffff
self.write_all(out)
self.__sent_bytes += len(out)
self.__sent_packets += 1
- if (self.__sent_packets >= self.REKEY_PACKETS or self.__sent_bytes >= self.REKEY_BYTES)\
- and not self.__need_rekey:
+ sent_too_much = (
+ self.__sent_packets >= self.REKEY_PACKETS or
+ self.__sent_bytes >= self.REKEY_BYTES
+ )
+ if sent_too_much and not self.__need_rekey:
# only ask once for rekeying
self._log(DEBUG, 'Rekeying (hit %d packets, %d bytes sent)' %
(self.__sent_packets, self.__sent_bytes))
@@ -381,8 +417,8 @@ class Packetizer (object):
Only one thread should ever be in this function (no other locking is
done).
- :raises SSHException: if the packet is mangled
- :raises NeedRekeyException: if the transport should rekey
+ :raises: `.SSHException` -- if the packet is mangled
+ :raises: `.NeedRekeyException` -- if the transport should rekey
"""
header = self.read_all(self.__block_size_in, check_rekey=True)
if self.__block_engine_in is not None:
@@ -406,15 +442,21 @@ class Packetizer (object):
if self.__mac_size_in > 0:
mac = post_packet[:self.__mac_size_in]
- mac_payload = struct.pack('>II', self.__sequence_number_in, packet_size) + packet
- my_mac = compute_hmac(self.__mac_key_in, mac_payload, self.__mac_engine_in)[:self.__mac_size_in]
+ mac_payload = struct.pack(
+ '>II', self.__sequence_number_in, packet_size) + packet
+ my_mac = compute_hmac(
+ self.__mac_key_in,
+ mac_payload,
+ self.__mac_engine_in)[:self.__mac_size_in]
if not util.constant_time_bytes_eq(my_mac, mac):
raise SSHException('Mismatched MAC')
padding = byte_ord(packet[0])
payload = packet[1:packet_size - padding]
if self.__dump_packets:
- self._log(DEBUG, 'Got payload (%d bytes, %d padding)' % (packet_size, padding))
+ self._log(
+ DEBUG,
+ 'Got payload (%d bytes, %d padding)' % (packet_size, padding))
if self.__compress_engine_in is not None:
payload = self.__compress_engine_in(payload)
@@ -432,9 +474,12 @@ class Packetizer (object):
# dropping the connection
self.__received_bytes_overflow += raw_packet_size
self.__received_packets_overflow += 1
- if (self.__received_packets_overflow >= self.REKEY_PACKETS_OVERFLOW_MAX) or \
- (self.__received_bytes_overflow >= self.REKEY_BYTES_OVERFLOW_MAX):
- raise SSHException('Remote transport is ignoring rekey requests')
+ if (self.__received_packets_overflow >=
+ self.REKEY_PACKETS_OVERFLOW_MAX) or \
+ (self.__received_bytes_overflow >=
+ self.REKEY_BYTES_OVERFLOW_MAX):
+ raise SSHException(
+ 'Remote transport is ignoring rekey requests')
elif (self.__received_packets >= self.REKEY_PACKETS) or \
(self.__received_bytes >= self.REKEY_BYTES):
# only ask once for rekeying
@@ -450,10 +495,12 @@ class Packetizer (object):
else:
cmd_name = '$%x' % cmd
if self.__dump_packets:
- self._log(DEBUG, 'Read packet <%s>, length %d' % (cmd_name, len(payload)))
+ self._log(
+ DEBUG,
+ 'Read packet <%s>, length %d' % (cmd_name, len(payload)))
return cmd, msg
- ########## protected
+ # ...protected...
def _log(self, level, msg):
if self.__logger is None:
@@ -465,8 +512,11 @@ class Packetizer (object):
self.__logger.log(level, msg)
def _check_keepalive(self):
- if (not self.__keepalive_interval) or (not self.__block_engine_out) or \
- self.__need_rekey:
+ if (
+ not self.__keepalive_interval or
+ not self.__block_engine_out or
+ self.__need_rekey
+ ):
# wait till we're encrypting, and not in the middle of rekeying
return
now = time.time()
@@ -485,8 +535,7 @@ class Packetizer (object):
except socket.timeout:
pass
except EnvironmentError as e:
- if (type(e.args) is tuple and len(e.args) > 0 and
- e.args[0] == errno.EINTR):
+ if first_arg(e) == errno.EINTR:
pass
else:
raise
@@ -504,7 +553,8 @@ class Packetizer (object):
packet = struct.pack('>IB', len(payload) + padding + 1, padding)
packet += payload
if self.__sdctr_out or self.__block_engine_out is None:
- # cute trick i caught openssh doing: if we're not encrypting or SDCTR mode (RFC4344),
+ # cute trick i caught openssh doing: if we're not encrypting or
+ # SDCTR mode (RFC4344),
# don't waste random bytes for the padding
packet += (zero_byte * padding)
else:
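
The wrapped MAC code above boils down to one rule: the tag is an HMAC over the big-endian sequence number concatenated with the packet, truncated to the negotiated MAC length. A hedged standard-library sketch (the key, sequence number and packet below are dummies)::

    import hmac
    import struct
    from hashlib import sha1

    def mac_for_packet(mac_key, seq_num, packet, mac_size=20, digest=sha1):
        payload = struct.pack('>I', seq_num) + packet
        return hmac.new(mac_key, payload, digest).digest()[:mac_size]

    tag = mac_for_packet(b'\x00' * 20, 3, b'example packet bytes')
    assert len(tag) == 20
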
diff --git a/paramiko/pipe.py b/paramiko/pipe.py
index 4f62d7c5..6ca37703 100644
--- a/paramiko/pipe.py
+++ b/paramiko/pipe.py
@@ -28,7 +28,6 @@ will trigger as readable in `select <select.select>`.
import sys
import os
import socket
-from paramiko.py3compat import b
def make_pipe():
@@ -45,13 +44,13 @@ class PosixPipe (object):
self._set = False
self._forever = False
self._closed = False
-
+
def close(self):
os.close(self._rfd)
os.close(self._wfd)
# used for unit tests:
self._closed = True
-
+
def fileno(self):
return self._rfd
@@ -60,13 +59,13 @@ class PosixPipe (object):
return
os.read(self._rfd, 1)
self._set = False
-
+
def set(self):
if self._set or self._closed:
return
self._set = True
os.write(self._wfd, b'*')
-
+
def set_forever(self):
self._forever = True
self.set()
@@ -81,39 +80,39 @@ class WindowsPipe (object):
serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serv.bind(('127.0.0.1', 0))
serv.listen(1)
-
+
# need to save sockets in _rsock/_wsock so they don't get closed
self._rsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._rsock.connect(('127.0.0.1', serv.getsockname()[1]))
-
+
self._wsock, addr = serv.accept()
serv.close()
self._set = False
self._forever = False
self._closed = False
-
+
def close(self):
self._rsock.close()
self._wsock.close()
# used for unit tests:
self._closed = True
-
+
def fileno(self):
return self._rsock.fileno()
- def clear (self):
+ def clear(self):
if not self._set or self._forever:
return
self._rsock.recv(1)
self._set = False
-
- def set (self):
+
+ def set(self):
if self._set or self._closed:
return
self._set = True
self._wsock.send(b'*')
- def set_forever (self):
+ def set_forever(self):
self._forever = True
self.set()
@@ -123,12 +122,12 @@ class OrPipe (object):
self._set = False
self._partner = None
self._pipe = pipe
-
+
def set(self):
self._set = True
if not self._partner._set:
self._pipe.set()
-
+
def clear(self):
self._set = False
if not self._partner._set:
@@ -146,4 +145,3 @@ def make_or_pipe(pipe):
p1._partner = p2
p2._partner = p1
return p1, p2
-
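
pipe.py only loses trailing whitespace here, but the trick it wraps is easy to demonstrate: writing a single byte to an OS pipe makes its read end show up as readable in select(), which is how events are surfaced to select-based loops. POSIX-only sketch (WindowsPipe does the same thing with a loopback socket pair)::

    import os
    import select

    rfd, wfd = os.pipe()
    assert select.select([rfd], [], [], 0)[0] == []    # nothing pending yet

    os.write(wfd, b'*')                                # "set" the event
    assert select.select([rfd], [], [], 0)[0] == [rfd]

    os.read(rfd, 1)                                    # "clear" it again
    os.close(rfd)
    os.close(wfd)
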
diff --git a/paramiko/pkey.py b/paramiko/pkey.py
index 0637a6f0..f5b0cd18 100644
--- a/paramiko/pkey.py
+++ b/paramiko/pkey.py
@@ -65,9 +65,10 @@ class PKey(object):
:param .Message msg:
an optional SSH `.Message` containing a public key of this type.
- :param str data: an optional string containing a public key of this type
+ :param str data: an optional string containing a public key
+ of this type
- :raises SSHException:
+ :raises: `.SSHException` --
if a key cannot be created from the ``data`` or ``msg`` given, or
no key was passed in.
"""
@@ -85,6 +86,8 @@ class PKey(object):
return self.asbytes()
# noinspection PyUnresolvedReferences
+ # TODO: The comparison functions should be removed as per:
+ # https://docs.python.org/3.0/whatsnew/3.0.html#ordering-comparisons
def __cmp__(self, other):
"""
Compare this key to another. Returns 0 if this key is equivalent to
@@ -92,13 +95,13 @@ class PKey(object):
of the key are compared, so a public key will compare equal to its
corresponding private key.
- :param .Pkey other: key to compare to.
+ :param .PKey other: key to compare to.
"""
hs = hash(self)
ho = hash(other)
if hs != ho:
- return cmp(hs, ho)
- return cmp(self.asbytes(), other.asbytes())
+ return cmp(hs, ho) # noqa
+ return cmp(self.asbytes(), other.asbytes()) # noqa
def __eq__(self, other):
return hash(self) == hash(other)
@@ -188,10 +191,10 @@ class PKey(object):
encrypted
:return: a new `.PKey` based on the given private key
- :raises IOError: if there was an error reading the file
- :raises PasswordRequiredException: if the private key file is
+ :raises: ``IOError`` -- if there was an error reading the file
+ :raises: `.PasswordRequiredException` -- if the private key file is
encrypted, and ``password`` is ``None``
- :raises SSHException: if the key file is invalid
+ :raises: `.SSHException` -- if the key file is invalid
"""
key = cls(filename=filename, password=password)
return key
@@ -209,10 +212,10 @@ class PKey(object):
an optional password to use to decrypt the key, if it's encrypted
:return: a new `.PKey` based on the given private key
- :raises IOError: if there was an error reading the key
- :raises PasswordRequiredException:
+ :raises: ``IOError`` -- if there was an error reading the key
+ :raises: `.PasswordRequiredException` --
if the private key file is encrypted, and ``password`` is ``None``
- :raises SSHException: if the key file is invalid
+ :raises: `.SSHException` -- if the key file is invalid
"""
key = cls(file_obj=file_obj, password=password)
return key
@@ -226,8 +229,8 @@ class PKey(object):
:param str password:
an optional password to use to encrypt the key file
- :raises IOError: if there was an error writing the file
- :raises SSHException: if the key is invalid
+ :raises: ``IOError`` -- if there was an error writing the file
+ :raises: `.SSHException` -- if the key is invalid
"""
raise Exception('Not implemented in PKey')
@@ -239,8 +242,8 @@ class PKey(object):
:param file_obj: the file-like object to write into
:param str password: an optional password to use to encrypt the key
- :raises IOError: if there was an error writing to the file
- :raises SSHException: if the key is invalid
+ :raises: ``IOError`` -- if there was an error writing to the file
+ :raises: `.SSHException` -- if the key is invalid
"""
raise Exception('Not implemented in PKey')
@@ -249,20 +252,21 @@ class PKey(object):
Read an SSH2-format private key file, looking for a string of the type
``"BEGIN xxx PRIVATE KEY"`` for some ``xxx``, base64-decode the text we
find, and return it as a string. If the private key is encrypted and
- ``password`` is not ``None``, the given password will be used to decrypt
- the key (otherwise `.PasswordRequiredException` is thrown).
+ ``password`` is not ``None``, the given password will be used to
+ decrypt the key (otherwise `.PasswordRequiredException` is thrown).
- :param str tag: ``"RSA"`` or ``"DSA"``, the tag used to mark the data block.
+ :param str tag: ``"RSA"`` or ``"DSA"``, the tag used to mark the
+ data block.
:param str filename: name of the file to read.
:param str password:
an optional password to use to decrypt the key file, if it's
encrypted.
:return: data blob (`str`) that makes up the private key.
- :raises IOError: if there was an error reading the file.
- :raises PasswordRequiredException: if the private key file is
+ :raises: ``IOError`` -- if there was an error reading the file.
+ :raises: `.PasswordRequiredException` -- if the private key file is
encrypted, and ``password`` is ``None``.
- :raises SSHException: if the key file is invalid.
+ :raises: `.SSHException` -- if the key file is invalid.
"""
with open(filename, 'r') as f:
data = self._read_private_key(tag, f, password)
@@ -271,7 +275,8 @@ class PKey(object):
def _read_private_key(self, tag, f, password=None):
lines = f.readlines()
start = 0
- while (start < len(lines)) and (lines[start].strip() != '-----BEGIN ' + tag + ' PRIVATE KEY-----'):
+ beginning_of_key = '-----BEGIN ' + tag + ' PRIVATE KEY-----'
+ while start < len(lines) and lines[start].strip() != beginning_of_key:
start += 1
if start >= len(lines):
raise SSHException('not a valid ' + tag + ' private key file')
@@ -286,7 +291,8 @@ class PKey(object):
start += 1
# find end
end = start
- while end < len(lines) and lines[end].strip() != '-----END ' + tag + ' PRIVATE KEY-----':
+ ending_of_key = '-----END ' + tag + ' PRIVATE KEY-----'
+ while end < len(lines) and lines[end].strip() != ending_of_key:
end += 1
# if we trudged to the end of the file, just try to cope.
try:
@@ -298,14 +304,17 @@ class PKey(object):
return data
# encrypted keyfile: will need a password
if headers['proc-type'] != '4,ENCRYPTED':
- raise SSHException('Unknown private key structure "%s"' % headers['proc-type'])
+ raise SSHException(
+ 'Unknown private key structure "%s"' % headers['proc-type'])
try:
encryption_type, saltstr = headers['dek-info'].split(',')
except:
raise SSHException("Can't parse DEK-info in private key file")
if encryption_type not in self._CIPHER_TABLE:
- raise SSHException('Unknown private key cipher "%s"' % encryption_type)
- # if no password was passed in, raise an exception pointing out that we need one
+ raise SSHException(
+ 'Unknown private key cipher "%s"' % encryption_type)
+ # if no password was passed in,
+ # raise an exception pointing out that we need one
if password is None:
raise PasswordRequiredException('Private key file is encrypted')
cipher = self._CIPHER_TABLE[encryption_type]['cipher']
@@ -331,10 +340,9 @@ class PKey(object):
:param str data: data blob that makes up the private key.
:param str password: an optional password to use to encrypt the file.
- :raises IOError: if there was an error writing the file.
+ :raises: ``IOError`` -- if there was an error writing the file.
"""
- with open(filename, 'w', o600) as f:
- # grrr... the mode doesn't always take hold
+ with open(filename, 'w') as f:
os.chmod(filename, o600)
self._write_private_key(f, key, format)
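
The _read_private_key rewrap above scans for BEGIN/END markers and base64-decodes what sits between them. A simplified, unencrypted-only sketch of that scan (the real method also parses Proc-Type/DEK-Info headers and decrypts when needed)::

    import base64

    def read_pem_blob(tag, lines):
        begin = '-----BEGIN %s PRIVATE KEY-----' % tag
        end = '-----END %s PRIVATE KEY-----' % tag
        start = 0
        while start < len(lines) and lines[start].strip() != begin:
            start += 1
        if start >= len(lines):
            raise ValueError('not a valid %s private key file' % tag)
        start += 1
        stop = start
        while stop < len(lines) and lines[stop].strip() != end:
            stop += 1
        return base64.b64decode(''.join(ln.strip() for ln in lines[start:stop]))

    fake = [
        '-----BEGIN RSA PRIVATE KEY-----\n',
        base64.b64encode(b'not really a key').decode() + '\n',
        '-----END RSA PRIVATE KEY-----\n',
    ]
    assert read_pem_blob('RSA', fake) == b'not really a key'
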
diff --git a/paramiko/primes.py b/paramiko/primes.py
index d0e17575..48a34e53 100644
--- a/paramiko/primes.py
+++ b/paramiko/primes.py
@@ -25,7 +25,7 @@ import os
from paramiko import util
from paramiko.py3compat import byte_mask, long
from paramiko.ssh_exception import SSHException
-from paramiko.common import *
+from paramiko.common import * # noqa
def _roll_random(n):
@@ -62,7 +62,8 @@ class ModulusPack (object):
self.discarded = []
def _parse_modulus(self, line):
- timestamp, mod_type, tests, tries, size, generator, modulus = line.split()
+ timestamp, mod_type, tests, tries, size, generator, modulus = \
+ line.split()
mod_type = int(mod_type)
tests = int(tests)
tries = int(tries)
@@ -74,8 +75,13 @@ class ModulusPack (object):
# type 2 (meets basic structural requirements)
# test 4 (more than just a small-prime sieve)
# tries < 100 if test & 4 (at least 100 tries of miller-rabin)
- if (mod_type < 2) or (tests < 4) or ((tests & 4) and (tests < 8) and (tries < 100)):
- self.discarded.append((modulus, 'does not meet basic requirements'))
+ if (
+ mod_type < 2 or
+ tests < 4 or
+ (tests & 4 and tests < 8 and tries < 100)
+ ):
+ self.discarded.append(
+ (modulus, 'does not meet basic requirements'))
return
if generator == 0:
generator = 2
@@ -85,7 +91,8 @@ class ModulusPack (object):
# this is okay.
bl = util.bit_length(modulus)
if (bl != size) and (bl != size + 1):
- self.discarded.append((modulus, 'incorrectly reported bit length %d' % size))
+ self.discarded.append(
+ (modulus, 'incorrectly reported bit length %d' % size))
return
if bl not in self.pack:
self.pack[bl] = []
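
_parse_modulus splits each OpenSSH moduli line into seven whitespace-separated fields and applies the sanity rules listed in the comment. A sketch against a made-up (not cryptographically vetted) line in that format::

    line = '20170101000000 2 6 100 1023 2 ' + 'f' * 256   # fabricated entry

    timestamp, mod_type, tests, tries, size, generator, modulus = line.split()
    mod_type, tests, tries, size = (int(v) for v in (mod_type, tests, tries, size))
    generator = int(generator)
    modulus = int(modulus, 16)

    # the same keep/discard rules applied above
    keep = (
        mod_type >= 2 and
        tests >= 4 and
        not (tests & 4 and tests < 8 and tries < 100)
    )
    assert keep
    assert modulus.bit_length() in (size, size + 1)   # reported size may be off by one
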
diff --git a/paramiko/proxy.py b/paramiko/proxy.py
index ca602c4c..c4ec627c 100644
--- a/paramiko/proxy.py
+++ b/paramiko/proxy.py
@@ -17,11 +17,9 @@
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
-from datetime import datetime
import os
from shlex import split as shlsplit
import signal
-from subprocess import Popen, PIPE
from select import select
import socket
import time
@@ -38,7 +36,7 @@ class ProxyCommand(ClosingContextManager):
`.Transport` and `.Packetizer` classes. Using this class instead of a
regular socket makes it possible to talk with a Popen'd command that will
proxy traffic between the client and a server hosted in another machine.
-
+
Instances of this class may be used as context managers.
"""
def __init__(self, command_line):
@@ -49,10 +47,13 @@ class ProxyCommand(ClosingContextManager):
:param str command_line:
the command that should be executed and used as the proxy.
"""
+ # NOTE: subprocess import done lazily so platforms without it (e.g.
+ # GAE) can still import us during overall Paramiko load.
+ from subprocess import Popen, PIPE
self.cmd = shlsplit(command_line)
- self.process = Popen(self.cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
+ self.process = Popen(self.cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE,
+ bufsize=0)
self.timeout = None
- self.buffer = []
def send(self, content):
"""
@@ -77,11 +78,12 @@ class ProxyCommand(ClosingContextManager):
:param int size: how many chars should be read
- :return: the length of the read content, as an `int`
+ :return: the string of bytes read, which may be shorter than requested
"""
try:
+ buffer = b''
start = time.time()
- while len(self.buffer) < size:
+ while len(buffer) < size:
select_timeout = None
if self.timeout is not None:
elapsed = (time.time() - start)
@@ -92,16 +94,13 @@ class ProxyCommand(ClosingContextManager):
r, w, x = select(
[self.process.stdout], [], [], select_timeout)
if r and r[0] == self.process.stdout:
- b = os.read(
- self.process.stdout.fileno(), size - len(self.buffer))
- # Store in class-level buffer for persistence across
- # timeouts; this makes us act more like a real socket
- # (where timeouts don't actually drop data.)
- self.buffer.extend(b)
- result = ''.join(self.buffer)
- self.buffer = []
- return result
+ buffer += os.read(
+ self.process.stdout.fileno(), size - len(buffer))
+ return buffer
except socket.timeout:
+ if buffer:
+ # Don't raise socket.timeout, return partial result instead
+ return buffer
raise # socket.timeout is a subclass of IOError
except IOError as e:
raise ProxyCommandFailure(' '.join(self.cmd), e.strerror)
@@ -109,5 +108,14 @@ class ProxyCommand(ClosingContextManager):
def close(self):
os.kill(self.process.pid, signal.SIGTERM)
+ @property
+ def closed(self):
+ return self.process.returncode is not None
+
+ @property
+ def _closed(self):
+ # Concession to Python 3 socket-like API
+ return self.closed
+
def settimeout(self, timeout):
self.timeout = timeout
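
For context on the recv() rework above, this is the usual shape of ProxyCommand use: the instance is handed to the client as its "socket" and the subprocess shuttles the bytes. The command line, host names and username below are placeholders, so treat this as a shape-of-the-API sketch rather than something to run verbatim::

    import paramiko

    proxy = paramiko.ProxyCommand('ssh -W target.example.com:22 jump.example.com')
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect('target.example.com', username='user', sock=proxy)
    client.close()
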
diff --git a/paramiko/py3compat.py b/paramiko/py3compat.py
index 6fafc31d..095b0d09 100644
--- a/paramiko/py3compat.py
+++ b/paramiko/py3compat.py
@@ -1,20 +1,22 @@
import sys
import base64
-__all__ = ['PY2', 'string_types', 'integer_types', 'text_type', 'bytes_types', 'bytes', 'long', 'input',
- 'decodebytes', 'encodebytes', 'bytestring', 'byte_ord', 'byte_chr', 'byte_mask',
- 'b', 'u', 'b2s', 'StringIO', 'BytesIO', 'is_callable', 'MAXSIZE', 'next', 'builtins']
+__all__ = ['PY2', 'string_types', 'integer_types', 'text_type', 'bytes_types',
+ 'bytes', 'long', 'input', 'decodebytes', 'encodebytes',
+ 'bytestring', 'byte_ord', 'byte_chr', 'byte_mask', 'b', 'u', 'b2s',
+ 'StringIO', 'BytesIO', 'is_callable', 'MAXSIZE',
+ 'next', 'builtins']
PY2 = sys.version_info[0] < 3
if PY2:
- string_types = basestring
- text_type = unicode
+ string_types = basestring # NOQA
+ text_type = unicode # NOQA
bytes_types = str
bytes = str
- integer_types = (int, long)
- long = long
- input = raw_input
+ integer_types = (int, long) # NOQA
+ long = long # NOQA
+ input = raw_input # NOQA
decodebytes = base64.decodestring
encodebytes = base64.encodestring
@@ -22,7 +24,7 @@ if PY2:
def bytestring(s): # NOQA
- if isinstance(s, unicode):
+ if isinstance(s, unicode): # NOQA
return s.encode('utf-8')
return s
@@ -39,9 +41,9 @@ if PY2:
"""cast unicode or bytes to bytes"""
if isinstance(s, str):
return s
- elif isinstance(s, unicode):
+ elif isinstance(s, unicode): # NOQA
return s.encode(encoding)
- elif isinstance(s, buffer):
+ elif isinstance(s, buffer): # NOQA
return s
else:
raise TypeError("Expected unicode or bytes, got %r" % s)
@@ -51,9 +53,9 @@ if PY2:
"""cast bytes or unicode to unicode"""
if isinstance(s, str):
return s.decode(encoding)
- elif isinstance(s, unicode):
+ elif isinstance(s, unicode): # NOQA
return s
- elif isinstance(s, buffer):
+ elif isinstance(s, buffer): # NOQA
return s.decode(encoding)
else:
raise TypeError("Expected unicode or bytes, got %r" % s)
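
The b()/u() helpers annotated with NOQA above just normalize text/bytes across interpreters; on Python 3 they reduce to UTF-8 encode/decode plus pass-through. A Python 3 illustration, assuming this paramiko version is importable::

    from paramiko.py3compat import b, u

    assert b('caf\xe9') == b'caf\xc3\xa9'   # text -> UTF-8 bytes
    assert u(b'caf\xc3\xa9') == 'caf\xe9'   # bytes -> text
    assert b(b'raw') == b'raw'              # bytes pass straight through
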
diff --git a/paramiko/resource.py b/paramiko/resource.py
index 9809afbe..5fed22ad 100644
--- a/paramiko/resource.py
+++ b/paramiko/resource.py
@@ -27,30 +27,30 @@ class ResourceManager (object):
"""
A registry of objects and resources that should be closed when those
objects are deleted.
-
+
This is meant to be a safer alternative to Python's ``__del__`` method,
which can cause reference cycles to never be collected. Objects registered
with the ResourceManager can be collected but still free resources when
they die.
-
+
Resources are registered using `register`, and when an object is garbage
collected, each registered resource is closed by having its ``close()``
method called. Multiple resources may be registered per object, but a
resource will only be closed once, even if multiple objects register it.
(The last object to register it wins.)
"""
-
+
def __init__(self):
self._table = {}
-
+
def register(self, obj, resource):
"""
Register a resource to be closed with an object is collected.
-
+
When the given ``obj`` is garbage-collected by the Python interpreter,
- the ``resource`` will be closed by having its ``close()`` method called.
- Any exceptions are ignored.
-
+ the ``resource`` will be closed by having its ``close()`` method
+ called. Any exceptions are ignored.
+
:param object obj: the object to track
:param object resource:
the resource to close when the object is collected
diff --git a/paramiko/rsakey.py b/paramiko/rsakey.py
index bc9053f5..f6d11a09 100644
--- a/paramiko/rsakey.py
+++ b/paramiko/rsakey.py
@@ -27,6 +27,7 @@ from cryptography.hazmat.primitives.asymmetric import rsa, padding
from paramiko.message import Message
from paramiko.pkey import PKey
+from paramiko.py3compat import PY2
from paramiko.ssh_exception import SSHException
@@ -36,7 +37,8 @@ class RSAKey(PKey):
data.
"""
- def __init__(self, msg=None, data=None, filename=None, password=None, key=None, file_obj=None):
+ def __init__(self, msg=None, data=None, filename=None, password=None,
+ key=None, file_obj=None):
self.key = None
if file_obj is not None:
self._from_private_key(file_obj, password)
@@ -76,7 +78,16 @@ class RSAKey(PKey):
return m.asbytes()
def __str__(self):
- return self.asbytes()
+ # NOTE: as per inane commentary in #853, this appears to be the least
+ # crummy way to get a representation that prints identical to Python
+ # 2's previous behavior, on both interpreters.
+ # TODO: replace with a nice clean fingerprint display or something
+ if PY2:
+ # Can't just return the .decode below for Py2 because stuff still
+ # tries stuffing it into ASCII for whatever godforsaken reason
+ return self.asbytes()
+ else:
+ return self.asbytes().decode('utf8', errors='ignore')
def __hash__(self):
h = hash(self.get_name())
@@ -149,7 +160,7 @@ class RSAKey(PKey):
generate a new host key or authentication key.
:param int bits: number of bits the generated key should be.
- :param function progress_func: Unused
+ :param progress_func: Unused
:return: new `.RSAKey` private key
"""
key = rsa.generate_private_key(
@@ -157,7 +168,7 @@ class RSAKey(PKey):
)
return RSAKey(key=key)
- ### internals...
+ # ...internals...
def _from_private_key_file(self, filename, password):
data = self._read_private_key_file('RSA', filename, password)
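
For context on the RSAKey changes, the usual generate-and-save flow looks like this (the output path and passphrase are placeholders); generate() is the classmethod documented in the hunk above, and get_base64() comes from PKey::

    import paramiko

    key = paramiko.RSAKey.generate(2048)
    key.write_private_key_file('/tmp/demo_rsa', password='s3cret')   # placeholder path
    print(key.get_name(), key.get_bits())      # 'ssh-rsa', 2048
    print(key.get_base64()[:24] + '...')       # public blob for authorized_keys
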
diff --git a/paramiko/server.py b/paramiko/server.py
index f79a1748..adc606bf 100644
--- a/paramiko/server.py
+++ b/paramiko/server.py
@@ -22,7 +22,10 @@
import threading
from paramiko import util
-from paramiko.common import DEBUG, ERROR, OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED, AUTH_FAILED, AUTH_SUCCESSFUL
+from paramiko.common import (
+ DEBUG, ERROR, OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED, AUTH_FAILED,
+ AUTH_SUCCESSFUL,
+)
from paramiko.py3compat import string_types
@@ -69,7 +72,7 @@ class ServerInterface (object):
- ``OPEN_FAILED_CONNECT_FAILED``
- ``OPEN_FAILED_UNKNOWN_CHANNEL_TYPE``
- ``OPEN_FAILED_RESOURCE_SHORTAGE``
-
+
The default implementation always returns
``OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED``.
@@ -103,15 +106,15 @@ class ServerInterface (object):
Determine if a client may open channels with no (further)
authentication.
- Return `.AUTH_FAILED` if the client must authenticate, or
- `.AUTH_SUCCESSFUL` if it's okay for the client to not
+ Return ``AUTH_FAILED`` if the client must authenticate, or
+ ``AUTH_SUCCESSFUL`` if it's okay for the client to not
authenticate.
- The default implementation always returns `.AUTH_FAILED`.
+ The default implementation always returns ``AUTH_FAILED``.
:param str username: the username of the client.
:return:
- `.AUTH_FAILED` if the authentication fails; `.AUTH_SUCCESSFUL` if
+ ``AUTH_FAILED`` if the authentication fails; ``AUTH_SUCCESSFUL`` if
it succeeds.
:rtype: int
"""
@@ -122,21 +125,21 @@ class ServerInterface (object):
Determine if a given username and password supplied by the client is
acceptable for use in authentication.
- Return `.AUTH_FAILED` if the password is not accepted,
- `.AUTH_SUCCESSFUL` if the password is accepted and completes
- the authentication, or `.AUTH_PARTIALLY_SUCCESSFUL` if your
+ Return ``AUTH_FAILED`` if the password is not accepted,
+ ``AUTH_SUCCESSFUL`` if the password is accepted and completes
+ the authentication, or ``AUTH_PARTIALLY_SUCCESSFUL`` if your
authentication is stateful, and this key is accepted for
authentication, but more authentication is required. (In this latter
case, `get_allowed_auths` will be called to report to the client what
options it has for continuing the authentication.)
- The default implementation always returns `.AUTH_FAILED`.
+ The default implementation always returns ``AUTH_FAILED``.
:param str username: the username of the authenticating client.
:param str password: the password given by the client.
:return:
- `.AUTH_FAILED` if the authentication fails; `.AUTH_SUCCESSFUL` if
- it succeeds; `.AUTH_PARTIALLY_SUCCESSFUL` if the password auth is
+ ``AUTH_FAILED`` if the authentication fails; ``AUTH_SUCCESSFUL`` if
+ it succeeds; ``AUTH_PARTIALLY_SUCCESSFUL`` if the password auth is
successful, but authentication must continue.
:rtype: int
"""
@@ -149,9 +152,9 @@ class ServerInterface (object):
check the username and key and decide if you would accept a signature
made using this key.
- Return `.AUTH_FAILED` if the key is not accepted,
- `.AUTH_SUCCESSFUL` if the key is accepted and completes the
- authentication, or `.AUTH_PARTIALLY_SUCCESSFUL` if your
+ Return ``AUTH_FAILED`` if the key is not accepted,
+ ``AUTH_SUCCESSFUL`` if the key is accepted and completes the
+ authentication, or ``AUTH_PARTIALLY_SUCCESSFUL`` if your
authentication is stateful, and this password is accepted for
authentication, but more authentication is required. (In this latter
case, `get_allowed_auths` will be called to report to the client what
@@ -160,54 +163,54 @@ class ServerInterface (object):
Note that you don't have to actually verify any key signature here.
If you're willing to accept the key, Paramiko will do the work of
verifying the client's signature.
-
- The default implementation always returns `.AUTH_FAILED`.
+
+ The default implementation always returns ``AUTH_FAILED``.
:param str username: the username of the authenticating client
:param .PKey key: the key object provided by the client
:return:
- `.AUTH_FAILED` if the client can't authenticate with this key;
- `.AUTH_SUCCESSFUL` if it can; `.AUTH_PARTIALLY_SUCCESSFUL` if it
+ ``AUTH_FAILED`` if the client can't authenticate with this key;
+ ``AUTH_SUCCESSFUL`` if it can; ``AUTH_PARTIALLY_SUCCESSFUL`` if it
can authenticate with this key but must continue with
authentication
:rtype: int
"""
return AUTH_FAILED
-
+
def check_auth_interactive(self, username, submethods):
"""
Begin an interactive authentication challenge, if supported. You
should override this method in server mode if you want to support the
``"keyboard-interactive"`` auth type, which requires you to send a
series of questions for the client to answer.
-
- Return `.AUTH_FAILED` if this auth method isn't supported. Otherwise,
+
+ Return ``AUTH_FAILED`` if this auth method isn't supported. Otherwise,
you should return an `.InteractiveQuery` object containing the prompts
and instructions for the user. The response will be sent via a call
to `check_auth_interactive_response`.
-
- The default implementation always returns `.AUTH_FAILED`.
-
+
+ The default implementation always returns ``AUTH_FAILED``.
+
:param str username: the username of the authenticating client
:param str submethods:
a comma-separated list of methods preferred by the client (usually
empty)
:return:
- `.AUTH_FAILED` if this auth method isn't supported; otherwise an
+ ``AUTH_FAILED`` if this auth method isn't supported; otherwise an
object containing queries for the user
:rtype: int or `.InteractiveQuery`
"""
return AUTH_FAILED
-
+
def check_auth_interactive_response(self, responses):
"""
Continue or finish an interactive authentication challenge, if
supported. You should override this method in server mode if you want
to support the ``"keyboard-interactive"`` auth type.
-
- Return `.AUTH_FAILED` if the responses are not accepted,
- `.AUTH_SUCCESSFUL` if the responses are accepted and complete
- the authentication, or `.AUTH_PARTIALLY_SUCCESSFUL` if your
+
+ Return ``AUTH_FAILED`` if the responses are not accepted,
+ ``AUTH_SUCCESSFUL`` if the responses are accepted and complete
+ the authentication, or ``AUTH_PARTIALLY_SUCCESSFUL`` if your
authentication is stateful, and this set of responses is accepted for
authentication, but more authentication is required. (In this latter
case, `get_allowed_auths` will be called to report to the client what
@@ -218,12 +221,12 @@ class ServerInterface (object):
client to respond with more answers, calling this method again. This
cycle can continue indefinitely.
- The default implementation always returns `.AUTH_FAILED`.
+ The default implementation always returns ``AUTH_FAILED``.
:param list responses: list of `str` responses from the client
:return:
- `.AUTH_FAILED` if the authentication fails; `.AUTH_SUCCESSFUL` if
- it succeeds; `.AUTH_PARTIALLY_SUCCESSFUL` if the interactive auth
+ ``AUTH_FAILED`` if the authentication fails; ``AUTH_SUCCESSFUL`` if
+ it succeeds; ``AUTH_PARTIALLY_SUCCESSFUL`` if the interactive auth
is successful, but authentication must continue; otherwise an
object containing queries for the user
:rtype: int or `.InteractiveQuery`
@@ -240,8 +243,8 @@ class ServerInterface (object):
:param str username: The username of the authenticating client
:param int gss_authenticated: The result of the krb5 authentication
:param str cc_filename: The krb5 client credentials cache filename
- :return: `.AUTH_FAILED` if the user is not authenticated otherwise
- `.AUTH_SUCCESSFUL`
+ :return: ``AUTH_FAILED`` if the user is not authenticated otherwise
+ ``AUTH_SUCCESSFUL``
:rtype: int
:note: Kerberos credential delegation is not supported.
:see: `.ssh_gss`
@@ -250,10 +253,11 @@ class ServerInterface (object):
We don't check if the krb5 principal is allowed to log in on
the server, because there is no way to do that in python. So
if you develop your own SSH server with paramiko for a certain
- plattform like Linux, you should call C{krb5_kuserok()} in your
- local kerberos library to make sure that the krb5_principal has
- an account on the server and is allowed to log in as a user.
- :see: `http://www.unix.com/man-page/all/3/krb5_kuserok/`
+ platform like Linux, you should call C{krb5_kuserok()} in
+ your local kerberos library to make sure that the
+ krb5_principal has an account on the server and is allowed to
+ log in as a user.
+ :see: http://www.unix.com/man-page/all/3/krb5_kuserok/
"""
if gss_authenticated == AUTH_SUCCESSFUL:
return AUTH_SUCCESSFUL
@@ -271,20 +275,21 @@ class ServerInterface (object):
:param str username: The username of the authenticating client
:param int gss_authenticated: The result of the krb5 authentication
:param str cc_filename: The krb5 client credentials cache filename
- :return: `.AUTH_FAILED` if the user is not authenticated otherwise
- `.AUTH_SUCCESSFUL`
+ :return: ``AUTH_FAILED`` if the user is not authenticated otherwise
+ ``AUTH_SUCCESSFUL``
:rtype: int
:note: Kerberos credential delegation is not supported.
- :see: `.ssh_gss` `.kex_gss`
+ :see: `.ssh_gss` `.kex_gss`
:note: : We are just checking in L{AuthHandler} that the given user is
a valid krb5 principal!
We don't check if the krb5 principal is allowed to log in on
the server, because there is no way to do that in python. So
if you develop your own SSH server with paramiko for a certain
- plattform like Linux, you should call C{krb5_kuserok()} in your
- local kerberos library to make sure that the krb5_principal has
- an account on the server and is allowed to log in as a user.
- :see: `http://www.unix.com/man-page/all/3/krb5_kuserok/`
+ platform like Linux, you should call C{krb5_kuserok()} in
+ your local kerberos library to make sure that the
+ krb5_principal has an account on the server and is allowed
+ to log in as a user.
+ :see: http://www.unix.com/man-page/all/3/krb5_kuserok/
"""
if gss_authenticated == AUTH_SUCCESSFUL:
return AUTH_SUCCESSFUL
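
The docstring changes above spell out the auth-callback contract: each check_auth_* method returns AUTH_SUCCESSFUL, AUTH_PARTIALLY_SUCCESSFUL or AUTH_FAILED, and get_allowed_auths advertises which methods may continue. A minimal password-only subclass, with placeholder credentials, as a sketch of that contract::

    import paramiko
    from paramiko.common import AUTH_FAILED, AUTH_SUCCESSFUL

    class DemoServer(paramiko.ServerInterface):
        def get_allowed_auths(self, username):
            return 'password'                      # comma-separated list of methods

        def check_auth_password(self, username, password):
            if (username, password) == ('demo', 's3cret'):   # placeholder credentials
                return AUTH_SUCCESSFUL
            return AUTH_FAILED
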
@@ -296,14 +301,12 @@ class ServerInterface (object):
authentication.
The default implementation always returns false.
- :return: True if GSSAPI authentication is enabled otherwise false
- :rtype: Boolean
- :see: : `.ssh_gss`
+ :returns bool: Whether GSSAPI authentication is enabled.
+ :see: `.ssh_gss`
"""
UseGSSAPI = False
- GSSAPICleanupCredentials = False
return UseGSSAPI
-
+
def check_port_forward_request(self, address, port):
"""
Handle a request for port forwarding. The client is asking that
@@ -312,11 +315,11 @@ class ServerInterface (object):
address (any address associated with this server) and a port of ``0``
indicates that no specific port is requested (usually the OS will pick
a port).
-
+
The default implementation always returns ``False``, rejecting the
port forwarding request. If the request is accepted, you should return
the port opened for listening.
-
+
:param str address: the requested address
:param int port: the requested port
:return:
@@ -324,18 +327,18 @@ class ServerInterface (object):
to reject
"""
return False
-
+
def cancel_port_forward_request(self, address, port):
"""
The client would like to cancel a previous port-forwarding request.
If the given address and port is being forwarded across this ssh
connection, the port should be closed.
-
+
:param str address: the forwarded address
:param int port: the forwarded port
"""
pass
-
+
def check_global_request(self, kind, msg):
"""
Handle a global request of the given ``kind``. This method is called
@@ -354,7 +357,7 @@ class ServerInterface (object):
The default implementation always returns ``False``, indicating that it
does not support any global requests.
-
+
.. note:: Port forwarding requests are handled separately, in
`check_port_forward_request`.
@@ -366,10 +369,11 @@ class ServerInterface (object):
"""
return False
- ### Channel requests
+ # ...Channel requests...
- def check_channel_pty_request(self, channel, term, width, height, pixelwidth, pixelheight,
- modes):
+ def check_channel_pty_request(
+ self, channel, term, width, height, pixelwidth, pixelheight,
+ modes):
"""
Determine if a pseudo-terminal of the given dimensions (usually
requested for shell access) can be provided on the given channel.
@@ -385,7 +389,7 @@ class ServerInterface (object):
:param int pixelheight:
height of screen in pixels, if known (may be ``0`` if unknown).
:return:
- ``True`` if the psuedo-terminal has been allocated; ``False``
+ ``True`` if the pseudo-terminal has been allocated; ``False``
otherwise.
"""
return False
@@ -411,20 +415,20 @@ class ServerInterface (object):
Determine if a shell command will be executed for the client. If this
method returns ``True``, the channel should be connected to the stdin,
stdout, and stderr of the shell command.
-
+
The default implementation always returns ``False``.
-
+
:param .Channel channel: the `.Channel` the request arrived on.
:param str command: the command to execute.
:return:
``True`` if this channel is now hooked up to the stdin, stdout, and
stderr of the executing command; ``False`` if the command will not
be executed.
-
+
.. versionadded:: 1.1
"""
return False
-
+
def check_channel_subsystem_request(self, channel, name):
"""
Determine if a requested subsystem will be provided to the client on
@@ -447,14 +451,16 @@ class ServerInterface (object):
``True`` if this channel is now hooked up to the requested
subsystem; ``False`` if that subsystem can't or won't be provided.
"""
- handler_class, larg, kwarg = channel.get_transport()._get_subsystem_handler(name)
+ transport = channel.get_transport()
+ handler_class, larg, kwarg = transport._get_subsystem_handler(name)
if handler_class is None:
return False
handler = handler_class(channel, name, self, *larg, **kwarg)
handler.start()
return True
- def check_channel_window_change_request(self, channel, width, height, pixelwidth, pixelheight):
+ def check_channel_window_change_request(
+ self, channel, width, height, pixelwidth, pixelheight):
"""
Determine if the pseudo-terminal on the given channel can be resized.
This only makes sense if a pty was previously allocated on it.
@@ -471,15 +477,17 @@ class ServerInterface (object):
:return: ``True`` if the terminal was resized; ``False`` if not.
"""
return False
-
- def check_channel_x11_request(self, channel, single_connection, auth_protocol, auth_cookie, screen_number):
+
+ def check_channel_x11_request(
+ self, channel, single_connection, auth_protocol, auth_cookie,
+ screen_number):
"""
Determine if the client will be provided with an X11 session. If this
method returns ``True``, X11 applications should be routed through new
SSH channels, using `.Transport.open_x11_channel`.
-
+
The default implementation always returns ``False``.
-
+
:param .Channel channel: the `.Channel` the X11 request arrived on
:param bool single_connection:
``True`` if only a single X11 channel should be opened, else
@@ -529,7 +537,7 @@ class ServerInterface (object):
- ``OPEN_FAILED_CONNECT_FAILED``
- ``OPEN_FAILED_UNKNOWN_CHANNEL_TYPE``
- ``OPEN_FAILED_RESOURCE_SHORTAGE``
-
+
The default implementation always returns
``OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED``.
@@ -567,14 +575,14 @@ class InteractiveQuery (object):
"""
A query (set of prompts) for a user during interactive authentication.
"""
-
+
def __init__(self, name='', instructions='', *prompts):
"""
Create a new interactive query to send to the client. The name and
instructions are optional, but are generally displayed to the end
user. A list of prompts may be included, or they may be added via
the `add_prompt` method.
-
+
:param str name: name of this query
:param str instructions:
user instructions (usually short) about this query
@@ -588,12 +596,12 @@ class InteractiveQuery (object):
self.add_prompt(x)
else:
self.add_prompt(x[0], x[1])
-
+
def add_prompt(self, prompt, echo=True):
"""
Add a prompt to this query. The prompt should be a (reasonably short)
string. Multiple prompts can be added to the same query.
-
+
:param str prompt: the user prompt
:param bool echo:
``True`` (default) if the user's response should be echoed;
@@ -621,10 +629,11 @@ class SubsystemHandler (threading.Thread):
Create a new handler for a channel. This is used by `.ServerInterface`
to start up a new handler when a channel requests this subsystem. You
don't need to override this method, but if you do, be sure to pass the
- ``channel`` and ``name`` parameters through to the original ``__init__``
- method here.
+ ``channel`` and ``name`` parameters through to the original
+ ``__init__`` method here.
- :param .Channel channel: the channel associated with this subsystem request.
+ :param .Channel channel: the channel associated with this
+ subsystem request.
:param str name: name of the requested subsystem.
:param .ServerInterface server:
the server object for the session that started this subsystem
@@ -634,7 +643,7 @@ class SubsystemHandler (threading.Thread):
self.__transport = channel.get_transport()
self.__name = name
self.__server = server
-
+
def get_server(self):
"""
Return the `.ServerInterface` object associated with this channel and
@@ -644,11 +653,16 @@ class SubsystemHandler (threading.Thread):
def _run(self):
try:
- self.__transport._log(DEBUG, 'Starting handler for subsystem %s' % self.__name)
+ self.__transport._log(
+ DEBUG, 'Starting handler for subsystem %s' % self.__name)
self.start_subsystem(self.__name, self.__transport, self.__channel)
except Exception as e:
- self.__transport._log(ERROR, 'Exception in subsystem handler for "%s": %s' %
- (self.__name, str(e)))
+ self.__transport._log(
+ ERROR,
+ 'Exception in subsystem handler for "{0}": {1}'.format(
+ self.__name, e
+ )
+ )
self.__transport._log(ERROR, util.tb_strings())
try:
self.finish_subsystem()
@@ -663,8 +677,8 @@ class SubsystemHandler (threading.Thread):
subsystem is finished, this method will return. After this method
returns, the channel is closed.
- The combination of ``transport`` and ``channel`` are unique; this handler
- corresponds to exactly one `.Channel` on one `.Transport`.
+ The combination of ``transport`` and ``channel`` are unique; this
+ handler corresponds to exactly one `.Channel` on one `.Transport`.
.. note::
It is the responsibility of this method to exit if the underlying
@@ -676,7 +690,8 @@ class SubsystemHandler (threading.Thread):
:param str name: name of the requested subsystem.
:param .Transport transport: the server-mode `.Transport`.
- :param .Channel channel: the channel associated with this subsystem request.
+ :param .Channel channel: the channel associated with this subsystem
+ request.
"""
pass
diff --git a/paramiko/sftp.py b/paramiko/sftp.py
index f44a804d..e6786d10 100644
--- a/paramiko/sftp.py
+++ b/paramiko/sftp.py
@@ -26,15 +26,17 @@ from paramiko.message import Message
from paramiko.py3compat import byte_chr, byte_ord
-CMD_INIT, CMD_VERSION, CMD_OPEN, CMD_CLOSE, CMD_READ, CMD_WRITE, CMD_LSTAT, CMD_FSTAT, \
- CMD_SETSTAT, CMD_FSETSTAT, CMD_OPENDIR, CMD_READDIR, CMD_REMOVE, CMD_MKDIR, \
- CMD_RMDIR, CMD_REALPATH, CMD_STAT, CMD_RENAME, CMD_READLINK, CMD_SYMLINK = range(1, 21)
+CMD_INIT, CMD_VERSION, CMD_OPEN, CMD_CLOSE, CMD_READ, CMD_WRITE, CMD_LSTAT, \
+ CMD_FSTAT, CMD_SETSTAT, CMD_FSETSTAT, CMD_OPENDIR, CMD_READDIR, \
+ CMD_REMOVE, CMD_MKDIR, CMD_RMDIR, CMD_REALPATH, CMD_STAT, CMD_RENAME, \
+ CMD_READLINK, CMD_SYMLINK = range(1, 21)
CMD_STATUS, CMD_HANDLE, CMD_DATA, CMD_NAME, CMD_ATTRS = range(101, 106)
CMD_EXTENDED, CMD_EXTENDED_REPLY = range(200, 202)
SFTP_OK = 0
-SFTP_EOF, SFTP_NO_SUCH_FILE, SFTP_PERMISSION_DENIED, SFTP_FAILURE, SFTP_BAD_MESSAGE, \
- SFTP_NO_CONNECTION, SFTP_CONNECTION_LOST, SFTP_OP_UNSUPPORTED = range(1, 9)
+SFTP_EOF, SFTP_NO_SUCH_FILE, SFTP_PERMISSION_DENIED, SFTP_FAILURE, \
+ SFTP_BAD_MESSAGE, SFTP_NO_CONNECTION, SFTP_CONNECTION_LOST, \
+ SFTP_OP_UNSUPPORTED = range(1, 9)
SFTP_DESC = ['Success',
'End of file',
@@ -98,7 +100,7 @@ class BaseSFTP (object):
self.sock = None
self.ultra_debug = False
- ### internals...
+ # ...internals...
def _send_version(self):
self._send_packet(CMD_INIT, struct.pack('>I', _VERSION))
@@ -124,7 +126,7 @@ class BaseSFTP (object):
msg.add(*extension_pairs)
self._send_packet(CMD_VERSION, msg)
return version
-
+
def _log(self, level, msg, *args):
self.logger.log(level, msg, *args)
@@ -154,7 +156,7 @@ class BaseSFTP (object):
break
else:
x = self.sock.recv(n)
-
+
if len(x) == 0:
raise EOFError()
out += x
@@ -162,7 +164,6 @@ class BaseSFTP (object):
return out
def _send_packet(self, t, packet):
- #self._log(DEBUG2, 'write: %s (len=%d)' % (CMD_NAMES.get(t, '0x%02x' % t), len(packet)))
packet = asbytes(packet)
out = struct.pack('>I', len(packet) + 1) + byte_chr(t) + packet
if self.ultra_debug:
@@ -181,6 +182,5 @@ class BaseSFTP (object):
self._log(DEBUG, util.format_binary(data, 'IN: '))
if size > 0:
t = byte_ord(data[0])
- #self._log(DEBUG2, 'read: %s (len=%d)' % (CMD_NAMES.get(t), '0x%02x' % t, len(data)-1))
return t, data[1:]
return 0, bytes()
diff --git a/paramiko/sftp_attr.py b/paramiko/sftp_attr.py
index 0eaca30b..5597948a 100644
--- a/paramiko/sftp_attr.py
+++ b/paramiko/sftp_attr.py
@@ -84,7 +84,7 @@ class SFTPAttributes (object):
def __repr__(self):
return '<SFTPAttributes: %s>' % self._debug_str()
- ### internals...
+ # ...internals...
@classmethod
def _from_msg(cls, msg, filename=None, longname=None):
attr = cls()
@@ -189,9 +189,12 @@ class SFTPAttributes (object):
ks = 's'
else:
ks = '?'
- ks += self._rwx((self.st_mode & o700) >> 6, self.st_mode & stat.S_ISUID)
- ks += self._rwx((self.st_mode & o70) >> 3, self.st_mode & stat.S_ISGID)
- ks += self._rwx(self.st_mode & 7, self.st_mode & stat.S_ISVTX, True)
+ ks += self._rwx(
+ (self.st_mode & o700) >> 6, self.st_mode & stat.S_ISUID)
+ ks += self._rwx(
+ (self.st_mode & o70) >> 3, self.st_mode & stat.S_ISGID)
+ ks += self._rwx(
+ self.st_mode & 7, self.st_mode & stat.S_ISVTX, True)
else:
ks = '?---------'
# compute display date
@@ -201,9 +204,11 @@ class SFTPAttributes (object):
else:
if abs(time.time() - self.st_mtime) > 15552000:
# (15552000 = 6 months)
- datestr = time.strftime('%d %b %Y', time.localtime(self.st_mtime))
+ datestr = time.strftime(
+ '%d %b %Y', time.localtime(self.st_mtime))
else:
- datestr = time.strftime('%d %b %H:%M', time.localtime(self.st_mtime))
+ datestr = time.strftime(
+ '%d %b %H:%M', time.localtime(self.st_mtime))
filename = getattr(self, 'filename', '?')
# not all servers support uid/gid
@@ -217,7 +222,8 @@ class SFTPAttributes (object):
if size is None:
size = 0
- return '%s 1 %-8d %-8d %8d %-12s %s' % (ks, uid, gid, size, datestr, filename)
+ return '%s 1 %-8d %-8d %8d %-12s %s' % (
+ ks, uid, gid, size, datestr, filename)
def asbytes(self):
return b(str(self))
diff --git a/paramiko/sftp_client.py b/paramiko/sftp_client.py
index 0df94389..12fccb2f 100644
--- a/paramiko/sftp_client.py
+++ b/paramiko/sftp_client.py
@@ -28,13 +28,17 @@ from paramiko import util
from paramiko.channel import Channel
from paramiko.message import Message
from paramiko.common import INFO, DEBUG, o777
-from paramiko.py3compat import bytestring, b, u, long, string_types, bytes_types
-from paramiko.sftp import BaseSFTP, CMD_OPENDIR, CMD_HANDLE, SFTPError, CMD_READDIR, \
- CMD_NAME, CMD_CLOSE, SFTP_FLAG_READ, SFTP_FLAG_WRITE, SFTP_FLAG_CREATE, \
- SFTP_FLAG_TRUNC, SFTP_FLAG_APPEND, SFTP_FLAG_EXCL, CMD_OPEN, CMD_REMOVE, \
- CMD_RENAME, CMD_MKDIR, CMD_RMDIR, CMD_STAT, CMD_ATTRS, CMD_LSTAT, \
- CMD_SYMLINK, CMD_SETSTAT, CMD_READLINK, CMD_REALPATH, CMD_STATUS, SFTP_OK, \
- SFTP_EOF, SFTP_NO_SUCH_FILE, SFTP_PERMISSION_DENIED
+from paramiko.py3compat import (
+ bytestring, b, u, long, string_types, bytes_types,
+)
+from paramiko.sftp import (
+ BaseSFTP, CMD_OPENDIR, CMD_HANDLE, SFTPError, CMD_READDIR, CMD_NAME,
+ CMD_CLOSE, SFTP_FLAG_READ, SFTP_FLAG_WRITE, SFTP_FLAG_CREATE,
+ SFTP_FLAG_TRUNC, SFTP_FLAG_APPEND, SFTP_FLAG_EXCL, CMD_OPEN, CMD_REMOVE,
+ CMD_RENAME, CMD_MKDIR, CMD_RMDIR, CMD_STAT, CMD_ATTRS, CMD_LSTAT,
+ CMD_SYMLINK, CMD_SETSTAT, CMD_READLINK, CMD_REALPATH, CMD_STATUS, SFTP_OK,
+ SFTP_EOF, SFTP_NO_SUCH_FILE, SFTP_PERMISSION_DENIED,
+)
from paramiko.sftp_attr import SFTPAttributes
from paramiko.ssh_exception import SSHException
@@ -56,6 +60,7 @@ def _to_unicode(s):
except UnicodeError:
return s
+
b_slash = b'/'
@@ -78,8 +83,8 @@ class SFTPClient(BaseSFTP, ClosingContextManager):
:param .Channel sock: an open `.Channel` using the ``"sftp"`` subsystem
- :raises SSHException: if there's an exception while negotiating
- sftp
+ :raises:
+ `.SSHException` -- if there's an exception while negotiating sftp
"""
BaseSFTP.__init__(self)
self.sock = sock
@@ -93,13 +98,16 @@ class SFTPClient(BaseSFTP, ClosingContextManager):
if type(sock) is Channel:
# override default logger
transport = self.sock.get_transport()
- self.logger = util.get_logger(transport.get_log_channel() + '.sftp')
+ self.logger = util.get_logger(
+ transport.get_log_channel() + '.sftp')
self.ultra_debug = transport.get_hexdump()
try:
server_version = self._send_version()
except EOFError:
raise SSHException('EOF during negotiation')
- self._log(INFO, 'Opened sftp connection (server version %d)' % server_version)
+ self._log(
+ INFO,
+ 'Opened sftp connection (server version %d)' % server_version)
@classmethod
def from_transport(cls, t, window_size=None, max_packet_size=None):
@@ -111,7 +119,8 @@ class SFTPClient(BaseSFTP, ClosingContextManager):
OpenSSH and should work adequately for both files transfers and
interactive sessions.
- :param .Transport t: an open `.Transport` which is already authenticated
+ :param .Transport t: an open `.Transport` which is already
+ authenticated
:param int window_size:
optional window size for the `.SFTPClient` session.
:param int max_packet_size:
@@ -136,9 +145,12 @@ class SFTPClient(BaseSFTP, ClosingContextManager):
for m in msg:
self._log(level, m, *args)
else:
- # escape '%' in msg (they could come from file or directory names) before logging
- msg = msg.replace('%','%%')
- super(SFTPClient, self)._log(level, "[chan %s] " + msg, *([self.sock.get_name()] + list(args)))
+ # escape '%' in msg (they could come from file or directory names)
+ # before logging
+ msg = msg.replace('%', '%%')
+ super(SFTPClient, self)._log(
+ level,
+ "[chan %s] " + msg, *([self.sock.get_name()] + list(args)))
def close(self):
"""
@@ -160,7 +172,8 @@ class SFTPClient(BaseSFTP, ClosingContextManager):
def listdir(self, path='.'):
"""
- Return a list containing the names of the entries in the given ``path``.
+ Return a list containing the names of the entries in the given
+ ``path``.
The list is in arbitrary order. It does not include the special
entries ``'.'`` and ``'..'`` even if they are present in the folder.
@@ -223,7 +236,7 @@ class SFTPClient(BaseSFTP, ClosingContextManager):
``read_aheads``, an integer controlling how many
``SSH_FXP_READDIR`` requests are made to the server. The default of 50
should suffice for most file listings as each request/response cycle
- may contain multiple files (dependant on server implementation.)
+ may contain multiple files (dependent on server implementation.)
.. versionadded:: 1.15
"""
@@ -308,7 +321,7 @@ class SFTPClient(BaseSFTP, ClosingContextManager):
:param int bufsize: desired buffering (-1 = default buffer size)
:return: an `.SFTPFile` object representing the open file
- :raises IOError: if the file could not be opened.
+ :raises: ``IOError`` -- if the file could not be opened.
"""
filename = self._adjust_cwd(filename)
self._log(DEBUG, 'open(%r, %r)' % (filename, mode))
@@ -328,7 +341,9 @@ class SFTPClient(BaseSFTP, ClosingContextManager):
if t != CMD_HANDLE:
raise SFTPError('Expected handle')
handle = msg.get_binary()
- self._log(DEBUG, 'open(%r, %r) -> %s' % (filename, mode, hexlify(handle)))
+ self._log(
+ DEBUG,
+ 'open(%r, %r) -> %s' % (filename, mode, hexlify(handle)))
return SFTPFile(self, handle, mode, bufsize)
# Python continues to vacillate about "open" vs "file"...
@@ -341,7 +356,7 @@ class SFTPClient(BaseSFTP, ClosingContextManager):
:param str path: path (absolute or relative) of the file to remove
- :raises IOError: if the path refers to a folder (directory)
+ :raises: ``IOError`` -- if the path refers to a folder (directory)
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'remove(%r)' % path)
@@ -356,7 +371,8 @@ class SFTPClient(BaseSFTP, ClosingContextManager):
:param str oldpath: existing name of the file or folder
:param str newpath: new name for the file or folder
- :raises IOError: if ``newpath`` is a folder, or something else goes
+ :raises:
+ ``IOError`` -- if ``newpath`` is a folder, or something else goes
wrong
"""
oldpath = self._adjust_cwd(oldpath)
@@ -480,12 +496,12 @@ class SFTPClient(BaseSFTP, ClosingContextManager):
def utime(self, path, times):
"""
- Set the access and modified times of the file specified by ``path``. If
- ``times`` is ``None``, then the file's access and modified times are set
- to the current time. Otherwise, ``times`` must be a 2-tuple of numbers,
- of the form ``(atime, mtime)``, which is used to set the access and
- modified times, respectively. This bizarre API is mimicked from Python
- for the sake of consistency -- I apologize.
+ Set the access and modified times of the file specified by ``path``.
+ If ``times`` is ``None``, then the file's access and modified times
+ are set to the current time. Otherwise, ``times`` must be a 2-tuple
+ of numbers, of the form ``(atime, mtime)``, which is used to set the
+ access and modified times, respectively. This bizarre API is mimicked
+ from Python for the sake of consistency -- I apologize.
:param str path: path of the file to modify
:param tuple times:
@@ -507,8 +523,7 @@ class SFTPClient(BaseSFTP, ClosingContextManager):
method on Python file objects.
:param str path: path of the file to modify
- :param size: the new size of the file
- :type size: int or long
+ :param int size: the new size of the file
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'truncate(%r, %r)' % (path, size))
@@ -547,7 +562,7 @@ class SFTPClient(BaseSFTP, ClosingContextManager):
:param str path: path to be normalized
:return: normalized form of the given path (as a `str`)
- :raises IOError: if the path can't be resolved on the server
+ :raises: ``IOError`` -- if the path can't be resolved on the server
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'normalize(%r)' % path)
@@ -570,7 +585,8 @@ class SFTPClient(BaseSFTP, ClosingContextManager):
:param str path: new current working directory
- :raises IOError: if the requested path doesn't exist on the server
+ :raises:
+ ``IOError`` -- if the requested path doesn't exist on the server
.. versionadded:: 1.4
"""
@@ -578,7 +594,8 @@ class SFTPClient(BaseSFTP, ClosingContextManager):
self._cwd = None
return
if not stat.S_ISDIR(self.stat(path).st_mode):
- raise SFTPError(errno.ENOTDIR, "%s: %s" % (os.strerror(errno.ENOTDIR), path))
+ raise SFTPError(
+ errno.ENOTDIR, "%s: %s" % (os.strerror(errno.ENOTDIR), path))
self._cwd = b(self.normalize(path))
def getcwd(self):
@@ -639,7 +656,8 @@ class SFTPClient(BaseSFTP, ClosingContextManager):
if confirm:
s = self.stat(remotepath)
if s.st_size != size:
- raise IOError('size mismatch in put! %d != %d' % (s.st_size, size))
+ raise IOError(
+ 'size mismatch in put! %d != %d' % (s.st_size, size))
else:
s = SFTPAttributes()
return s
@@ -663,7 +681,8 @@ class SFTPClient(BaseSFTP, ClosingContextManager):
whether to do a stat() on the file afterwards to confirm the file
size
- :return: an `.SFTPAttributes` object containing attributes about the given file
+ :return: an `.SFTPAttributes` object containing attributes about the
+ given file
.. versionadded:: 1.4
.. versionchanged:: 1.7.4
@@ -699,8 +718,6 @@ class SFTPClient(BaseSFTP, ClosingContextManager):
reader=fr, writer=fl, file_size=file_size, callback=callback
)
- return size
-
def get(self, remotepath, localpath, callback=None):
"""
Copy a remote file (``remotepath``) from the SFTP server to the local
@@ -721,9 +738,10 @@ class SFTPClient(BaseSFTP, ClosingContextManager):
size = self.getfo(remotepath, fl, callback)
s = os.stat(localpath)
if s.st_size != size:
- raise IOError('size mismatch in get! %d != %d' % (s.st_size, size))
+ raise IOError(
+ 'size mismatch in get! %d != %d' % (s.st_size, size))
- ### internals...
+ # ...internals...
def _request(self, t, *arg):
num = self._async_request(type(None), t, *arg)
@@ -745,7 +763,8 @@ class SFTPClient(BaseSFTP, ClosingContextManager):
elif isinstance(item, SFTPAttributes):
item._pack(msg)
else:
- raise Exception('unknown type for %r type %r' % (item, type(item)))
+ raise Exception(
+ 'unknown type for %r type %r' % (item, type(item)))
num = self.request_number
self._expecting[num] = fileobj
self.request_number += 1
@@ -765,7 +784,8 @@ class SFTPClient(BaseSFTP, ClosingContextManager):
self._lock.acquire()
try:
if num not in self._expecting:
- # might be response for a file that was closed before responses came back
+ # might be response for a file that was closed before
+ # responses came back
self._log(DEBUG, 'Unexpected response #%d' % (num,))
if waitfor is None:
# just doing a single check
@@ -780,7 +800,10 @@ class SFTPClient(BaseSFTP, ClosingContextManager):
if t == CMD_STATUS:
self._convert_status(msg)
return t, msg
- if fileobj is not type(None):
+
+ # can not rewrite this to deal with E721, either as a None check
+ # nor as not an instance of None or NoneType
+ if fileobj is not type(None): # noqa
fileobj._async_response(t, msg, num)
if waitfor is None:
# just doing a single check
@@ -828,6 +851,6 @@ class SFTPClient(BaseSFTP, ClosingContextManager):
class SFTP(SFTPClient):
"""
- An alias for `.SFTPClient` for backwards compatability.
+ An alias for `.SFTPClient` for backwards compatibility.
"""
pass
diff --git a/paramiko/sftp_file.py b/paramiko/sftp_file.py
index fdf667cd..337cdbeb 100644
--- a/paramiko/sftp_file.py
+++ b/paramiko/sftp_file.py
@@ -31,8 +31,10 @@ from paramiko.common import DEBUG
from paramiko.file import BufferedFile
from paramiko.py3compat import long
-from paramiko.sftp import CMD_CLOSE, CMD_READ, CMD_DATA, SFTPError, CMD_WRITE, \
- CMD_STATUS, CMD_FSTAT, CMD_ATTRS, CMD_FSETSTAT, CMD_EXTENDED
+from paramiko.sftp import (
+ CMD_CLOSE, CMD_READ, CMD_DATA, SFTPError, CMD_WRITE, CMD_STATUS, CMD_FSTAT,
+ CMD_ATTRS, CMD_FSETSTAT, CMD_EXTENDED,
+)
from paramiko.sftp_attr import SFTPAttributes
@@ -87,7 +89,8 @@ class SFTPFile (BufferedFile):
BufferedFile.close(self)
try:
if async:
- # GC'd file handle could be called from an arbitrary thread -- don't wait for a response
+ # GC'd file handle could be called from an arbitrary thread
+ # -- don't wait for a response
self.sftp._async_request(type(None), CMD_CLOSE, self.handle)
else:
self.sftp._request(CMD_CLOSE, self.handle)
@@ -99,7 +102,8 @@ class SFTPFile (BufferedFile):
pass
def _data_in_prefetch_requests(self, offset, size):
- k = [x for x in list(self._prefetch_extents.values()) if x[0] <= offset]
+ k = [x for x in list(self._prefetch_extents.values())
+ if x[0] <= offset]
if len(k) == 0:
return False
k.sort(key=lambda x: x[0])
@@ -110,8 +114,11 @@ class SFTPFile (BufferedFile):
if buf_offset + buf_size >= offset + size:
# inclusive
return True
- # well, we have part of the request. see if another chunk has the rest.
- return self._data_in_prefetch_requests(buf_offset + buf_size, offset + size - buf_offset - buf_size)
+ # well, we have part of the request. see if another chunk has
+ # the rest.
+ return self._data_in_prefetch_requests(
+ buf_offset + buf_size,
+ offset + size - buf_offset - buf_size)
def _data_in_prefetch_buffers(self, offset):
"""
@@ -135,7 +142,8 @@ class SFTPFile (BufferedFile):
read data out of the prefetch buffer, if possible. if the data isn't
in the buffer, return None. otherwise, behaves like a normal read.
"""
- # while not closed, and haven't fetched past the current position, and haven't reached EOF...
+ # while not closed, and haven't fetched past the current position,
+ # and haven't reached EOF...
while True:
offset = self._data_in_prefetch_buffers(self._realpos)
if offset is not None:
@@ -165,7 +173,12 @@ class SFTPFile (BufferedFile):
data = self._read_prefetch(size)
if data is not None:
return data
- t, msg = self.sftp._request(CMD_READ, self.handle, long(self._realpos), int(size))
+ t, msg = self.sftp._request(
+ CMD_READ,
+ self.handle,
+ long(self._realpos),
+ int(size)
+ )
if t != CMD_DATA:
raise SFTPError('Expected data')
return msg.get_string()
@@ -173,8 +186,18 @@ class SFTPFile (BufferedFile):
def _write(self, data):
# may write less than requested if it would exceed max packet size
chunk = min(len(data), self.MAX_REQUEST_SIZE)
- self._reqs.append(self.sftp._async_request(type(None), CMD_WRITE, self.handle, long(self._realpos), data[:chunk]))
- if not self.pipelined or (len(self._reqs) > 100 and self.sftp.sock.recv_ready()):
+ sftp_async_request = self.sftp._async_request(
+ type(None),
+ CMD_WRITE,
+ self.handle,
+ long(self._realpos),
+ data[:chunk]
+ )
+ self._reqs.append(sftp_async_request)
+ if (
+ not self.pipelined or
+ (len(self._reqs) > 100 and self.sftp.sock.recv_ready())
+ ):
while len(self._reqs):
req = self._reqs.popleft()
t, msg = self.sftp._read_response(req)
@@ -228,6 +251,11 @@ class SFTPFile (BufferedFile):
return True
def seek(self, offset, whence=0):
+ """
+ Set the file's current position.
+
+ See `file.seek` for details.
+ """
self.flush()
if whence == self.SEEK_SET:
self._realpos = self._pos = offset
@@ -244,7 +272,8 @@ class SFTPFile (BufferedFile):
exactly like `.SFTPClient.stat`, except that it operates on an
already-open file.
- :return: an `.SFTPAttributes` object containing attributes about this file.
+ :returns:
+ an `.SFTPAttributes` object containing attributes about this file.
"""
t, msg = self.sftp._request(CMD_FSTAT, self.handle)
if t != CMD_ATTRS:
@@ -274,7 +303,9 @@ class SFTPFile (BufferedFile):
:param int uid: new owner's uid
:param int gid: new group id
"""
- self.sftp._log(DEBUG, 'chown(%s, %r, %r)' % (hexlify(self.handle), uid, gid))
+ self.sftp._log(
+ DEBUG,
+ 'chown(%s, %r, %r)' % (hexlify(self.handle), uid, gid))
attr = SFTPAttributes()
attr.st_uid, attr.st_gid = uid, gid
self.sftp._request(CMD_FSETSTAT, self.handle, attr)
@@ -282,11 +313,11 @@ class SFTPFile (BufferedFile):
def utime(self, times):
"""
Set the access and modified times of this file. If
- ``times`` is ``None``, then the file's access and modified times are set
- to the current time. Otherwise, ``times`` must be a 2-tuple of numbers,
- of the form ``(atime, mtime)``, which is used to set the access and
- modified times, respectively. This bizarre API is mimicked from Python
- for the sake of consistency -- I apologize.
+ ``times`` is ``None``, then the file's access and modified times are
+ set to the current time. Otherwise, ``times`` must be a 2-tuple of
+ numbers, of the form ``(atime, mtime)``, which is used to set the
+ access and modified times, respectively. This bizarre API is mimicked
+ from Python for the sake of consistency -- I apologize.
:param tuple times:
``None`` or a tuple of (access time, modified time) in standard
@@ -306,9 +337,10 @@ class SFTPFile (BufferedFile):
Python file objects.
:param size: the new size of the file
- :type size: int or long
"""
- self.sftp._log(DEBUG, 'truncate(%s, %r)' % (hexlify(self.handle), size))
+ self.sftp._log(
+ DEBUG,
+ 'truncate(%s, %r)' % (hexlify(self.handle), size))
attr = SFTPAttributes()
attr.st_size = size
self.sftp._request(CMD_FSETSTAT, self.handle, attr)
@@ -319,9 +351,9 @@ class SFTPFile (BufferedFile):
to verify a successful upload or download, or for various rsync-like
operations.
- The file is hashed from ``offset``, for ``length`` bytes. If ``length``
- is 0, the remainder of the file is hashed. Thus, if both ``offset``
- and ``length`` are zero, the entire file is hashed.
+ The file is hashed from ``offset``, for ``length`` bytes.
+ If ``length`` is 0, the remainder of the file is hashed. Thus, if both
+ ``offset`` and ``length`` are zero, the entire file is hashed.
Normally, ``block_size`` will be 0 (the default), and this method will
return a byte string representing the requested hash (for example, a
@@ -342,30 +374,28 @@ class SFTPFile (BufferedFile):
:param offset:
offset into the file to begin hashing (0 means to start from the
beginning)
- :type offset: int or long
:param length:
number of bytes to hash (0 means continue to the end of the file)
- :type length: int or long
:param int block_size:
number of bytes to hash per result (must not be less than 256; 0
means to compute only one hash of the entire segment)
- :type block_size: int
:return:
`str` of bytes representing the hash of each block, concatenated
together
- :raises IOError: if the server doesn't support the "check-file"
- extension, or possibly doesn't support the hash algorithm
- requested
+ :raises:
+ ``IOError`` -- if the server doesn't support the "check-file"
+ extension, or possibly doesn't support the hash algorithm requested
.. note:: Many (most?) servers don't support this extension yet.
.. versionadded:: 1.4
"""
- t, msg = self.sftp._request(CMD_EXTENDED, 'check-file', self.handle,
- hash_algorithm, long(offset), long(length), block_size)
- ext = msg.get_text()
- alg = msg.get_text()
+ t, msg = self.sftp._request(
+ CMD_EXTENDED, 'check-file', self.handle,
+ hash_algorithm, long(offset), long(length), block_size)
+ msg.get_text() # ext
+ msg.get_text() # alg
data = msg.get_remainder()
return data
@@ -417,7 +447,7 @@ class SFTPFile (BufferedFile):
compatibility.
"""
if file_size is None:
- file_size = self.stat().st_size;
+ file_size = self.stat().st_size
# queue up async reads for the rest of the file
chunks = []
@@ -437,9 +467,8 @@ class SFTPFile (BufferedFile):
once.
:param chunks:
- a list of (offset, length) tuples indicating which sections of the
- file to read
- :type chunks: list(tuple(long, int))
+ a list of ``(offset, length)`` tuples indicating which sections of
+ the file to read
:return: a list of blocks read, in the same order as in ``chunks``
.. versionadded:: 1.5.4
@@ -449,7 +478,10 @@ class SFTPFile (BufferedFile):
read_chunks = []
for offset, size in chunks:
# don't fetch data that's already in the prefetch buffer
- if self._data_in_prefetch_buffers(offset) or self._data_in_prefetch_requests(offset, size):
+ if (
+ self._data_in_prefetch_buffers(offset) or
+ self._data_in_prefetch_requests(offset, size)
+ ):
continue
# break up anything larger than the max read size
@@ -465,7 +497,7 @@ class SFTPFile (BufferedFile):
self.seek(x[0])
yield self.read(x[1])
- ### internals...
+ # ...internals...
def _get_size(self):
try:
@@ -485,7 +517,12 @@ class SFTPFile (BufferedFile):
# do these read requests in a temporary thread because there may be
# a lot of them, so it may block.
for offset, length in chunks:
- num = self.sftp._async_request(self, CMD_READ, self.handle, long(offset), int(length))
+ num = self.sftp._async_request(
+ self,
+ CMD_READ,
+ self.handle,
+ long(offset),
+ int(length))
with self._prefetch_lock:
self._prefetch_extents[num] = (offset, length)
diff --git a/paramiko/sftp_handle.py b/paramiko/sftp_handle.py
index edceb5ad..ca473900 100644
--- a/paramiko/sftp_handle.py
+++ b/paramiko/sftp_handle.py
@@ -30,10 +30,10 @@ class SFTPHandle (ClosingContextManager):
Abstract object representing a handle to an open file (or folder) in an
SFTP server implementation. Each handle has a string representation used
by the client to refer to the underlying file.
-
+
Server implementations can (and should) subclass SFTPHandle to implement
features of a file handle, like `stat` or `chattr`.
-
+
Instances of this class may be used as context managers.
"""
def __init__(self, flags=0):
@@ -41,8 +41,9 @@ class SFTPHandle (ClosingContextManager):
Create a new file handle representing a local file being served over
SFTP. If ``flags`` is passed in, it's used to determine if the file
is open in append mode.
-
- :param int flags: optional flags as passed to `.SFTPServerInterface.open`
+
+ :param int flags: optional flags as passed to
+ `.SFTPServerInterface.open`
"""
self.__flags = flags
self.__name = None
@@ -55,7 +56,7 @@ class SFTPHandle (ClosingContextManager):
When a client closes a file, this method is called on the handle.
Normally you would use this method to close the underlying OS level
file object(s).
-
+
The default implementation checks for attributes on ``self`` named
``readfile`` and/or ``writefile``, and if either or both are present,
their ``close()`` methods are called. This means that if you are
@@ -76,7 +77,7 @@ class SFTPHandle (ClosingContextManager):
to be 64 bits.
If the end of the file has been reached, this method may return an
- empty string to signify EOF, or it may also return `.SFTP_EOF`.
+ empty string to signify EOF, or it may also return ``SFTP_EOF``.
The default implementation checks for an attribute on ``self`` named
``readfile``, and if present, performs the read operation on the Python
@@ -84,7 +85,6 @@ class SFTPHandle (ClosingContextManager):
common case where you are wrapping a Python file object.)
:param offset: position in the file to start reading from.
- :type offset: int or long
:param int length: number of bytes to attempt to read.
:return: data read from the file, or an SFTP error code, as a `str`.
"""
@@ -117,11 +117,10 @@ class SFTPHandle (ClosingContextManager):
differently from ``readfile`` to make it easy to implement read-only
(or write-only) files, but if both attributes are present, they should
refer to the same file.
-
+
:param offset: position in the file to start reading from.
- :type offset: int or long
:param str data: data to write into the file.
- :return: an SFTP error code like `.SFTP_OK`.
+ :return: an SFTP error code like ``SFTP_OK``.
"""
writefile = getattr(self, 'writefile', None)
if writefile is None:
@@ -151,7 +150,7 @@ class SFTPHandle (ClosingContextManager):
:return:
an attributes object for the given file, or an SFTP error code
- (like `.SFTP_PERMISSION_DENIED`).
+ (like ``SFTP_PERMISSION_DENIED``).
:rtype: `.SFTPAttributes` or error code
"""
return SFTP_OP_UNSUPPORTED
@@ -163,11 +162,11 @@ class SFTPHandle (ClosingContextManager):
check for the presence of fields before using them.
:param .SFTPAttributes attr: the attributes to change on this file.
- :return: an `int` error code like `.SFTP_OK`.
+ :return: an `int` error code like ``SFTP_OK``.
"""
return SFTP_OP_UNSUPPORTED
- ### internals...
+ # ...internals...
def _set_files(self, files):
"""
@@ -179,7 +178,7 @@ class SFTPHandle (ClosingContextManager):
def _get_next_files(self):
"""
- Used by the SFTP server code to retreive a cached directory
+ Used by the SFTP server code to retrieve a cached directory
listing.
"""
fnlist = self.__files[:16]
diff --git a/paramiko/sftp_server.py b/paramiko/sftp_server.py
index ce287e8f..1cfe286b 100644
--- a/paramiko/sftp_server.py
+++ b/paramiko/sftp_server.py
@@ -26,8 +26,9 @@ import sys
from hashlib import md5, sha1
from paramiko import util
-from paramiko.sftp import BaseSFTP, Message, SFTP_FAILURE, \
- SFTP_PERMISSION_DENIED, SFTP_NO_SUCH_FILE
+from paramiko.sftp import (
+ BaseSFTP, Message, SFTP_FAILURE, SFTP_PERMISSION_DENIED, SFTP_NO_SUCH_FILE,
+)
from paramiko.sftp_si import SFTPServerInterface
from paramiko.sftp_attr import SFTPAttributes
from paramiko.common import DEBUG
@@ -36,13 +37,15 @@ from paramiko.server import SubsystemHandler
# known hash algorithms for the "check-file" extension
-from paramiko.sftp import CMD_HANDLE, SFTP_DESC, CMD_STATUS, SFTP_EOF, CMD_NAME, \
- SFTP_BAD_MESSAGE, CMD_EXTENDED_REPLY, SFTP_FLAG_READ, SFTP_FLAG_WRITE, \
- SFTP_FLAG_APPEND, SFTP_FLAG_CREATE, SFTP_FLAG_TRUNC, SFTP_FLAG_EXCL, \
- CMD_NAMES, CMD_OPEN, CMD_CLOSE, SFTP_OK, CMD_READ, CMD_DATA, CMD_WRITE, \
- CMD_REMOVE, CMD_RENAME, CMD_MKDIR, CMD_RMDIR, CMD_OPENDIR, CMD_READDIR, \
- CMD_STAT, CMD_ATTRS, CMD_LSTAT, CMD_FSTAT, CMD_SETSTAT, CMD_FSETSTAT, \
- CMD_READLINK, CMD_SYMLINK, CMD_REALPATH, CMD_EXTENDED, SFTP_OP_UNSUPPORTED
+from paramiko.sftp import (
+ CMD_HANDLE, SFTP_DESC, CMD_STATUS, SFTP_EOF, CMD_NAME, SFTP_BAD_MESSAGE,
+ CMD_EXTENDED_REPLY, SFTP_FLAG_READ, SFTP_FLAG_WRITE, SFTP_FLAG_APPEND,
+ SFTP_FLAG_CREATE, SFTP_FLAG_TRUNC, SFTP_FLAG_EXCL, CMD_NAMES, CMD_OPEN,
+ CMD_CLOSE, SFTP_OK, CMD_READ, CMD_DATA, CMD_WRITE, CMD_REMOVE, CMD_RENAME,
+ CMD_MKDIR, CMD_RMDIR, CMD_OPENDIR, CMD_READDIR, CMD_STAT, CMD_ATTRS,
+ CMD_LSTAT, CMD_FSTAT, CMD_SETSTAT, CMD_FSETSTAT, CMD_READLINK, CMD_SYMLINK,
+ CMD_REALPATH, CMD_EXTENDED, SFTP_OP_UNSUPPORTED,
+)
_hash_class = {
'sha1': sha1,
@@ -57,7 +60,8 @@ class SFTPServer (BaseSFTP, SubsystemHandler):
Use `.Transport.set_subsystem_handler` to activate this class.
"""
- def __init__(self, channel, name, server, sftp_si=SFTPServerInterface, *largs, **kwargs):
+ def __init__(self, channel, name, server, sftp_si=SFTPServerInterface,
+ *largs, **kwargs):
"""
The constructor for SFTPServer is meant to be called from within the
`.Transport` as a subsystem handler. ``server`` and any additional
@@ -68,7 +72,7 @@ class SFTPServer (BaseSFTP, SubsystemHandler):
:param str name: name of the requested subsystem.
:param .ServerInterface server:
the server object associated with this channel and subsystem
- :param class sftp_si:
+ :param sftp_si:
a subclass of `.SFTPServerInterface` to use for handling individual
requests.
"""
@@ -86,9 +90,13 @@ class SFTPServer (BaseSFTP, SubsystemHandler):
def _log(self, level, msg):
if issubclass(type(msg), list):
for m in msg:
- super(SFTPServer, self)._log(level, "[chan " + self.sock.get_name() + "] " + m)
+ super(SFTPServer, self)._log(
+ level,
+ "[chan " + self.sock.get_name() + "] " + m)
else:
- super(SFTPServer, self)._log(level, "[chan " + self.sock.get_name() + "] " + msg)
+ super(SFTPServer, self)._log(
+ level,
+ "[chan " + self.sock.get_name() + "] " + msg)
def start_subsystem(self, name, transport, channel):
self.sock = channel
@@ -121,7 +129,8 @@ class SFTPServer (BaseSFTP, SubsystemHandler):
def finish_subsystem(self):
self.server.session_ended()
super(SFTPServer, self).finish_subsystem()
- # close any file handles that were left open (so we can return them to the OS quickly)
+ # close any file handles that were left open
+ # (so we can return them to the OS quickly)
for f in self.file_table.values():
f.close()
for f in self.folder_table.values():
@@ -175,7 +184,7 @@ class SFTPServer (BaseSFTP, SubsystemHandler):
with open(filename, 'w+') as f:
f.truncate(attr.st_size)
- ### internals...
+ # ...internals...
def _response(self, request_number, t, *arg):
msg = Message()
@@ -190,7 +199,9 @@ class SFTPServer (BaseSFTP, SubsystemHandler):
elif type(item) is SFTPAttributes:
item._pack(msg)
else:
- raise Exception('unknown type for ' + repr(item) + ' type ' + repr(type(item)))
+ raise Exception(
+ 'unknown type for {0!r} type {1!r}'.format(
+ item, type(item)))
self._send_packet(t, msg)
def _send_handle_response(self, request_number, handle, folder=False):
@@ -212,7 +223,8 @@ class SFTPServer (BaseSFTP, SubsystemHandler):
desc = SFTP_DESC[code]
except IndexError:
desc = 'Unknown'
- # some clients expect a "langauge" tag at the end (but don't mind it being blank)
+ # some clients expect a "language" tag at the end
+ # (but don't mind it being blank)
self._response(request_number, CMD_STATUS, code, desc, '')
def _open_folder(self, request_number, path):
@@ -251,7 +263,8 @@ class SFTPServer (BaseSFTP, SubsystemHandler):
length = msg.get_int64()
block_size = msg.get_int()
if handle not in self.file_table:
- self._send_status(request_number, SFTP_BAD_MESSAGE, 'Invalid handle')
+ self._send_status(
+ request_number, SFTP_BAD_MESSAGE, 'Invalid handle')
return
f = self.file_table[handle]
for x in alg_list:
@@ -260,7 +273,8 @@ class SFTPServer (BaseSFTP, SubsystemHandler):
alg = _hash_class[x]
break
else:
- self._send_status(request_number, SFTP_FAILURE, 'No supported hash types found')
+ self._send_status(
+ request_number, SFTP_FAILURE, 'No supported hash types found')
return
if length == 0:
st = f.stat()
@@ -271,7 +285,8 @@ class SFTPServer (BaseSFTP, SubsystemHandler):
if block_size == 0:
block_size = length
if block_size < 256:
- self._send_status(request_number, SFTP_FAILURE, 'Block size too small')
+ self._send_status(
+ request_number, SFTP_FAILURE, 'Block size too small')
return
sum_out = bytes()
@@ -285,7 +300,8 @@ class SFTPServer (BaseSFTP, SubsystemHandler):
while count < blocklen:
data = f.read(offset, chunklen)
if not isinstance(data, bytes_types):
- self._send_status(request_number, data, 'Unable to hash file')
+ self._send_status(
+ request_number, data, 'Unable to hash file')
return
hash_obj.update(data)
count += len(data)
@@ -323,7 +339,8 @@ class SFTPServer (BaseSFTP, SubsystemHandler):
path = msg.get_text()
flags = self._convert_pflags(msg.get_int())
attr = SFTPAttributes._from_msg(msg)
- self._send_handle_response(request_number, self.server.open(path, flags, attr))
+ self._send_handle_response(
+ request_number, self.server.open(path, flags, attr))
elif t == CMD_CLOSE:
handle = msg.get_binary()
if handle in self.folder_table:
@@ -335,13 +352,15 @@ class SFTPServer (BaseSFTP, SubsystemHandler):
del self.file_table[handle]
self._send_status(request_number, SFTP_OK)
return
- self._send_status(request_number, SFTP_BAD_MESSAGE, 'Invalid handle')
+ self._send_status(
+ request_number, SFTP_BAD_MESSAGE, 'Invalid handle')
elif t == CMD_READ:
handle = msg.get_binary()
offset = msg.get_int64()
length = msg.get_int()
if handle not in self.file_table:
- self._send_status(request_number, SFTP_BAD_MESSAGE, 'Invalid handle')
+ self._send_status(
+ request_number, SFTP_BAD_MESSAGE, 'Invalid handle')
return
data = self.file_table[handle].read(offset, length)
if isinstance(data, (bytes_types, string_types)):
@@ -356,16 +375,19 @@ class SFTPServer (BaseSFTP, SubsystemHandler):
offset = msg.get_int64()
data = msg.get_binary()
if handle not in self.file_table:
- self._send_status(request_number, SFTP_BAD_MESSAGE, 'Invalid handle')
+ self._send_status(
+ request_number, SFTP_BAD_MESSAGE, 'Invalid handle')
return
- self._send_status(request_number, self.file_table[handle].write(offset, data))
+ self._send_status(
+ request_number, self.file_table[handle].write(offset, data))
elif t == CMD_REMOVE:
path = msg.get_text()
self._send_status(request_number, self.server.remove(path))
elif t == CMD_RENAME:
oldpath = msg.get_text()
newpath = msg.get_text()
- self._send_status(request_number, self.server.rename(oldpath, newpath))
+ self._send_status(
+ request_number, self.server.rename(oldpath, newpath))
elif t == CMD_MKDIR:
path = msg.get_text()
attr = SFTPAttributes._from_msg(msg)
@@ -380,7 +402,8 @@ class SFTPServer (BaseSFTP, SubsystemHandler):
elif t == CMD_READDIR:
handle = msg.get_binary()
if handle not in self.folder_table:
- self._send_status(request_number, SFTP_BAD_MESSAGE, 'Invalid handle')
+ self._send_status(
+ request_number, SFTP_BAD_MESSAGE, 'Invalid handle')
return
folder = self.folder_table[handle]
self._read_folder(request_number, folder)
@@ -401,7 +424,8 @@ class SFTPServer (BaseSFTP, SubsystemHandler):
elif t == CMD_FSTAT:
handle = msg.get_binary()
if handle not in self.file_table:
- self._send_status(request_number, SFTP_BAD_MESSAGE, 'Invalid handle')
+ self._send_status(
+ request_number, SFTP_BAD_MESSAGE, 'Invalid handle')
return
resp = self.file_table[handle].stat()
if issubclass(type(resp), SFTPAttributes):
@@ -416,25 +440,31 @@ class SFTPServer (BaseSFTP, SubsystemHandler):
handle = msg.get_binary()
attr = SFTPAttributes._from_msg(msg)
if handle not in self.file_table:
- self._response(request_number, SFTP_BAD_MESSAGE, 'Invalid handle')
+ self._response(
+ request_number, SFTP_BAD_MESSAGE, 'Invalid handle')
return
- self._send_status(request_number, self.file_table[handle].chattr(attr))
+ self._send_status(
+ request_number, self.file_table[handle].chattr(attr))
elif t == CMD_READLINK:
path = msg.get_text()
resp = self.server.readlink(path)
if isinstance(resp, (bytes_types, string_types)):
- self._response(request_number, CMD_NAME, 1, resp, '', SFTPAttributes())
+ self._response(
+ request_number, CMD_NAME, 1, resp, '', SFTPAttributes())
else:
self._send_status(request_number, resp)
elif t == CMD_SYMLINK:
- # the sftp 2 draft is incorrect here! path always follows target_path
+ # the sftp 2 draft is incorrect here!
+ # path always follows target_path
target_path = msg.get_text()
path = msg.get_text()
- self._send_status(request_number, self.server.symlink(target_path, path))
+ self._send_status(
+ request_number, self.server.symlink(target_path, path))
elif t == CMD_REALPATH:
path = msg.get_text()
rpath = self.server.canonicalize(path)
- self._response(request_number, CMD_NAME, 1, rpath, '', SFTPAttributes())
+ self._response(
+ request_number, CMD_NAME, 1, rpath, '', SFTPAttributes())
elif t == CMD_EXTENDED:
tag = msg.get_text()
if tag == 'check-file':
diff --git a/paramiko/sftp_si.py b/paramiko/sftp_si.py
index 61db956c..09e7025c 100644
--- a/paramiko/sftp_si.py
+++ b/paramiko/sftp_si.py
@@ -35,16 +35,15 @@ class SFTPServerInterface (object):
SFTP sessions). However, raising an exception will usually cause the SFTP
session to abruptly end, so you will usually want to catch exceptions and
return an appropriate error code.
-
+
All paths are in string form instead of unicode because not all SFTP
clients & servers obey the requirement that paths be encoded in UTF-8.
"""
-
def __init__(self, server, *largs, **kwargs):
"""
Create a new SFTPServerInterface object. This method does nothing by
default and is meant to be overridden by subclasses.
-
+
:param .ServerInterface server:
the server object associated with this channel and SFTP subsystem
"""
@@ -73,7 +72,7 @@ class SFTPServerInterface (object):
on that file. On success, a new object subclassed from `.SFTPHandle`
should be returned. This handle will be used for future operations
on the file (read, write, etc). On failure, an error code such as
- `.SFTP_PERMISSION_DENIED` should be returned.
+ ``SFTP_PERMISSION_DENIED`` should be returned.
``flags`` contains the requested mode for opening (read-only,
write-append, etc) as a bitset of flags from the ``os`` module:
@@ -92,7 +91,7 @@ class SFTPServerInterface (object):
The ``attr`` object contains requested attributes of the file if it
has to be created. Some or all attribute fields may be missing if
the client didn't specify them.
-
+
.. note:: The SFTP protocol defines all files to be in "binary" mode.
There is no equivalent to Python's "text" mode.
@@ -121,13 +120,14 @@ class SFTPServerInterface (object):
`.SFTPAttributes.from_stat` will usually do what you want.
In case of an error, you should return one of the ``SFTP_*`` error
- codes, such as `.SFTP_PERMISSION_DENIED`.
+ codes, such as ``SFTP_PERMISSION_DENIED``.
- :param str path: the requested path (relative or absolute) to be listed.
+ :param str path: the requested path (relative or absolute) to be
+ listed.
:return:
a list of the files in the given folder, using `.SFTPAttributes`
objects.
-
+
.. note::
You should normalize the given ``path`` first (see the `os.path`
module) and check appropriate permissions before returning the list
@@ -150,7 +150,7 @@ class SFTPServerInterface (object):
for.
:return:
an `.SFTPAttributes` object for the given file, or an SFTP error
- code (like `.SFTP_PERMISSION_DENIED`).
+ code (like ``SFTP_PERMISSION_DENIED``).
"""
return SFTP_OP_UNSUPPORTED
@@ -168,7 +168,7 @@ class SFTPServerInterface (object):
:type path: str
:return:
an `.SFTPAttributes` object for the given file, or an SFTP error
- code (like `.SFTP_PERMISSION_DENIED`).
+ code (like ``SFTP_PERMISSION_DENIED``).
"""
return SFTP_OP_UNSUPPORTED
@@ -178,7 +178,7 @@ class SFTPServerInterface (object):
:param str path:
the requested path (relative or absolute) of the file to delete.
- :return: an SFTP error code `int` like `.SFTP_OK`.
+ :return: an SFTP error code `int` like ``SFTP_OK``.
"""
return SFTP_OP_UNSUPPORTED
@@ -189,7 +189,7 @@ class SFTPServerInterface (object):
and since there's no other (easy) way to move files via SFTP, it's
probably a good idea to implement "move" in this method too, even for
files that cross disk partition boundaries, if at all possible.
-
+
.. note:: You should return an error if a file with the same name as
``newpath`` already exists. (The rename operation should be
non-destructive.)
@@ -197,7 +197,7 @@ class SFTPServerInterface (object):
:param str oldpath:
the requested path (relative or absolute) of the existing file.
:param str newpath: the requested new path of the file.
- :return: an SFTP error code `int` like `.SFTP_OK`.
+ :return: an SFTP error code `int` like ``SFTP_OK``.
"""
return SFTP_OP_UNSUPPORTED
@@ -208,13 +208,13 @@ class SFTPServerInterface (object):
The ``attr`` object will contain only those fields provided by the
client in its request, so you should use ``hasattr`` to check for
- the presense of fields before using them. In some cases, the ``attr``
+ the presence of fields before using them. In some cases, the ``attr``
object may be completely empty.
:param str path:
requested path (relative or absolute) of the new folder.
:param .SFTPAttributes attr: requested attributes of the new folder.
- :return: an SFTP error code `int` like `.SFTP_OK`.
+ :return: an SFTP error code `int` like ``SFTP_OK``.
"""
return SFTP_OP_UNSUPPORTED
@@ -226,7 +226,7 @@ class SFTPServerInterface (object):
:param str path:
requested path (relative or absolute) of the folder to remove.
- :return: an SFTP error code `int` like `.SFTP_OK`.
+ :return: an SFTP error code `int` like ``SFTP_OK``.
"""
return SFTP_OP_UNSUPPORTED
@@ -241,7 +241,7 @@ class SFTPServerInterface (object):
:param attr:
requested attributes to change on the file (an `.SFTPAttributes`
object)
- :return: an error code `int` like `.SFTP_OK`.
+ :return: an error code `int` like ``SFTP_OK``.
"""
return SFTP_OP_UNSUPPORTED
@@ -267,25 +267,25 @@ class SFTPServerInterface (object):
# on windows, normalize backslashes to sftp/posix format
out = out.replace('\\', '/')
return out
-
+
def readlink(self, path):
"""
Return the target of a symbolic link (or shortcut) on the server.
If the specified path doesn't refer to a symbolic link, an error
should be returned.
-
+
:param str path: path (relative or absolute) of the symbolic link.
:return:
the target `str` path of the symbolic link, or an error code like
- `.SFTP_NO_SUCH_FILE`.
+ ``SFTP_NO_SUCH_FILE``.
"""
return SFTP_OP_UNSUPPORTED
-
+
def symlink(self, target_path, path):
"""
Create a symbolic link on the server, as new pathname ``path``,
with ``target_path`` as the target of the link.
-
+
:param str target_path:
path (relative or absolute) of the target for this new symbolic
link.
diff --git a/paramiko/ssh_exception.py b/paramiko/ssh_exception.py
index ed36a952..e9ab8d66 100644
--- a/paramiko/ssh_exception.py
+++ b/paramiko/ssh_exception.py
@@ -31,11 +31,11 @@ class AuthenticationException (SSHException):
Exception raised when authentication failed for some reason. It may be
possible to retry with different credentials. (Other classes specify more
specific reasons.)
-
+
.. versionadded:: 1.6
"""
pass
-
+
class PasswordRequiredException (AuthenticationException):
"""
@@ -49,15 +49,13 @@ class BadAuthenticationType (AuthenticationException):
Exception raised when an authentication type (like password) is used, but
the server isn't allowing that type. (It may only allow public-key, for
example.)
-
- :ivar list allowed_types:
- list of allowed authentication types provided by the server (possible
- values are: ``"none"``, ``"password"``, and ``"publickey"``).
-
+
.. versionadded:: 1.1
"""
+ #: list of allowed authentication types provided by the server (possible
+ #: values are: ``"none"``, ``"password"``, and ``"publickey"``).
allowed_types = []
-
+
def __init__(self, explanation, types):
AuthenticationException.__init__(self, explanation)
self.allowed_types = types
@@ -65,7 +63,9 @@ class BadAuthenticationType (AuthenticationException):
self.args = (explanation, types, )
def __str__(self):
- return SSHException.__str__(self) + ' (allowed_types=%r)' % self.allowed_types
+ return '{0} (allowed_types={1!r})'.format(
+ SSHException.__str__(self), self.allowed_types
+ )
class PartialAuthentication (AuthenticationException):
@@ -73,7 +73,7 @@ class PartialAuthentication (AuthenticationException):
An internal exception thrown in the case of partial authentication.
"""
allowed_types = []
-
+
def __init__(self, types):
AuthenticationException.__init__(self, 'partial authentication')
self.allowed_types = types
@@ -84,9 +84,9 @@ class PartialAuthentication (AuthenticationException):
class ChannelException (SSHException):
"""
Exception raised when an attempt to open a new `.Channel` fails.
-
- :ivar int code: the error code returned by the server
-
+
+ :param int code: the error code returned by the server
+
.. versionadded:: 1.6
"""
def __init__(self, code, text):
@@ -99,19 +99,19 @@ class ChannelException (SSHException):
class BadHostKeyException (SSHException):
"""
The host key given by the SSH server did not match what we were expecting.
-
- :ivar str hostname: the hostname of the SSH server
- :ivar PKey got_key: the host key presented by the server
- :ivar PKey expected_key: the host key expected
-
+
+ :param str hostname: the hostname of the SSH server
+ :param PKey got_key: the host key presented by the server
+ :param PKey expected_key: the host key expected
+
.. versionadded:: 1.6
"""
def __init__(self, hostname, got_key, expected_key):
- SSHException.__init__(self,
- 'Host key for server %s does not match : got %s expected %s' % (
- hostname,
- got_key.get_base64(),
- expected_key.get_base64()))
+ message = 'Host key for server {0} does not match: got {1}, expected {2}' # noqa
+ message = message.format(
+ hostname, got_key.get_base64(),
+ expected_key.get_base64())
+ SSHException.__init__(self, message)
self.hostname = hostname
self.key = got_key
self.expected_key = expected_key
@@ -123,8 +123,8 @@ class ProxyCommandFailure (SSHException):
"""
The "ProxyCommand" found in the .ssh/config file returned an error.
- :ivar str command: The command line that is generating this exception.
- :ivar str error: The error captured from the proxy command output.
+ :param str command: The command line that is generating this exception.
+ :param str error: The error captured from the proxy command output.
"""
def __init__(self, command, error):
SSHException.__init__(self,
@@ -147,7 +147,7 @@ class NoValidConnectionsError(socket.error):
`socket.error` subclass, message, etc) we expose a single unified error
message and a ``None`` errno so that instances of this class match most
normal handling of `socket.error` objects.
-
+
To see the wrapped exception objects, access the ``errors`` attribute.
``errors`` is a dict whose keys are address tuples (e.g. ``('127.0.0.1',
22)``) and whose values are the exception encountered trying to connect to
diff --git a/paramiko/ssh_gss.py b/paramiko/ssh_gss.py
index e906a851..414485f9 100644
--- a/paramiko/ssh_gss.py
+++ b/paramiko/ssh_gss.py
@@ -72,9 +72,8 @@ def GSSAuth(auth_method, gss_deleg_creds=True):
We delegate credentials by default.
:return: Either an `._SSH_GSSAPI` (Unix) object or an
`_SSH_SSPI` (Windows) object
- :rtype: Object
- :raise ImportError: If no GSS-API / SSPI module could be imported.
+ :raises: ``ImportError`` -- If no GSS-API / SSPI module could be imported.
:see: `RFC 4462 <http://www.ietf.org/rfc/rfc4462.txt>`_
:note: Check for the available API and return either an `._SSH_GSSAPI`
@@ -131,7 +130,6 @@ class _SSH_GSSAuth(object):
as the only service value.
:param str service: The desired SSH service
- :rtype: Void
"""
if service.find("ssh-"):
self._service = service
@@ -142,7 +140,6 @@ class _SSH_GSSAuth(object):
username is not set by C{ssh_init_sec_context}.
:param str username: The name of the user who attempts to login
- :rtype: Void
"""
self._username = username
@@ -155,7 +152,6 @@ class _SSH_GSSAuth(object):
:return: A byte sequence containing the number of supported
OIDs, the length of the OID and the actual OID encoded with
DER
- :rtype: Bytes
:note: In server mode we just return the OID length and the DER encoded
OID.
"""
@@ -172,7 +168,6 @@ class _SSH_GSSAuth(object):
:param str desired_mech: The desired GSS-API mechanism of the client
:return: ``True`` if the given OID is supported, otherwise C{False}
- :rtype: Boolean
"""
mech, __ = decoder.decode(desired_mech)
if mech.__str__() != self._krb5_mech:
@@ -180,14 +175,13 @@ class _SSH_GSSAuth(object):
return True
# Internals
- #--------------------------------------------------------------------------
+ # -------------------------------------------------------------------------
def _make_uint32(self, integer):
"""
Create a 32 bit unsigned integer (The byte sequence of an integer).
:param int integer: The integer value to convert
:return: The byte sequence of a 32 bit integer
- :rtype: Bytes
"""
return struct.pack("!I", integer)
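For reference, the ``"!I"`` format used here is plain network-byte-order (big-endian) packing of a 32-bit unsigned value:

    import struct

    assert struct.pack("!I", 1) == b"\x00\x00\x00\x01"
    assert struct.pack("!I", 0xDEADBEEF) == b"\xde\xad\xbe\xef"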
@@ -207,7 +201,6 @@ class _SSH_GSSAuth(object):
string service (ssh-connection),
string authentication-method
(gssapi-with-mic or gssapi-keyex)
- :rtype: Bytes
"""
mic = self._make_uint32(len(session_id))
mic += session_id
@@ -256,11 +249,11 @@ class _SSH_GSSAPI(_SSH_GSSAuth):
("pseudo negotiated" mechanism, because we
support just the krb5 mechanism :-))
:param str recv_token: The GSS-API token received from the Server
- :raise SSHException: Is raised if the desired mechanism of the client
- is not supported
- :return: A ``String`` if the GSS-API has returned a token or ``None`` if
- no token was returned
- :rtype: String or None
+ :raises:
+ `.SSHException` -- Is raised if the desired mechanism of the client
+ is not supported
+ :return: A ``String`` if the GSS-API has returned a token or
+ ``None`` if no token was returned
"""
self._username = username
self._gss_host = target
@@ -286,8 +279,9 @@ class _SSH_GSSAPI(_SSH_GSSAuth):
else:
token = self._gss_ctxt.step(recv_token)
except gssapi.GSSException:
- raise gssapi.GSSException("{0} Target: {1}".format(sys.exc_info()[1],
- self._gss_host))
+ message = "{0} Target: {1}".format(
+ sys.exc_info()[1], self._gss_host)
+ raise gssapi.GSSException(message)
self._gss_ctxt_status = self._gss_ctxt.established
return token
@@ -303,8 +297,6 @@ class _SSH_GSSAPI(_SSH_GSSAuth):
gssapi-keyex:
Returns the MIC token from GSS-API with the SSH session ID as
message.
- :rtype: String
- :see: `._ssh_build_mic`
"""
self._session_id = session_id
if not gss_kex:
@@ -328,7 +320,6 @@ class _SSH_GSSAPI(_SSH_GSSAuth):
if it's not the initial call.
:return: A ``String`` if the GSS-API has returned a token or ``None``
if no token was returned
- :rtype: String or None
"""
# hostname and username are not required for GSSAPI, but for SSPI
self._gss_host = hostname
@@ -347,7 +338,7 @@ class _SSH_GSSAPI(_SSH_GSSAuth):
:param str session_id: The SSH session ID
:param str username: The name of the user who attempts to login
:return: None if the MIC check was successful
- :raises gssapi.GSSException: if the MIC check failed
+ :raises: ``gssapi.GSSException`` -- if the MIC check failed
"""
self._session_id = session_id
self._username = username
@@ -370,7 +361,6 @@ class _SSH_GSSAPI(_SSH_GSSAuth):
Checks if credentials are delegated (server mode).
:return: ``True`` if credentials are delegated, otherwise ``False``
- :rtype: bool
"""
if self._gss_srv_ctxt.delegated_cred is not None:
return True
@@ -383,8 +373,9 @@ class _SSH_GSSAPI(_SSH_GSSAuth):
(server mode).
:param str client_token: The GSS-API token received from the client
- :raise NotImplementedError: Credential delegation is currently not
- supported in server mode
+ :raises:
+ ``NotImplementedError`` -- Credential delegation is currently not
+ supported in server mode
"""
raise NotImplementedError
@@ -404,12 +395,16 @@ class _SSH_SSPI(_SSH_GSSAuth):
_SSH_GSSAuth.__init__(self, auth_method, gss_deleg_creds)
if self._gss_deleg_creds:
- self._gss_flags = sspicon.ISC_REQ_INTEGRITY |\
- sspicon.ISC_REQ_MUTUAL_AUTH |\
- sspicon.ISC_REQ_DELEGATE
+ self._gss_flags = (
+ sspicon.ISC_REQ_INTEGRITY |
+ sspicon.ISC_REQ_MUTUAL_AUTH |
+ sspicon.ISC_REQ_DELEGATE
+ )
else:
- self._gss_flags = sspicon.ISC_REQ_INTEGRITY |\
- sspicon.ISC_REQ_MUTUAL_AUTH
+ self._gss_flags = (
+ sspicon.ISC_REQ_INTEGRITY |
+ sspicon.ISC_REQ_MUTUAL_AUTH
+ )
def ssh_init_sec_context(self, target, desired_mech=None,
username=None, recv_token=None):
@@ -422,11 +417,11 @@ class _SSH_SSPI(_SSH_GSSAuth):
("pseudo negotiated" mechanism, because we
support just the krb5 mechanism :-))
:param recv_token: The SSPI token received from the Server
- :raise SSHException: Is raised if the desired mechanism of the client
- is not supported
+ :raises:
+ `.SSHException` -- Is raised if the desired mechanism of the client
+ is not supported
:return: A ``String`` if the SSPI has returned a token or ``None`` if
no token was returned
- :rtype: String or None
"""
self._username = username
self._gss_host = target
@@ -471,8 +466,6 @@ class _SSH_SSPI(_SSH_GSSAuth):
gssapi-keyex:
Returns the MIC token from SSPI with the SSH session ID as
message.
- :rtype: String
- :see: `._ssh_build_mic`
"""
self._session_id = session_id
if not gss_kex:
@@ -496,7 +489,6 @@ class _SSH_SSPI(_SSH_GSSAuth):
if it's not the initial call.
:return: A ``String`` if the SSPI has returned a token or ``None`` if
no token was returned
- :rtype: String or None
"""
self._gss_host = hostname
self._username = username
@@ -517,7 +509,7 @@ class _SSH_SSPI(_SSH_GSSAuth):
:param str session_id: The SSH session ID
:param str username: The name of the user who attempts to login
:return: None if the MIC check was successful
- :raises sspi.error: if the MIC check failed
+ :raises: ``sspi.error`` -- if the MIC check failed
"""
self._session_id = session_id
self._username = username
@@ -527,13 +519,13 @@ class _SSH_SSPI(_SSH_GSSAuth):
self._username,
self._service,
self._auth_method)
- # Verifies data and its signature. If verification fails, an
+ # Verifies data and its signature. If verification fails, an
# sspi.error will be raised.
self._gss_srv_ctxt.verify(mic_field, mic_token)
else:
# for key exchange with gssapi-keyex
# client mode
- # Verifies data and its signature. If verification fails, an
+ # Verifies data and its signature. If verification fails, an
# sspi.error will be raised.
self._gss_ctxt.verify(self._session_id, mic_token)
@@ -543,13 +535,11 @@ class _SSH_SSPI(_SSH_GSSAuth):
Checks if credentials are delegated (server mode).
:return: ``True`` if credentials are delegated, otherwise ``False``
- :rtype: Boolean
"""
return (
- self._gss_flags & sspicon.ISC_REQ_DELEGATE
- ) and (
- self._gss_srv_ctxt_status or (self._gss_flags)
- )
+ self._gss_flags & sspicon.ISC_REQ_DELEGATE and
+ (self._gss_srv_ctxt_status or self._gss_flags)
+ )
def save_client_creds(self, client_token):
"""
@@ -558,7 +548,8 @@ class _SSH_SSPI(_SSH_GSSAuth):
(server mode).
:param str client_token: The SSPI token received from the client
- :raise NotImplementedError: Credential delegation is currently not
- supported in server mode
+ :raises:
+ ``NotImplementedError`` -- Credential delegation is currently not
+ supported in server mode
"""
raise NotImplementedError
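A minimal sketch of the ``GSSAuth`` factory documented earlier in this file; which of the two classes comes back (and whether ``ImportError`` is raised) depends on the GSS-API/SSPI libraries installed locally:

    from paramiko.ssh_gss import GSSAuth

    try:
        auth = GSSAuth("gssapi-with-mic", gss_deleg_creds=True)
    except ImportError:
        print("Neither a GSS-API nor an SSPI backend is available")
    else:
        print("Selected backend:", type(auth).__name__)  # _SSH_GSSAPI or _SSH_SSPI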
diff --git a/paramiko/transport.py b/paramiko/transport.py
index d6acb4aa..4a3ae8f4 100644
--- a/paramiko/transport.py
+++ b/paramiko/transport.py
@@ -37,34 +37,37 @@ from paramiko import util
from paramiko.auth_handler import AuthHandler
from paramiko.ssh_gss import GSSAuth
from paramiko.channel import Channel
-from paramiko.common import xffffffff, cMSG_CHANNEL_OPEN, cMSG_IGNORE, \
- cMSG_GLOBAL_REQUEST, DEBUG, MSG_KEXINIT, MSG_IGNORE, MSG_DISCONNECT, \
- MSG_DEBUG, ERROR, WARNING, cMSG_UNIMPLEMENTED, INFO, cMSG_KEXINIT, \
- cMSG_NEWKEYS, MSG_NEWKEYS, cMSG_REQUEST_SUCCESS, cMSG_REQUEST_FAILURE, \
- CONNECTION_FAILED_CODE, OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED, \
- OPEN_SUCCEEDED, cMSG_CHANNEL_OPEN_FAILURE, cMSG_CHANNEL_OPEN_SUCCESS, \
- MSG_GLOBAL_REQUEST, MSG_REQUEST_SUCCESS, MSG_REQUEST_FAILURE, \
- MSG_CHANNEL_OPEN_SUCCESS, MSG_CHANNEL_OPEN_FAILURE, MSG_CHANNEL_OPEN, \
- MSG_CHANNEL_SUCCESS, MSG_CHANNEL_FAILURE, MSG_CHANNEL_DATA, \
- MSG_CHANNEL_EXTENDED_DATA, MSG_CHANNEL_WINDOW_ADJUST, MSG_CHANNEL_REQUEST, \
- MSG_CHANNEL_EOF, MSG_CHANNEL_CLOSE, MIN_WINDOW_SIZE, MIN_PACKET_SIZE, \
- MAX_WINDOW_SIZE, DEFAULT_WINDOW_SIZE, DEFAULT_MAX_PACKET_SIZE
+from paramiko.common import (
+ xffffffff, cMSG_CHANNEL_OPEN, cMSG_IGNORE, cMSG_GLOBAL_REQUEST, DEBUG,
+ MSG_KEXINIT, MSG_IGNORE, MSG_DISCONNECT, MSG_DEBUG, ERROR, WARNING,
+ cMSG_UNIMPLEMENTED, INFO, cMSG_KEXINIT, cMSG_NEWKEYS, MSG_NEWKEYS,
+ cMSG_REQUEST_SUCCESS, cMSG_REQUEST_FAILURE, CONNECTION_FAILED_CODE,
+ OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED, OPEN_SUCCEEDED,
+ cMSG_CHANNEL_OPEN_FAILURE, cMSG_CHANNEL_OPEN_SUCCESS, MSG_GLOBAL_REQUEST,
+ MSG_REQUEST_SUCCESS, MSG_REQUEST_FAILURE, MSG_CHANNEL_OPEN_SUCCESS,
+ MSG_CHANNEL_OPEN_FAILURE, MSG_CHANNEL_OPEN, MSG_CHANNEL_SUCCESS,
+ MSG_CHANNEL_FAILURE, MSG_CHANNEL_DATA, MSG_CHANNEL_EXTENDED_DATA,
+ MSG_CHANNEL_WINDOW_ADJUST, MSG_CHANNEL_REQUEST, MSG_CHANNEL_EOF,
+ MSG_CHANNEL_CLOSE, MIN_WINDOW_SIZE, MIN_PACKET_SIZE, MAX_WINDOW_SIZE,
+ DEFAULT_WINDOW_SIZE, DEFAULT_MAX_PACKET_SIZE,
+)
from paramiko.compress import ZlibCompressor, ZlibDecompressor
from paramiko.dsskey import DSSKey
from paramiko.kex_gex import KexGex, KexGexSHA256
from paramiko.kex_group1 import KexGroup1
from paramiko.kex_group14 import KexGroup14
-from paramiko.kex_gss import KexGSSGex, KexGSSGroup1, KexGSSGroup14, NullHostKey
+from paramiko.kex_gss import KexGSSGex, KexGSSGroup1, KexGSSGroup14
from paramiko.message import Message
from paramiko.packet import Packetizer, NeedRekeyException
from paramiko.primes import ModulusPack
-from paramiko.py3compat import string_types, long, byte_ord, b, input
+from paramiko.py3compat import string_types, long, byte_ord, b, input, PY2
from paramiko.rsakey import RSAKey
from paramiko.ecdsakey import ECDSAKey
from paramiko.server import ServerInterface
from paramiko.sftp_client import SFTPClient
-from paramiko.ssh_exception import (SSHException, BadAuthenticationType,
- ChannelException, ProxyCommandFailure)
+from paramiko.ssh_exception import (
+ SSHException, BadAuthenticationType, ChannelException, ProxyCommandFailure,
+)
from paramiko.util import retry_on_signal, ClosingContextManager, clamp_value
@@ -79,7 +82,7 @@ import atexit
atexit.register(_join_lingering_threads)
-class Transport (threading.Thread, ClosingContextManager):
+class Transport(threading.Thread, ClosingContextManager):
"""
An SSH Transport attaches to a stream (usually a socket), negotiates an
encrypted session, authenticates, and then creates stream tunnels, called
@@ -116,11 +119,13 @@ class Transport (threading.Thread, ClosingContextManager):
'hmac-sha1',
)
_preferred_keys = (
+ 'ecdsa-sha2-nistp256',
+ 'ecdsa-sha2-nistp384',
+ 'ecdsa-sha2-nistp521',
'ssh-rsa',
'ssh-dss',
- 'ecdsa-sha2-nistp256',
)
- _preferred_kex = (
+ _preferred_kex = (
'diffie-hellman-group1-sha1',
'diffie-hellman-group14-sha1',
'diffie-hellman-group-exchange-sha1',
@@ -193,6 +198,8 @@ class Transport (threading.Thread, ClosingContextManager):
'ssh-rsa': RSAKey,
'ssh-dss': DSSKey,
'ecdsa-sha2-nistp256': ECDSAKey,
+ 'ecdsa-sha2-nistp384': ECDSAKey,
+ 'ecdsa-sha2-nistp521': ECDSAKey,
}
_kex_info = {
@@ -215,6 +222,7 @@ class Transport (threading.Thread, ClosingContextManager):
}
_modulus_pack = None
+ _active_check_timeout = 0.1
def __init__(self,
sock,
@@ -224,8 +232,8 @@ class Transport (threading.Thread, ClosingContextManager):
gss_deleg_creds=True):
"""
Create a new SSH session over an existing socket, or socket-like
- object. This only creates the `.Transport` object; it doesn't begin the
- SSH session yet. Use `connect` or `start_client` to begin a client
+ object. This only creates the `.Transport` object; it doesn't begin
+ the SSH session yet. Use `connect` or `start_client` to begin a client
session, or `start_server` to begin a server session.
If the object is not actually a socket, it must have the following
@@ -267,6 +275,7 @@ class Transport (threading.Thread, ClosingContextManager):
arguments.
"""
self.active = False
+ self._sshclient = None
if isinstance(sock, string_types):
# convert "host:port" into (host, port)
@@ -279,10 +288,13 @@ class Transport (threading.Thread, ClosingContextManager):
# connect to the given (host, port)
hostname, port = sock
reason = 'No suitable address family'
- for (family, socktype, proto, canonname, sockaddr) in socket.getaddrinfo(hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM):
+ addrinfos = socket.getaddrinfo(
+ hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM
+ )
+ for family, socktype, proto, canonname, sockaddr in addrinfos:
if socktype == socket.SOCK_STREAM:
af = family
- addr = sockaddr
+ # addr = sockaddr
sock = socket.socket(af, socket.SOCK_STREAM)
try:
retry_on_signal(lambda: sock.connect((hostname, port)))
@@ -302,7 +314,7 @@ class Transport (threading.Thread, ClosingContextManager):
# we set the timeout so we can check self.active periodically to
# see if we should bail. socket.timeout exception is never
# propagated.
- self.sock.settimeout(0.1)
+ self.sock.settimeout(self._active_check_timeout)
except AttributeError:
pass
@@ -342,7 +354,8 @@ class Transport (threading.Thread, ClosingContextManager):
self.in_kex = False
self.authenticated = False
self._expected_packet = tuple()
- self.lock = threading.Lock() # synchronization (always higher level than write_lock)
+ # synchronization (always higher level than write_lock)
+ self.lock = threading.Lock()
# tracking open channels
self._channels = ChannelMap()
@@ -363,11 +376,15 @@ class Transport (threading.Thread, ClosingContextManager):
self.logger = util.get_logger(self.log_name)
self.packetizer.set_log(self.logger)
self.auth_handler = None
- self.global_response = None # response Message from an arbitrary global request
- self.completion_event = None # user-defined event callbacks
- self.banner_timeout = 15 # how long (seconds) to wait for the SSH banner
- self.handshake_timeout = 15 # how long (seconds) to wait for the handshake to finish after SSH banner sent.
-
+ # response Message from an arbitrary global request
+ self.global_response = None
+ # user-defined event callbacks
+ self.completion_event = None
+ # how long (seconds) to wait for the SSH banner
+ self.banner_timeout = 15
+ # how long (seconds) to wait for the handshake to finish after SSH
+ # banner sent.
+ self.handshake_timeout = 15
# server mode:
self.server_mode = False
@@ -386,8 +403,10 @@ class Transport (threading.Thread, ClosingContextManager):
out += ' (unconnected)'
else:
if self.local_cipher != '':
- out += ' (cipher %s, %d bits)' % (self.local_cipher,
- self._cipher_info[self.local_cipher]['key-size'] * 8)
+ out += ' (cipher %s, %d bits)' % (
+ self.local_cipher,
+ self._cipher_info[self.local_cipher]['key-size'] * 8
+ )
if self.is_authenticated():
out += ' (active; %d open channel(s))' % len(self._channels)
elif self.initial_kex_done:
@@ -425,7 +444,6 @@ class Transport (threading.Thread, ClosingContextManager):
:param str gss_host: The target's name in the Kerberos database
Default: The name of the host to connect to
- :rtype: Void
"""
# We need the FQDN to get this working with SSPI
self.gss_host = socket.getfqdn(gss_host)
@@ -458,8 +476,9 @@ class Transport (threading.Thread, ClosingContextManager):
:param .threading.Event event:
an event to trigger when negotiation is complete (optional)
- :raises SSHException: if negotiation fails (and no ``event`` was passed
- in)
+ :raises:
+ `.SSHException` -- if negotiation fails (and no ``event`` was
+ passed in)
"""
self.active = True
if event is not None:
@@ -492,7 +511,7 @@ class Transport (threading.Thread, ClosingContextManager):
be triggered. On failure, `is_active` will return ``False``.
(Since 1.4) If ``event`` is ``None``, this method will not return until
- negotation is done. On success, the method returns normally.
+ negotiation is done. On success, the method returns normally.
Otherwise an SSHException is raised.
After a successful negotiation, the client will need to authenticate.
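A minimal sketch (hypothetical host) of the client flow the docstring describes: negotiate, inspect the server key, then authenticate:

    import paramiko

    transport = paramiko.Transport(("ssh.example.com", 22))  # hypothetical host
    transport.start_client()  # blocks until negotiation finishes (no event given)
    server_key = transport.get_remote_server_key()
    print("Server key:", server_key.get_name(), server_key.get_base64())
    # ...verify server_key against known_hosts, then authenticate, e.g.:
    # transport.auth_password("alice", "secret")
    transport.close()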
@@ -519,8 +538,9 @@ class Transport (threading.Thread, ClosingContextManager):
an object used to perform authentication and create `channels
<.Channel>`
- :raises SSHException: if negotiation fails (and no ``event`` was passed
- in)
+ :raises:
+ `.SSHException` -- if negotiation fails (and no ``event`` was
+ passed in)
"""
if server is None:
server = ServerInterface()
@@ -622,6 +642,9 @@ class Transport (threading.Thread, ClosingContextManager):
Transport._modulus_pack = None
return False
+ def set_sshclient(self, sshclient):
+ self._sshclient = sshclient
+
def close(self):
"""
Close this session, and any open channels that are tied to it.
@@ -632,6 +655,7 @@ class Transport (threading.Thread, ClosingContextManager):
for chan in list(self._channels.values()):
chan._unlink()
self.sock.close()
+ self._sshclient = None
def get_remote_server_key(self):
"""
@@ -642,7 +666,7 @@ class Transport (threading.Thread, ClosingContextManager):
string)``. You can get the same effect by calling `.PKey.get_name`
for the key type, and ``str(key)`` for the key string.
- :raises SSHException: if no session is currently active.
+ :raises: `.SSHException` -- if no session is currently active.
:return: public key (`.PKey`) of the remote server
"""
@@ -660,7 +684,12 @@ class Transport (threading.Thread, ClosingContextManager):
"""
return self.active
- def open_session(self, window_size=None, max_packet_size=None, timeout=None):
+ def open_session(
+ self,
+ window_size=None,
+ max_packet_size=None,
+ timeout=None,
+ ):
"""
Request a new channel to the server, of type ``"session"``. This is
just an alias for calling `open_channel` with an argument of
@@ -677,9 +706,12 @@ class Transport (threading.Thread, ClosingContextManager):
:return: a new `.Channel`
- :raises SSHException: if the request is rejected or the session ends
+ :raises:
+ `.SSHException` -- if the request is rejected or the session ends
prematurely
+ .. versionchanged:: 1.13.4/1.14.3/1.15.3
+ Added the ``timeout`` argument.
.. versionchanged:: 1.15
Added the ``window_size`` and ``max_packet_size`` arguments.
"""
@@ -698,7 +730,8 @@ class Transport (threading.Thread, ClosingContextManager):
x11 port, ie. 6010)
:return: a new `.Channel`
- :raises SSHException: if the request is rejected or the session ends
+ :raises:
+ `.SSHException` -- if the request is rejected or the session ends
prematurely
"""
return self.open_channel('x11', src_addr=src_addr)
@@ -712,14 +745,15 @@ class Transport (threading.Thread, ClosingContextManager):
:return: a new `.Channel`
- :raises SSHException:
+ :raises: `.SSHException` --
if the request is rejected or the session ends prematurely
"""
return self.open_channel('auth-agent@openssh.com')
def open_forwarded_tcpip_channel(self, src_addr, dest_addr):
"""
- Request a new channel back to the client, of type ``"forwarded-tcpip"``.
+ Request a new channel back to the client, of type ``forwarded-tcpip``.
+
This is used after a client has requested port forwarding, for sending
incoming connections back to the client.
@@ -763,7 +797,8 @@ class Transport (threading.Thread, ClosingContextManager):
:return: a new `.Channel` on success
- :raises SSHException: if the request is rejected, the session ends
+ :raises:
+ `.SSHException` -- if the request is rejected, the session ends
prematurely or there is a timeout opening a channel
.. versionchanged:: 1.15
@@ -828,7 +863,11 @@ class Transport (threading.Thread, ClosingContextManager):
If a handler is given, that handler is called from a different thread
whenever a forwarded connection arrives. The handler parameters are::
- handler(channel, (origin_addr, origin_port), (server_addr, server_port))
+ handler(
+ channel,
+ (origin_addr, origin_port),
+ (server_addr, server_port),
+ )
where ``server_addr`` and ``server_port`` are the address and port that
the server was listening on.
@@ -846,20 +885,23 @@ class Transport (threading.Thread, ClosingContextManager):
:return: the port number (`int`) allocated by the server
- :raises SSHException: if the server refused the TCP forward request
+ :raises:
+ `.SSHException` -- if the server refused the TCP forward request
"""
if not self.active:
raise SSHException('SSH session not active')
port = int(port)
- response = self.global_request('tcpip-forward', (address, port), wait=True)
+ response = self.global_request(
+ 'tcpip-forward', (address, port), wait=True
+ )
if response is None:
raise SSHException('TCP forwarding request denied')
if port == 0:
port = response.get_int()
if handler is None:
def default_handler(channel, src_addr, dest_addr_port):
- #src_addr, src_port = src_addr_port
- #dest_addr, dest_port = dest_addr_port
+ # src_addr, src_port = src_addr_port
+ # dest_addr, dest_port = dest_addr_port
self._queue_incoming_channel(channel)
handler = default_handler
self._tcp_handler = handler
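A minimal sketch (hypothetical host and credentials) of the remote-forwarding flow above: port 0 lets the server choose, and with no handler the forwarded connections come back through ``accept``:

    import paramiko

    transport = paramiko.Transport(("ssh.example.com", 22))   # hypothetical host
    transport.connect(username="alice", password="secret")    # hypothetical creds
    port = transport.request_port_forward("", 0)
    print("Server now listens on port", port)
    chan = transport.accept(timeout=60)   # next forwarded connection, or None
    if chan is not None:
        chan.close()
    transport.cancel_port_forward("", port)
    transport.close()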
@@ -918,8 +960,9 @@ class Transport (threading.Thread, ClosingContextManager):
traffic both ways as the two sides swap keys and do computations. This
method returns when the session has switched to new keys.
- :raises SSHException: if the key renegotiation failed (which causes the
- session to end)
+ :raises:
+ `.SSHException` -- if the key renegotiation failed (which causes
+ the session to end)
"""
self.completion_event = threading.Event()
self._send_kex_init()
@@ -945,8 +988,9 @@ class Transport (threading.Thread, ClosingContextManager):
seconds to wait before sending a keepalive packet (or
0 to disable keepalives).
"""
- self.packetizer.set_keepalive(interval,
- lambda x=weakref.proxy(self): x.global_request('keepalive@lag.net', wait=False))
+ def _request(x=weakref.proxy(self)):
+ return x.global_request('keepalive@lag.net', wait=False)
+ self.packetizer.set_keepalive(interval, _request)
def global_request(self, kind, data=None, wait=True):
"""
@@ -988,8 +1032,8 @@ class Transport (threading.Thread, ClosingContextManager):
def accept(self, timeout=None):
"""
Return the next channel opened by the client over this transport, in
- server mode. If no channel is opened before the given timeout, ``None``
- is returned.
+ server mode. If no channel is opened before the given timeout,
+ ``None`` is returned.
:param int timeout:
seconds to wait for a channel, or ``None`` to wait forever
@@ -1010,8 +1054,17 @@ class Transport (threading.Thread, ClosingContextManager):
self.lock.release()
return chan
- def connect(self, hostkey=None, username='', password=None, pkey=None,
- gss_host=None, gss_auth=False, gss_kex=False, gss_deleg_creds=True):
+ def connect(
+ self,
+ hostkey=None,
+ username='',
+ password=None,
+ pkey=None,
+ gss_host=None,
+ gss_auth=False,
+ gss_kex=False,
+ gss_deleg_creds=True,
+ ):
"""
Negotiate an SSH2 session, and optionally verify the server's host key
and authenticate using a password or private key. This is a shortcut
@@ -1050,7 +1103,7 @@ class Transport (threading.Thread, ClosingContextManager):
:param bool gss_deleg_creds:
Whether to delegate GSS-API client credentials.
- :raises SSHException: if the SSH2 negotiation fails, the host key
+ :raises: `.SSHException` -- if the SSH2 negotiation fails, the host key
supplied by the server is incorrect, or authentication fails.
"""
if hostkey is not None:
@@ -1063,16 +1116,23 @@ class Transport (threading.Thread, ClosingContextManager):
# the host key.
if (hostkey is not None) and not gss_kex:
key = self.get_remote_server_key()
- if (key.get_name() != hostkey.get_name()) or (key.asbytes() != hostkey.asbytes()):
+ if (
+ key.get_name() != hostkey.get_name() or
+ key.asbytes() != hostkey.asbytes()
+ ):
self._log(DEBUG, 'Bad host key from server')
- self._log(DEBUG, 'Expected: %s: %s' % (hostkey.get_name(), repr(hostkey.asbytes())))
- self._log(DEBUG, 'Got : %s: %s' % (key.get_name(), repr(key.asbytes())))
+ self._log(DEBUG, 'Expected: %s: %s' % (
+ hostkey.get_name(), repr(hostkey.asbytes()))
+ )
+ self._log(DEBUG, 'Got : %s: %s' % (
+ key.get_name(), repr(key.asbytes()))
+ )
raise SSHException('Bad host key from server')
self._log(DEBUG, 'Host key verified (%s)' % hostkey.get_name())
if (pkey is not None) or (password is not None) or gss_auth or gss_kex:
if gss_auth:
- self._log(DEBUG, 'Attempting GSS-API auth... (gssapi-with-mic)')
+ self._log(DEBUG, 'Attempting GSS-API auth... (gssapi-with-mic)') # noqa
self.auth_gssapi_with_mic(username, gss_host, gss_deleg_creds)
elif gss_kex:
self._log(DEBUG, 'Attempting GSS-API auth... (gssapi-keyex)')
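A minimal sketch (hypothetical host and key path) of the single-call ``connect`` shortcut with public-key authentication; leaving ``hostkey`` unset, as here, skips host-key verification:

    import paramiko

    key = paramiko.RSAKey.from_private_key_file("/home/alice/.ssh/id_rsa")  # hypothetical
    transport = paramiko.Transport(("ssh.example.com", 22))                 # hypothetical
    transport.connect(username="alice", pkey=key)
    sftp = paramiko.SFTPClient.from_transport(transport)
    print(sftp.listdir("."))
    transport.close()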
@@ -1117,7 +1177,7 @@ class Transport (threading.Thread, ClosingContextManager):
passed to the `.SubsystemHandler` constructor later.
:param str name: name of the subsystem.
- :param class handler:
+ :param handler:
subclass of `.SubsystemHandler` that handles this subsystem.
"""
try:
@@ -1135,7 +1195,11 @@ class Transport (threading.Thread, ClosingContextManager):
successfully; False if authentication failed and/or the session is
closed.
"""
- return self.active and (self.auth_handler is not None) and self.auth_handler.is_authenticated()
+ return (
+ self.active and
+ self.auth_handler is not None and
+ self.auth_handler.is_authenticated()
+ )
def get_username(self):
"""
@@ -1174,9 +1238,11 @@ class Transport (threading.Thread, ClosingContextManager):
`list` of auth types permissible for the next stage of
authentication (normally empty)
- :raises BadAuthenticationType: if "none" authentication isn't allowed
+ :raises:
+ `.BadAuthenticationType` -- if "none" authentication isn't allowed
by the server for this user
- :raises SSHException: if the authentication failed due to a network
+ :raises:
+ `.SSHException` -- if the authentication failed due to a network
error
.. versionadded:: 1.5
@@ -1227,14 +1293,17 @@ class Transport (threading.Thread, ClosingContextManager):
`list` of auth types permissible for the next stage of
authentication (normally empty)
- :raises BadAuthenticationType: if password authentication isn't
+ :raises:
+ `.BadAuthenticationType` -- if password authentication isn't
allowed by the server for this user (and no event was passed in)
- :raises AuthenticationException: if the authentication failed (and no
+ :raises:
+ `.AuthenticationException` -- if the authentication failed (and no
event was passed in)
- :raises SSHException: if there was a network error
+ :raises: `.SSHException` -- if there was a network error
"""
if (not self.active) or (not self.initial_kex_done):
- # we should never try to send the password unless we're on a secure link
+ # we should never try to send the password unless we're on a secure
+ # link
raise SSHException('No existing session')
if event is None:
my_event = threading.Event()
@@ -1248,7 +1317,8 @@ class Transport (threading.Thread, ClosingContextManager):
try:
return self.auth_handler.wait_for_response(my_event)
except BadAuthenticationType as e:
- # if password auth isn't allowed, but keyboard-interactive *is*, try to fudge it
+ # if password auth isn't allowed, but keyboard-interactive *is*,
+ # try to fudge it
if not fallback or ('keyboard-interactive' not in e.allowed_types):
raise
try:
@@ -1294,11 +1364,13 @@ class Transport (threading.Thread, ClosingContextManager):
`list` of auth types permissible for the next stage of
authentication (normally empty)
- :raises BadAuthenticationType: if public-key authentication isn't
+ :raises:
+ `.BadAuthenticationType` -- if public-key authentication isn't
allowed by the server for this user (and no event was passed in)
- :raises AuthenticationException: if the authentication failed (and no
+ :raises:
+ `.AuthenticationException` -- if the authentication failed (and no
event was passed in)
- :raises SSHException: if there was a network error
+ :raises: `.SSHException` -- if there was a network error
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to authenticate unless we're on a secure link
@@ -1350,10 +1422,10 @@ class Transport (threading.Thread, ClosingContextManager):
`list` of auth types permissible for the next stage of
authentication (normally empty).
- :raises BadAuthenticationType: if public-key authentication isn't
+ :raises: `.BadAuthenticationType` -- if public-key authentication isn't
allowed by the server for this user
- :raises AuthenticationException: if the authentication failed
- :raises SSHException: if there was a network error
+ :raises: `.AuthenticationException` -- if the authentication failed
+ :raises: `.SSHException` -- if there was a network error
.. versionadded:: 1.5
"""
@@ -1362,7 +1434,9 @@ class Transport (threading.Thread, ClosingContextManager):
raise SSHException('No existing session')
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
- self.auth_handler.auth_interactive(username, handler, my_event, submethods)
+ self.auth_handler.auth_interactive(
+ username, handler, my_event, submethods
+ )
return self.auth_handler.wait_for_response(my_event)
def auth_interactive_dumb(self, username, handler=None, submethods=''):
@@ -1380,8 +1454,8 @@ class Transport (threading.Thread, ClosingContextManager):
print(title.strip())
if instructions:
print(instructions.strip())
- for prompt,show_input in prompt_list:
- print(prompt.strip(),end=' ')
+ for prompt, show_input in prompt_list:
+ print(prompt.strip(), end=' ')
answers.append(input())
return answers
return self.auth_interactive(username, handler, submethods)
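A minimal sketch (hypothetical host and username) of supplying a custom handler; its signature matches the dumb handler defined just above:

    import paramiko

    transport = paramiko.Transport(("ssh.example.com", 22))  # hypothetical host
    transport.start_client()

    def handler(title, instructions, prompt_list):
        # Answer every prompt with a canned response -- purely illustrative.
        return ["123456" for _prompt, _echo in prompt_list]

    transport.auth_interactive("alice", handler)  # hypothetical username
    transport.close()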
@@ -1396,36 +1470,37 @@ class Transport (threading.Thread, ClosingContextManager):
:return: list of auth types permissible for the next stage of
authentication (normally empty)
:rtype: list
- :raise BadAuthenticationType: if gssapi-with-mic isn't
+ :raises: `.BadAuthenticationType` -- if gssapi-with-mic isn't
allowed by the server (and no event was passed in)
- :raise AuthenticationException: if the authentication failed (and no
+ :raises:
+ `.AuthenticationException` -- if the authentication failed (and no
event was passed in)
- :raise SSHException: if there was a network error
+ :raises: `.SSHException` -- if there was a network error
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to authenticate unless we're on a secure link
raise SSHException('No existing session')
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
- self.auth_handler.auth_gssapi_with_mic(username, gss_host, gss_deleg_creds, my_event)
+ self.auth_handler.auth_gssapi_with_mic(
+ username, gss_host, gss_deleg_creds, my_event
+ )
return self.auth_handler.wait_for_response(my_event)
def auth_gssapi_keyex(self, username):
"""
- Authenticate to the Server with GSS-API / SSPI if GSS-API Key Exchange
- was the used key exchange method.
+ Authenticate to the server with GSS-API/SSPI if GSS-API kex is in use.
- :param str username: The username to authenticate as
- :param str gss_host: The target host
- :param bool gss_deleg_creds: Delegate credentials or not
- :return: list of auth types permissible for the next stage of
- authentication (normally empty)
- :rtype: list
- :raise BadAuthenticationType: if GSS-API Key Exchange was not performed
- (and no event was passed in)
- :raise AuthenticationException: if the authentication failed (and no
- event was passed in)
- :raise SSHException: if there was a network error
+ :param str username: The username to authenticate as.
+ :returns:
+ a `list` of auth types permissible for the next stage of
+ authentication (normally empty)
+ :raises: `.BadAuthenticationType` --
+ if GSS-API Key Exchange was not performed (and no event was passed
+ in)
+ :raises: `.AuthenticationException` --
+ if the authentication failed (and no event was passed in)
+ :raises: `.SSHException` -- if there was a network error
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to authenticate unless we're on a secure link
@@ -1503,9 +1578,10 @@ class Transport (threading.Thread, ClosingContextManager):
def getpeername(self):
"""
Return the address of the remote side of this Transport, if possible.
- This is effectively a wrapper around ``'getpeername'`` on the underlying
- socket. If the socket-like object has no ``'getpeername'`` method,
- then ``("unknown", 0)`` is returned.
+
+ This is effectively a wrapper around ``getpeername`` on the underlying
+ socket. If the socket-like object has no ``getpeername`` method, then
+ ``("unknown", 0)`` is returned.
:return:
the address of the remote host, if known, as a ``(str, int)``
@@ -1519,10 +1595,27 @@ class Transport (threading.Thread, ClosingContextManager):
def stop_thread(self):
self.active = False
self.packetizer.close()
- while self.is_alive() and (self is not threading.current_thread()):
- self.join(10)
-
- ### internals...
+ if PY2:
+ # Original join logic; #520 doesn't appear commonly present under
+ # Python 2.
+ while self.is_alive() and self is not threading.current_thread():
+ self.join(10)
+ else:
+ # Keep trying to join() our main thread, quickly, until:
+ # * We join()ed successfully (self.is_alive() == False)
+ # * Or it looks like we've hit issue #520 (socket.recv hitting some
+ # race condition preventing it from timing out correctly), wherein
+ # our socket and packetizer are both closed (but where we'd
+ # otherwise be sitting forever on that recv()).
+ while (
+ self.is_alive() and
+ self is not threading.current_thread() and
+ not self.sock._closed and
+ not self.packetizer.closed
+ ):
+ self.join(0.1)
+
+ # internals...
def _log(self, level, msg, *args):
if issubclass(type(msg), list):
@@ -1560,28 +1653,32 @@ class Transport (threading.Thread, ClosingContextManager):
while True:
self.clear_to_send.wait(0.1)
if not self.active:
- self._log(DEBUG, 'Dropping user packet because connection is dead.')
+ self._log(DEBUG, 'Dropping user packet because connection is dead.') # noqa
return
self.clear_to_send_lock.acquire()
if self.clear_to_send.is_set():
break
self.clear_to_send_lock.release()
if time.time() > start + self.clear_to_send_timeout:
- raise SSHException('Key-exchange timed out waiting for key negotiation')
+ raise SSHException('Key-exchange timed out waiting for key negotiation') # noqa
try:
self._send_message(data)
finally:
self.clear_to_send_lock.release()
def _set_K_H(self, k, h):
- """used by a kex object to set the K (root key) and H (exchange hash)"""
+ """
+ Used by a kex obj to set the K (root key) and H (exchange hash).
+ """
self.K = k
self.H = h
if self.session_id is None:
self.session_id = h
def _expect_packet(self, *ptypes):
- """used by a kex object to register the next packet type it expects to see"""
+ """
+ Used by a kex obj to register the next packet type it expects to see.
+ """
self._expected_packet = tuple(ptypes)
def _verify_key(self, host_key, sig):
@@ -1589,7 +1686,7 @@ class Transport (threading.Thread, ClosingContextManager):
if key is None:
raise SSHException('Unknown host key type')
if not key.verify_ssh_sig(self.H, Message(sig)):
- raise SSHException('Signature verification (%s) failed.' % self.host_key_type)
+ raise SSHException('Signature verification (%s) failed.' % self.host_key_type) # noqa
self.host_key = key
def _compute_key(self, id, nbytes):
@@ -1602,7 +1699,9 @@ class Transport (threading.Thread, ClosingContextManager):
# Fallback to SHA1 for kex engines that fail to specify a hex
# algorithm, or for e.g. transport tests that don't run kexinit.
hash_algo = getattr(self.kex_engine, 'hash_algo', None)
- hash_select_msg = "kex engine %s specified hash_algo %r" % (self.kex_engine.__class__.__name__, hash_algo)
+ hash_select_msg = "kex engine %s specified hash_algo %r" % (
+ self.kex_engine.__class__.__name__, hash_algo
+ )
if hash_algo is None:
hash_algo = sha1
hash_select_msg += ", falling back to sha1"
@@ -1683,14 +1782,15 @@ class Transport (threading.Thread, ClosingContextManager):
# active=True occurs before the thread is launched, to avoid a race
_active_threads.append(self)
+ tid = hex(long(id(self)) & xffffffff)
if self.server_mode:
- self._log(DEBUG, 'starting thread (server mode): %s' % hex(long(id(self)) & xffffffff))
+ self._log(DEBUG, 'starting thread (server mode): %s' % tid)
else:
- self._log(DEBUG, 'starting thread (client mode): %s' % hex(long(id(self)) & xffffffff))
+ self._log(DEBUG, 'starting thread (client mode): %s' % tid)
try:
try:
self.packetizer.write_all(b(self.local_version + '\r\n'))
- self._log(DEBUG, 'Local version/idstring: %s' % self.local_version)
+ self._log(DEBUG, 'Local version/idstring: %s' % self.local_version) # noqa
self._check_banner()
# The above is actually very much part of the handshake, but
# sometimes the banner can be read but the machine is not
@@ -1722,7 +1822,7 @@ class Transport (threading.Thread, ClosingContextManager):
continue
if len(self._expected_packet) > 0:
if ptype not in self._expected_packet:
- raise SSHException('Expecting packet from %r, got %d' % (self._expected_packet, ptype))
+ raise SSHException('Expecting packet from %r, got %d' % (self._expected_packet, ptype)) # noqa
self._expected_packet = tuple()
if (ptype >= 30) and (ptype <= 41):
self.kex_engine.parse_next(ptype, m)
@@ -1736,13 +1836,17 @@ class Transport (threading.Thread, ClosingContextManager):
if chan is not None:
self._channel_handler_table[ptype](chan, m)
elif chanid in self.channels_seen:
- self._log(DEBUG, 'Ignoring message for dead channel %d' % chanid)
+ self._log(DEBUG, 'Ignoring message for dead channel %d' % chanid) # noqa
else:
- self._log(ERROR, 'Channel request for unknown channel %d' % chanid)
+ self._log(ERROR, 'Channel request for unknown channel %d' % chanid) # noqa
self.active = False
self.packetizer.close()
- elif (self.auth_handler is not None) and (ptype in self.auth_handler._handler_table):
- self.auth_handler._handler_table[ptype](self.auth_handler, m)
+ elif (
+ self.auth_handler is not None and
+ ptype in self.auth_handler._handler_table
+ ):
+ handler = self.auth_handler._handler_table[ptype]
+ handler(self.auth_handler, m)
else:
self._log(WARNING, 'Oops, unhandled type %d' % ptype)
msg = Message()
@@ -1756,7 +1860,6 @@ class Transport (threading.Thread, ClosingContextManager):
self.saved_exception = e
except EOFError as e:
self._log(DEBUG, 'EOF in transport thread')
- #self._log(DEBUG, util.tb_strings())
self.saved_exception = e
except socket.error as e:
if type(e.args) is tuple:
@@ -1810,7 +1913,7 @@ class Transport (threading.Thread, ClosingContextManager):
msg += "local={0}, remote={1}".format(local, remote)
self._log(DEBUG, msg)
- ### protocol stages
+ # protocol stages
def _negotiate_keys(self, m):
# throws SSHException on anything unusual
@@ -1839,7 +1942,9 @@ class Transport (threading.Thread, ClosingContextManager):
except ProxyCommandFailure:
raise
except Exception as e:
- raise SSHException('Error reading SSH protocol banner' + str(e))
+ raise SSHException(
+ 'Error reading SSH protocol banner' + str(e)
+ )
if buf[:4] == 'SSH-':
break
self._log(DEBUG, 'Banner: ' + buf)
@@ -1849,10 +1954,10 @@ class Transport (threading.Thread, ClosingContextManager):
self.remote_version = buf
self._log(DEBUG, 'Remote version/idstring: %s' % buf)
# pull off any attached comment
- comment = ''
+ # NOTE: comment used to be stored in a variable and then...never used.
+ # since 2003. ca 877cd974b8182d26fa76d566072917ea67b64e67
i = buf.find(' ')
if i >= 0:
- comment = buf[i+1:]
buf = buf[:i]
# parse out version string and make sure it matches
segs = buf.split('-', 2)
@@ -1861,8 +1966,10 @@ class Transport (threading.Thread, ClosingContextManager):
version = segs[1]
client = segs[2]
if version != '1.99' and version != '2.0':
- raise SSHException('Incompatible version (%s instead of 2.0)' % (version,))
- self._log(INFO, 'Connected (version %s, client %s)' % (version, client))
+ msg = 'Incompatible version ({0} instead of 2.0)'
+ raise SSHException(msg.format(version))
+ msg = 'Connected (version {0}, client {1})'.format(version, client)
+ self._log(INFO, msg)
def _send_kex_init(self):
"""
@@ -1877,14 +1984,24 @@ class Transport (threading.Thread, ClosingContextManager):
self.in_kex = True
if self.server_mode:
mp_required_prefix = 'diffie-hellman-group-exchange-sha'
- kex_mp = [k for k in self._preferred_kex if k.startswith(mp_required_prefix)]
+ kex_mp = [
+ k for k
+ in self._preferred_kex
+ if k.startswith(mp_required_prefix)
+ ]
if (self._modulus_pack is None) and (len(kex_mp) > 0):
- # can't do group-exchange if we don't have a pack of potential primes
- pkex = [k for k in self.get_security_options().kex
- if not k.startswith(mp_required_prefix)]
+ # can't do group-exchange if we don't have a pack of potential
+ # primes
+ pkex = [
+ k for k
+ in self.get_security_options().kex
+ if not k.startswith(mp_required_prefix)
+ ]
self.get_security_options().kex = pkex
- available_server_keys = list(filter(list(self.server_key_dict.keys()).__contains__,
- self._preferred_keys))
+ available_server_keys = list(filter(
+ list(self.server_key_dict.keys()).__contains__,
+ self._preferred_keys
+ ))
else:
available_server_keys = self._preferred_keys
@@ -1908,7 +2025,7 @@ class Transport (threading.Thread, ClosingContextManager):
self._send_message(m)
def _parse_kex_init(self, m):
- cookie = m.get_bytes(16)
+ m.get_bytes(16) # cookie, discarded
kex_algo_list = m.get_list()
server_key_algo_list = m.get_list()
client_encrypt_algo_list = m.get_list()
@@ -1920,18 +2037,21 @@ class Transport (threading.Thread, ClosingContextManager):
client_lang_list = m.get_list()
server_lang_list = m.get_list()
kex_follows = m.get_boolean()
- unused = m.get_int()
-
- self._log(DEBUG, 'kex algos:' + str(kex_algo_list) + ' server key:' + str(server_key_algo_list) +
- ' client encrypt:' + str(client_encrypt_algo_list) +
- ' server encrypt:' + str(server_encrypt_algo_list) +
- ' client mac:' + str(client_mac_algo_list) +
- ' server mac:' + str(server_mac_algo_list) +
- ' client compress:' + str(client_compress_algo_list) +
- ' server compress:' + str(server_compress_algo_list) +
- ' client lang:' + str(client_lang_list) +
- ' server lang:' + str(server_lang_list) +
- ' kex follows?' + str(kex_follows))
+ m.get_int() # unused
+
+ self._log(DEBUG,
+ 'kex algos:' + str(kex_algo_list) +
+ ' server key:' + str(server_key_algo_list) +
+ ' client encrypt:' + str(client_encrypt_algo_list) +
+ ' server encrypt:' + str(server_encrypt_algo_list) +
+ ' client mac:' + str(client_mac_algo_list) +
+ ' server mac:' + str(server_mac_algo_list) +
+ ' client compress:' + str(client_compress_algo_list) +
+ ' server compress:' + str(server_compress_algo_list) +
+ ' client lang:' + str(client_lang_list) +
+ ' server lang:' + str(server_lang_list) +
+ ' kex follows?' + str(kex_follows)
+ )
# as a server, we pick the first item in the client's list that we
# support.
@@ -1948,34 +2068,48 @@ class Transport (threading.Thread, ClosingContextManager):
self._preferred_kex
))
if len(agreed_kex) == 0:
- raise SSHException('Incompatible ssh peer (no acceptable kex algorithm)')
+ raise SSHException('Incompatible ssh peer (no acceptable kex algorithm)') # noqa
self.kex_engine = self._kex_info[agreed_kex[0]](self)
self._log(DEBUG, "Kex agreed: %s" % agreed_kex[0])
if self.server_mode:
- available_server_keys = list(filter(list(self.server_key_dict.keys()).__contains__,
- self._preferred_keys))
- agreed_keys = list(filter(available_server_keys.__contains__, server_key_algo_list))
+ available_server_keys = list(filter(
+ list(self.server_key_dict.keys()).__contains__,
+ self._preferred_keys
+ ))
+ agreed_keys = list(filter(
+ available_server_keys.__contains__, server_key_algo_list
+ ))
else:
- agreed_keys = list(filter(server_key_algo_list.__contains__, self._preferred_keys))
+ agreed_keys = list(filter(
+ server_key_algo_list.__contains__, self._preferred_keys
+ ))
if len(agreed_keys) == 0:
- raise SSHException('Incompatible ssh peer (no acceptable host key)')
+ raise SSHException('Incompatible ssh peer (no acceptable host key)') # noqa
self.host_key_type = agreed_keys[0]
if self.server_mode and (self.get_server_key() is None):
- raise SSHException('Incompatible ssh peer (can\'t match requested host key type)')
+ raise SSHException('Incompatible ssh peer (can\'t match requested host key type)') # noqa
if self.server_mode:
- agreed_local_ciphers = list(filter(self._preferred_ciphers.__contains__,
- server_encrypt_algo_list))
- agreed_remote_ciphers = list(filter(self._preferred_ciphers.__contains__,
- client_encrypt_algo_list))
+ agreed_local_ciphers = list(filter(
+ self._preferred_ciphers.__contains__,
+ server_encrypt_algo_list
+ ))
+ agreed_remote_ciphers = list(filter(
+ self._preferred_ciphers.__contains__,
+ client_encrypt_algo_list
+ ))
else:
- agreed_local_ciphers = list(filter(client_encrypt_algo_list.__contains__,
- self._preferred_ciphers))
- agreed_remote_ciphers = list(filter(server_encrypt_algo_list.__contains__,
- self._preferred_ciphers))
- if (len(agreed_local_ciphers) == 0) or (len(agreed_remote_ciphers) == 0):
- raise SSHException('Incompatible ssh server (no acceptable ciphers)')
+ agreed_local_ciphers = list(filter(
+ client_encrypt_algo_list.__contains__,
+ self._preferred_ciphers
+ ))
+ agreed_remote_ciphers = list(filter(
+ server_encrypt_algo_list.__contains__,
+ self._preferred_ciphers
+ ))
+ if len(agreed_local_ciphers) == 0 or len(agreed_remote_ciphers) == 0:
+ raise SSHException('Incompatible ssh server (no acceptable ciphers)') # noqa
self.local_cipher = agreed_local_ciphers[0]
self.remote_cipher = agreed_remote_ciphers[0]
self._log_agreement(
@@ -1983,11 +2117,19 @@ class Transport (threading.Thread, ClosingContextManager):
)
if self.server_mode:
- agreed_remote_macs = list(filter(self._preferred_macs.__contains__, client_mac_algo_list))
- agreed_local_macs = list(filter(self._preferred_macs.__contains__, server_mac_algo_list))
+ agreed_remote_macs = list(filter(
+ self._preferred_macs.__contains__, client_mac_algo_list
+ ))
+ agreed_local_macs = list(filter(
+ self._preferred_macs.__contains__, server_mac_algo_list
+ ))
else:
- agreed_local_macs = list(filter(client_mac_algo_list.__contains__, self._preferred_macs))
- agreed_remote_macs = list(filter(server_mac_algo_list.__contains__, self._preferred_macs))
+ agreed_local_macs = list(filter(
+ client_mac_algo_list.__contains__, self._preferred_macs
+ ))
+ agreed_remote_macs = list(filter(
+ server_mac_algo_list.__contains__, self._preferred_macs
+ ))
if (len(agreed_local_macs) == 0) or (len(agreed_remote_macs) == 0):
raise SSHException('Incompatible ssh server (no acceptable macs)')
self.local_mac = agreed_local_macs[0]
@@ -1997,13 +2139,32 @@ class Transport (threading.Thread, ClosingContextManager):
)
if self.server_mode:
- agreed_remote_compression = list(filter(self._preferred_compression.__contains__, client_compress_algo_list))
- agreed_local_compression = list(filter(self._preferred_compression.__contains__, server_compress_algo_list))
+ agreed_remote_compression = list(filter(
+ self._preferred_compression.__contains__,
+ client_compress_algo_list
+ ))
+ agreed_local_compression = list(filter(
+ self._preferred_compression.__contains__,
+ server_compress_algo_list
+ ))
else:
- agreed_local_compression = list(filter(client_compress_algo_list.__contains__, self._preferred_compression))
- agreed_remote_compression = list(filter(server_compress_algo_list.__contains__, self._preferred_compression))
- if (len(agreed_local_compression) == 0) or (len(agreed_remote_compression) == 0):
- raise SSHException('Incompatible ssh server (no acceptable compression) %r %r %r' % (agreed_local_compression, agreed_remote_compression, self._preferred_compression))
+ agreed_local_compression = list(filter(
+ client_compress_algo_list.__contains__,
+ self._preferred_compression
+ ))
+ agreed_remote_compression = list(filter(
+ server_compress_algo_list.__contains__,
+ self._preferred_compression
+ ))
+ if (
+ len(agreed_local_compression) == 0 or
+ len(agreed_remote_compression) == 0
+ ):
+ msg = 'Incompatible ssh server (no acceptable compression) {0!r} {1!r} {2!r}' # noqa
+ raise SSHException(msg.format(
+ agreed_local_compression, agreed_remote_compression,
+ self._preferred_compression,
+ ))
self.local_compression = agreed_local_compression[0]
self.remote_compression = agreed_remote_compression[0]
self._log_agreement(
@@ -2020,15 +2181,22 @@ class Transport (threading.Thread, ClosingContextManager):
self.remote_kex_init = cMSG_KEXINIT + m.get_so_far()
def _activate_inbound(self):
- """switch on newly negotiated encryption parameters for inbound traffic"""
+ """switch on newly negotiated encryption parameters for
+ inbound traffic"""
block_size = self._cipher_info[self.remote_cipher]['block-size']
if self.server_mode:
IV_in = self._compute_key('A', block_size)
- key_in = self._compute_key('C', self._cipher_info[self.remote_cipher]['key-size'])
+ key_in = self._compute_key(
+ 'C', self._cipher_info[self.remote_cipher]['key-size']
+ )
else:
IV_in = self._compute_key('B', block_size)
- key_in = self._compute_key('D', self._cipher_info[self.remote_cipher]['key-size'])
- engine = self._get_cipher(self.remote_cipher, key_in, IV_in, self._DECRYPT)
+ key_in = self._compute_key(
+ 'D', self._cipher_info[self.remote_cipher]['key-size']
+ )
+ engine = self._get_cipher(
+ self.remote_cipher, key_in, IV_in, self._DECRYPT
+ )
mac_size = self._mac_info[self.remote_mac]['size']
mac_engine = self._mac_info[self.remote_mac]['class']
# initial mac keys are done in the hash's natural size (not the
@@ -2037,25 +2205,37 @@ class Transport (threading.Thread, ClosingContextManager):
mac_key = self._compute_key('E', mac_engine().digest_size)
else:
mac_key = self._compute_key('F', mac_engine().digest_size)
- self.packetizer.set_inbound_cipher(engine, block_size, mac_engine, mac_size, mac_key)
+ self.packetizer.set_inbound_cipher(
+ engine, block_size, mac_engine, mac_size, mac_key
+ )
compress_in = self._compression_info[self.remote_compression][1]
- if (compress_in is not None) and ((self.remote_compression != 'zlib@openssh.com') or self.authenticated):
+ if (
+ compress_in is not None and
+ (
+ self.remote_compression != 'zlib@openssh.com' or
+ self.authenticated
+ )
+ ):
self._log(DEBUG, 'Switching on inbound compression ...')
self.packetizer.set_inbound_compressor(compress_in())
def _activate_outbound(self):
- """switch on newly negotiated encryption parameters for outbound traffic"""
+ """switch on newly negotiated encryption parameters for
+ outbound traffic"""
m = Message()
m.add_byte(cMSG_NEWKEYS)
self._send_message(m)
block_size = self._cipher_info[self.local_cipher]['block-size']
if self.server_mode:
IV_out = self._compute_key('B', block_size)
- key_out = self._compute_key('D', self._cipher_info[self.local_cipher]['key-size'])
+ key_out = self._compute_key(
+ 'D', self._cipher_info[self.local_cipher]['key-size'])
else:
IV_out = self._compute_key('A', block_size)
- key_out = self._compute_key('C', self._cipher_info[self.local_cipher]['key-size'])
- engine = self._get_cipher(self.local_cipher, key_out, IV_out, self._ENCRYPT)
+ key_out = self._compute_key(
+ 'C', self._cipher_info[self.local_cipher]['key-size'])
+ engine = self._get_cipher(
+ self.local_cipher, key_out, IV_out, self._ENCRYPT)
mac_size = self._mac_info[self.local_mac]['size']
mac_engine = self._mac_info[self.local_mac]['class']
# initial mac keys are done in the hash's natural size (not the
@@ -2065,9 +2245,16 @@ class Transport (threading.Thread, ClosingContextManager):
else:
mac_key = self._compute_key('E', mac_engine().digest_size)
sdctr = self.local_cipher.endswith('-ctr')
- self.packetizer.set_outbound_cipher(engine, block_size, mac_engine, mac_size, mac_key, sdctr)
+ self.packetizer.set_outbound_cipher(
+ engine, block_size, mac_engine, mac_size, mac_key, sdctr)
compress_out = self._compression_info[self.local_compression][0]
- if (compress_out is not None) and ((self.local_compression != 'zlib@openssh.com') or self.authenticated):
+ if (
+ compress_out is not None and
+ (
+ self.local_compression != 'zlib@openssh.com' or
+ self.authenticated
+ )
+ ):
self._log(DEBUG, 'Switching on outbound compression ...')
self.packetizer.set_outbound_compressor(compress_out())
if not self.packetizer.need_rekey():
@@ -2123,7 +2310,10 @@ class Transport (threading.Thread, ClosingContextManager):
self._log(DEBUG, 'Received global request "%s"' % kind)
want_reply = m.get_boolean()
if not self.server_mode:
- self._log(DEBUG, 'Rejecting "%s" global request from server.' % kind)
+ self._log(
+ DEBUG,
+ 'Rejecting "%s" global request from server.' % kind
+ )
ok = False
elif kind == 'tcpip-forward':
address = m.get_text()
@@ -2174,7 +2364,8 @@ class Transport (threading.Thread, ClosingContextManager):
return
self.lock.acquire()
try:
- chan._set_remote_channel(server_chanid, server_window_size, server_max_packet_size)
+ chan._set_remote_channel(
+ server_chanid, server_window_size, server_max_packet_size)
self._log(DEBUG, 'Secsh channel %d opened.' % chanid)
if chanid in self.channel_events:
self.channel_events[chanid].set()
@@ -2187,9 +2378,13 @@ class Transport (threading.Thread, ClosingContextManager):
chanid = m.get_int()
reason = m.get_int()
reason_str = m.get_text()
- lang = m.get_text()
+ m.get_text() # ignored language
reason_text = CONNECTION_FAILED_CODE.get(reason, '(unknown code)')
- self._log(ERROR, 'Secsh channel %d open FAILED: %s: %s' % (chanid, reason_str, reason_text))
+ self._log(
+ ERROR,
+ 'Secsh channel %d open FAILED: %s: %s' % (
+ chanid, reason_str, reason_text)
+ )
self.lock.acquire()
try:
self.saved_exception = ChannelException(reason, reason_text)
@@ -2208,7 +2403,10 @@ class Transport (threading.Thread, ClosingContextManager):
initial_window_size = m.get_int()
max_packet_size = m.get_int()
reject = False
- if (kind == 'auth-agent@openssh.com') and (self._forward_agent_handler is not None):
+ if (
+ kind == 'auth-agent@openssh.com' and
+ self._forward_agent_handler is not None
+ ):
self._log(DEBUG, 'Incoming forward agent connection')
self.lock.acquire()
try:
@@ -2218,7 +2416,11 @@ class Transport (threading.Thread, ClosingContextManager):
elif (kind == 'x11') and (self._x11_handler is not None):
origin_addr = m.get_text()
origin_port = m.get_int()
- self._log(DEBUG, 'Incoming x11 connection from %s:%d' % (origin_addr, origin_port))
+ self._log(
+ DEBUG,
+ 'Incoming x11 connection from %s:%d' % (
+ origin_addr, origin_port)
+ )
self.lock.acquire()
try:
my_chanid = self._next_channel()
@@ -2229,14 +2431,20 @@ class Transport (threading.Thread, ClosingContextManager):
server_port = m.get_int()
origin_addr = m.get_text()
origin_port = m.get_int()
- self._log(DEBUG, 'Incoming tcp forwarded connection from %s:%d' % (origin_addr, origin_port))
+ self._log(
+ DEBUG,
+ 'Incoming tcp forwarded connection from %s:%d' % (
+ origin_addr, origin_port)
+ )
self.lock.acquire()
try:
my_chanid = self._next_channel()
finally:
self.lock.release()
elif not self.server_mode:
- self._log(DEBUG, 'Rejecting "%s" channel request from server.' % kind)
+ self._log(
+ DEBUG,
+ 'Rejecting "%s" channel request from server.' % kind)
reject = True
reason = OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
else:
@@ -2246,17 +2454,23 @@ class Transport (threading.Thread, ClosingContextManager):
finally:
self.lock.release()
if kind == 'direct-tcpip':
- # handle direct-tcpip requests comming from the client
+ # handle direct-tcpip requests coming from the client
dest_addr = m.get_text()
dest_port = m.get_int()
origin_addr = m.get_text()
origin_port = m.get_int()
reason = self.server_object.check_channel_direct_tcpip_request(
- my_chanid, (origin_addr, origin_port), (dest_addr, dest_port))
+ my_chanid,
+ (origin_addr, origin_port),
+ (dest_addr, dest_port)
+ )
else:
- reason = self.server_object.check_channel_request(kind, my_chanid)
+ reason = self.server_object.check_channel_request(
+ kind, my_chanid)
if reason != OPEN_SUCCEEDED:
- self._log(DEBUG, 'Rejecting "%s" channel request from client.' % kind)
+ self._log(
+ DEBUG,
+ 'Rejecting "%s" channel request from client.' % kind)
reject = True
if reject:
msg = Message()
@@ -2274,8 +2488,10 @@ class Transport (threading.Thread, ClosingContextManager):
self._channels.put(my_chanid, chan)
self.channels_seen[my_chanid] = True
chan._set_transport(self)
- chan._set_window(self.default_window_size, self.default_max_packet_size)
- chan._set_remote_channel(chanid, initial_window_size, max_packet_size)
+ chan._set_window(
+ self.default_window_size, self.default_max_packet_size)
+ chan._set_remote_channel(
+ chanid, initial_window_size, max_packet_size)
finally:
self.lock.release()
m = Message()
@@ -2292,14 +2508,18 @@ class Transport (threading.Thread, ClosingContextManager):
self._x11_handler(chan, (origin_addr, origin_port))
elif kind == 'forwarded-tcpip':
chan.origin_addr = (origin_addr, origin_port)
- self._tcp_handler(chan, (origin_addr, origin_port), (server_addr, server_port))
+ self._tcp_handler(
+ chan,
+ (origin_addr, origin_port),
+ (server_addr, server_port)
+ )
else:
self._queue_incoming_channel(chan)
def _parse_debug(self, m):
- always_display = m.get_boolean()
+ m.get_boolean() # always_display
msg = m.get_string()
- lang = m.get_string()
+ m.get_string() # language
self._log(DEBUG, 'Debug msg: {0}'.format(util.safe_string(msg)))
def _get_subsystem_handler(self, name):
@@ -2346,7 +2566,6 @@ class SecurityOptions (object):
``ValueError`` will be raised. If you try to assign something besides a
tuple to one of the fields, ``TypeError`` will be raised.
"""
- #__slots__ = [ 'ciphers', 'digests', 'key_types', 'kex', 'compression', '_transport' ]
__slots__ = '_transport'
def __init__(self, transport):
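
The ``SecurityOptions`` docstring above promises ``ValueError`` for unknown
algorithm names and ``TypeError`` for non-tuple assignments; a minimal sketch
of that behavior, using an unconnected socket and illustrative cipher names::

    import socket

    import paramiko

    # Wrap a plain, unconnected socket so no network traffic is needed.
    transport = paramiko.Transport(socket.socket())
    opts = transport.get_security_options()

    print(opts.ciphers)              # current preference order, as a tuple
    opts.ciphers = ('aes256-ctr',)   # a tuple of known algorithm names

    try:
        opts.kex = 'diffie-hellman-group14-sha1'  # bare string, not a tuple
    except TypeError:
        print('non-tuple assignment rejected, as documented above')
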
diff --git a/paramiko/util.py b/paramiko/util.py
index 5ad1e3fd..de099c0c 100644
--- a/paramiko/util.py
+++ b/paramiko/util.py
@@ -35,7 +35,8 @@ from paramiko.config import SSHConfig
def inflate_long(s, always_positive=False):
- """turns a normalized byte string into a long-int (adapted from Crypto.Util.number)"""
+ """turns a normalized byte string into a long-int
+ (adapted from Crypto.Util.number)"""
out = long(0)
negative = 0
if not always_positive and (len(s) > 0) and (byte_ord(s[0]) >= 0x80):
@@ -48,17 +49,19 @@ def inflate_long(s, always_positive=False):
# noinspection PyAugmentAssignment
s = filler * (4 - len(s) % 4) + s
for i in range(0, len(s), 4):
- out = (out << 32) + struct.unpack('>I', s[i:i+4])[0]
+ out = (out << 32) + struct.unpack('>I', s[i:i + 4])[0]
if negative:
out -= (long(1) << (8 * len(s)))
return out
+
deflate_zero = zero_byte if PY2 else 0
deflate_ff = max_byte if PY2 else 0xff
def deflate_long(n, add_sign_padding=True):
- """turns a long-int into a normalized byte string (adapted from Crypto.Util.number)"""
+ """turns a long-int into a normalized byte string
+ (adapted from Crypto.Util.number)"""
# after much testing, this algorithm was deemed to be the fastest
s = bytes()
n = long(n)
@@ -91,16 +94,16 @@ def format_binary(data, prefix=''):
x = 0
out = []
while len(data) > x + 16:
- out.append(format_binary_line(data[x:x+16]))
+ out.append(format_binary_line(data[x:x + 16]))
x += 16
if x < len(data):
out.append(format_binary_line(data[x:]))
- return [prefix + x for x in out]
+ return [prefix + line for line in out]
def format_binary_line(data):
left = ' '.join(['%02X' % byte_ord(c) for c in data])
- right = ''.join([('.%c..' % c)[(byte_ord(c)+63)//95] for c in data])
+ right = ''.join([('.%c..' % c)[(byte_ord(c) + 63) // 95] for c in data])
return '%-50s %s' % (left, right)
@@ -215,6 +218,7 @@ def mod_inverse(x, m):
u2 += m
return u2
+
_g_thread_ids = {}
_g_thread_counter = 0
_g_thread_lock = threading.Lock()
@@ -236,15 +240,16 @@ def get_thread_id():
def log_to_file(filename, level=DEBUG):
- """send paramiko logs to a logfile, if they're not already going somewhere"""
+ """send paramiko logs to a logfile,
+ if they're not already going somewhere"""
l = logging.getLogger("paramiko")
if len(l.handlers) > 0:
return
l.setLevel(level)
- f = open(filename, 'w')
+ f = open(filename, 'a')
lh = logging.StreamHandler(f)
- lh.setFormatter(logging.Formatter('%(levelname)-.3s [%(asctime)s.%(msecs)03d] thr=%(_threadid)-3d %(name)s: %(message)s',
- '%Y%m%d-%H:%M:%S'))
+ frm = '%(levelname)-.3s [%(asctime)s.%(msecs)03d] thr=%(_threadid)-3d %(name)s: %(message)s' # noqa
+ lh.setFormatter(logging.Formatter(frm, '%Y%m%d-%H:%M:%S'))
l.addHandler(lh)
@@ -253,6 +258,8 @@ class PFilter (object):
def filter(self, record):
record._threadid = get_thread_id()
return True
+
+
_pfilter = PFilter()
@@ -277,7 +284,7 @@ def constant_time_bytes_eq(a, b):
return False
res = 0
# noinspection PyUnresolvedReferences
- for i in (xrange if PY2 else range)(len(a)):
+ for i in (xrange if PY2 else range)(len(a)): # noqa: F821
res |= byte_ord(a[i]) ^ byte_ord(b[i])
return res == 0
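
The ``log_to_file`` hunk above switches the log file mode from ``'w'`` to
``'a'``, so repeated runs append to an existing log instead of truncating it;
a minimal usage sketch (the path is illustrative)::

    import logging

    import paramiko

    # First call attaches a handler to the "paramiko" logger; with the
    # change above, an existing log file is appended to, not overwritten.
    paramiko.util.log_to_file('/tmp/paramiko.log', level=logging.DEBUG)

    # Later calls are no-ops: log_to_file returns early once the logger
    # already has a handler.
    paramiko.util.log_to_file('/tmp/paramiko.log')
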
diff --git a/paramiko/win_pageant.py b/paramiko/win_pageant.py
index 4b482bee..c8c2c7bc 100644
--- a/paramiko/win_pageant.py
+++ b/paramiko/win_pageant.py
@@ -25,13 +25,13 @@ import array
import ctypes.wintypes
import platform
import struct
-from paramiko.util import *
+from paramiko.util import * # noqa
from paramiko.py3compat import b
try:
- import _thread as thread # Python 3.x
+ import _thread as thread # Python 3.x
except ImportError:
- import thread # Python 2.5-2.7
+ import thread # Python 2.5-2.7
from . import _winapi
@@ -57,7 +57,10 @@ def can_talk_to_agent():
return bool(_get_pageant_window_object())
-ULONG_PTR = ctypes.c_uint64 if platform.architecture()[0] == '64bit' else ctypes.c_uint32
+if platform.architecture()[0] == '64bit':
+ ULONG_PTR = ctypes.c_uint64
+else:
+ ULONG_PTR = ctypes.c_uint32
class COPYDATASTRUCT(ctypes.Structure):
@@ -91,7 +94,7 @@ def _query_pageant(msg):
with pymap:
pymap.write(msg)
# Create an array buffer containing the mapped filename
- char_buffer = array.array("b", b(map_name) + zero_byte)
+ char_buffer = array.array("b", b(map_name) + zero_byte) # noqa
char_buffer_address, char_buffer_size = char_buffer.buffer_info()
# Create a string to use for the SendMessage function call
cds = COPYDATASTRUCT(_AGENT_COPYDATA_ID, char_buffer_size,
diff --git a/setup.cfg b/setup.cfg
index 5e409001..f2c1499d 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,2 +1,10 @@
[wheel]
universal = 1
+
+[coverage:run]
+omit = paramiko/_winapi.py
+
+[flake8]
+exclude = sites,.git,build,dist,demos,tests
+ignore = E124,E125,E128,E261,E301,E302,E303,E402
+max-line-length = 79
diff --git a/setup.py b/setup.py
index 4f370d63..80d5ea7f 100644
--- a/setup.py
+++ b/setup.py
@@ -16,6 +16,13 @@
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA.
+import sys
+from setuptools import setup
+
+if sys.platform == 'darwin':
+ import setup_helper
+
+ setup_helper.install_custom_make_tarball()
longdesc = '''
This is a library for making SSH2 connections (client or server).
@@ -26,19 +33,10 @@ are supported. SFTP client and server mode are both supported too.
Required packages:
Cryptography
-To install the `in-development version
-<https://github.com/paramiko/paramiko/tarball/master#egg=paramiko-dev>`_, use
-`pip install paramiko==dev`.
+To install the development version, ``pip install -e
+git+https://github.com/paramiko/paramiko/#egg=paramiko``.
'''
-import sys
-from setuptools import setup
-
-
-if sys.platform == 'darwin':
- import setup_helper
- setup_helper.install_custom_make_tarball()
-
# Version info -- read without importing
_locals = {}
@@ -46,22 +44,22 @@ with open('paramiko/_version.py') as fp:
exec(fp.read(), None, _locals)
version = _locals['__version__']
-
setup(
- name = "paramiko",
- version = version,
- description = "SSH2 protocol library",
- long_description = longdesc,
- author = "Jeff Forcier",
- author_email = "jeff@bitprophet.org",
- url = "https://github.com/paramiko/paramiko/",
- packages = [ 'paramiko' ],
- license = 'LGPL',
- platforms = 'Posix; MacOS X; Windows',
- classifiers = [
+ name="paramiko",
+ version=version,
+ description="SSH2 protocol library",
+ long_description=longdesc,
+ author="Jeff Forcier",
+ author_email="jeff@bitprophet.org",
+ url="https://github.com/paramiko/paramiko/",
+ packages=['paramiko'],
+ license='LGPL',
+ platforms='Posix; MacOS X; Windows',
+ classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
- 'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
+ 'License :: OSI Approved :: '
+ 'GNU Library or Lesser General Public License (LGPL)',
'Operating System :: OS Independent',
'Topic :: Internet',
'Topic :: Security :: Cryptography',
@@ -76,7 +74,7 @@ setup(
'Programming Language :: Python :: 3.5',
],
install_requires=[
- 'cryptography>=0.8',
+ 'cryptography>=1.1',
'pyasn1>=0.1.7',
],
)
diff --git a/setup_helper.py b/setup_helper.py
index 9e3834b3..c359a16c 100644
--- a/setup_helper.py
+++ b/setup_helper.py
@@ -77,7 +77,7 @@ def make_tarball(base_name, base_dir, compress='gzip', verbose=0, dry_run=0,
For 'gzip' and 'bzip2' the internal tarfile module will be used.
For 'compress' the .tar will be created using tarfile, and then
we will spawn 'compress' afterwards.
- The output tar file will be named 'base_name' + ".tar",
+ The output tar file will be named 'base_name' + ".tar",
possibly plus the appropriate compression extension (".gz",
".bz2" or ".Z"). Return the output filename.
"""
@@ -87,12 +87,14 @@ def make_tarball(base_name, base_dir, compress='gzip', verbose=0, dry_run=0,
# "create a tree of hardlinks" step! (Would also be nice to
# detect GNU tar to use its 'z' option and save a step.)
- compress_ext = { 'gzip': ".gz",
- 'bzip2': '.bz2',
- 'compress': ".Z" }
+ compress_ext = {
+ 'gzip': ".gz",
+ 'bzip2': '.bz2',
+ 'compress': ".Z",
+ }
# flags for compression program, each element of list will be an argument
- tarfile_compress_flag = {'gzip':'gz', 'bzip2':'bz2'}
+ tarfile_compress_flag = {'gzip': 'gz', 'bzip2': 'bz2'}
compress_flags = {'compress': ["-f"]}
if compress is not None and compress not in compress_ext.keys():
@@ -144,11 +146,10 @@ def make_tarball(base_name, base_dir, compress='gzip', verbose=0, dry_run=0,
_custom_formats = {
'gztar': (make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
'bztar': (make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"),
- 'ztar': (make_tarball, [('compress', 'compress')], "compressed tar file"),
- 'tar': (make_tarball, [('compress', None)], "uncompressed tar file"),
+ 'ztar': (make_tarball, [('compress', 'compress')], "compressed tar file"),
+ 'tar': (make_tarball, [('compress', None)], "uncompressed tar file"),
}
# Hack in and insert ourselves into the distutils code base
def install_custom_make_tarball():
distutils.archive_util.ARCHIVE_FORMATS.update(_custom_formats)
-
diff --git a/sites/www/changelog.rst b/sites/www/changelog.rst
index 7cca1840..8ffde787 100644
--- a/sites/www/changelog.rst
+++ b/sites/www/changelog.rst
@@ -2,11 +2,158 @@
Changelog
=========
-* :bug:`676` (via :issue:`677`) Fix a backwards incompatibility issue that
- cropped up in `SFTPFile.prefetch <~paramiko.sftp_file.prefetch>` re: the
- erroneously non-optional ``file_size`` parameter. Should only affect users
- who manually call ``prefetch``. Thanks to ``@stevevanhooser`` for catch &
+* :support:`- backported` A big formatting pass to clean up an enormous number
+ of invalid Sphinx reference links, discovered by switching to a modern,
+ rigorous nitpicking doc-building mode.
+* :bug:`900` (via :issue:`911`) Prefer newer ``ecdsa-sha2-nistp`` keys over RSA
+ and DSA keys during host key selection. This improves compatibility with
+ OpenSSH, both in terms of general behavior, and also re: ability to properly
+ leverage OpenSSH-modified ``known_hosts`` files. Credit: ``@kasdoe`` for
+ original report/PR and Pierce Lopez for the second draft.
+* :bug:`794` (via :issue:`981`) Prior support for ``ecdsa-sha2-nistp(384|521)``
+ algorithms didn't fully extend to covering host keys, preventing connection
+ to hosts which only offer these key types and no others. This is now fixed.
+ Thanks to ``@ncoult`` and ``@kasdoe`` for reports and Pierce Lopez for the
+ patch.
+* :support:`974 backported` Overhaul the codebase to be PEP-8, etc, compliant
+ (i.e. passes the maintainer's preferred `flake8 <http://flake8.pycqa.org/>`_
+ configuration) and add a ``flake8`` step to the Travis config. Big thanks to
+ Dorian Pula!
+* :bug:`683` Make ``util.log_to_file`` append instead of replace. Thanks
+ to ``@vlcinsky`` for the report.
+* :release:`2.0.5 <2017-02-20>`
+* :release:`1.18.2 <2017-02-20>`
+* :release:`1.17.4 <2017-02-20>`
+* :bug:`853 (1.17+)` Tweak how `RSAKey.__str__ <paramiko.rsakey.RSAKey>`
+ behaves so it doesn't cause ``TypeError`` under Python 3. Thanks to Francisco
+ Couzo for the report.
+* :bug:`862 (1.17+)` (via :issue:`863`) Avoid test suite exceptions on
+ platforms lacking ``errno.ETIME`` (which seems to be some FreeBSD and some
+ Windows environments.) Thanks to Sofian Brabez.
+* :bug:`44 (1.17+)` (via :issue:`891`) `SSHClient <paramiko.client.SSHClient>`
+ now gives its internal `Transport <paramiko.transport.Transport>` a handle on
+ itself, preventing garbage collection of the client until the session is
+ closed. Without this, some code which returns stream or transport objects
+  without the client that generated them would result in premature session
+ closure when the client was GCd. Credit: ``@w31rd0`` for original report,
+ Omer Anson for the patch.
+* :bug:`713 (<2.0)` (via :issue:`714` and :issue:`889`) Don't pass
+ initialization vectors to PyCrypto when dealing with counter-mode ciphers;
+ newer PyCrypto versions throw an exception otherwise (older ones simply
+ ignored this parameter altogether). Thanks to ``@jmh045000`` for report &
+ patches.
+* :bug:`895 (1.17+)` Fix a bug in server-mode concerning multiple interactive
+ auth steps (which were incorrectly responded to). Thanks to Dennis
+ Kaarsemaker for catch & patch.
+* :support:`866 backported (1.17+)` (also :issue:`838`) Remove an old
+ test-related file we don't support, and add PyPy to Travis-CI config. Thanks
+ to Pierce Lopez for the final patch and Pedro Rodrigues for an earlier
+ edition.
+* :release:`2.0.4 <2016-12-12>`
+* :release:`1.18.1 <2016-12-12>`
+* :bug:`859 (1.18+)` (via :issue:`860`) A tweak to the original patch
+ implementing :issue:`398` was not fully applied, causing calls to
+ `~paramiko.client.SSHClient.invoke_shell` to fail with ``AttributeError``.
+ This has been fixed. Patch credit: Kirk Byers.
+* :bug:`-` Accidentally merged the new features from 1.18.0 into the
+ 2.0.x bugfix-only branch. This included merging a bug in one of those new
+ features (breaking `~paramiko.client.SSHClient.invoke_shell` with an
+ ``AttributeError``.) The offending code has been stripped out of the 2.0.x
+ line (but of course, remains in 2.1.x and above.)
+* :release:`2.0.3 <2016-12-09>`
+* :release:`1.18.0 <2016-12-09>`
+* :release:`1.17.3 <2016-12-09>`
+* :bug:`802 (1.17+)` (via :issue:`804`) Update our vendored Windows API module
+ to address errors of the form ``AttributeError: 'module' object has no
+ attribute 'c_ssize_t'``. Credit to Jason R. Coombs.
+* :bug:`824 (1.17+)` Fix the implementation of ``PKey.write_private_key_file``
+ (this method is only publicly defined on subclasses; the fix was in the
+ private real implementation) so it passes the correct params to ``open()``.
+ This bug apparently went unnoticed and unfixed for 12 entire years. Congrats
+ to John Villalovos for noticing & submitting the patch!
+* :support:`801 backported (1.17+)` Skip a Unix-only test when on Windows;
+ thanks to Gabi Davar.
+* :support:`792 backported (1.17+)` Minor updates to the README and demos;
+ thanks to Alan Yee.
+* :feature:`780 (1.18+)` (also :issue:`779`, and may help users affected by
+ :issue:`520`) Add an optional ``timeout`` parameter to
+ `Transport.start_client <paramiko.transport.Transport.start_client>` (and
+ feed it the value of the configured connection timeout when used within
+ `SSHClient <paramiko.client.SSHClient>`.) This helps prevent situations where
+ network connectivity isn't timing out, but the remote server is otherwise
+ unable to service the connection in a timely manner. Credit to
+ ``@sanseihappa``.
+* :bug:`742` (also re: :issue:`559`) Catch ``AssertionError`` thrown by
+ Cryptography when attempting to load bad ECDSA keys, turning it into an
+ ``SSHException``. This moves the behavior in line with other "bad keys"
+ situations, re: Paramiko's main auth loop. Thanks to MengHuan Yu for the
patch.
+* :bug:`789 (1.17+)` Add a missing ``.closed`` attribute (plus ``._closed``
+ because reasons) to `ProxyCommand <paramiko.proxy.ProxyCommand>` so the
+ earlier partial fix for :issue:`520` works in situations where one is
+ gatewaying via ``ProxyCommand``.
+* :bug:`334 (1.17+)` Make the ``subprocess`` import in ``proxy.py`` lazy so
+ users on platforms without it (such as Google App Engine) can import Paramiko
+ successfully. (Relatedly, make it easier to tweak an active socket check
+ timeout [in `Transport <paramiko.transport.Transport>`] which was previously
+ hardcoded.) Credit: Shinya Okano.
+* :support:`854 backported (1.17+)` Fix incorrect docstring/param-list for
+ `Transport.auth_gssapi_keyex
+ <paramiko.transport.Transport.auth_gssapi_keyex>` so it matches the real
+ signature. Caught by ``@Score_Under``.
+* :bug:`681 (1.17+)` Fix a Python3-specific bug re: the handling of read
+ buffers when using ``ProxyCommand``. Thanks to Paul Kapp for catch & patch.
+* :support:`819 backported (>=1.15,<2.0)` Document how lacking ``gmp`` headers
+ at install time can cause a significant performance hit if you build PyCrypto
+ from source. (Most system-distributed packages already have this enabled.)
+* :release:`2.0.2 <2016-07-25>`
+* :release:`1.17.2 <2016-07-25>`
+* :release:`1.16.3 <2016-07-25>`
+* :bug:`673 (1.16+)` (via :issue:`681`) Fix protocol banner read errors
+ (``SSHException``) which would occasionally pop up when using
+ ``ProxyCommand`` gatewaying. Thanks to ``@Depado`` for the initial report and
+ Paul Kapp for the fix.
+* :bug:`774 (1.16+)` Add a ``_closed`` private attribute to
+ `~paramiko.channel.Channel` objects so that they continue functioning when
+ used as proxy sockets under Python 3 (e.g. as ``direct-tcpip`` gateways for
+ other Paramiko connections.)
+* :bug:`758 (1.16+)` Apply type definitions to ``_winapi`` module from
+ `jaraco.windows <https://github.com/jaraco/jaraco.windows>`_ 3.6.1. This
+ should address issues on Windows platforms that often result in errors like
+ ``ArgumentError: [...] int too long to convert``. Thanks to ``@swohlerLL``
+ for the report and Jason R. Coombs for the patch.
+* :release:`2.0.1 <2016-06-21>`
+* :release:`1.17.1 <2016-06-21>`
+* :release:`1.16.2 <2016-06-21>`
+* :bug:`520 (1.16+)` (Partial fix) Fix at least one instance of race condition
+ driven threading hangs at end of the Python interpreter session. (Includes a
+ docs update as well - always make sure to ``.close()`` your clients!)
+* :bug:`537 (1.16+)` Fix a bug in `BufferedPipe.set_event
+ <paramiko.buffered_pipe.BufferedPipe.set_event>` which could cause
+ deadlocks/hangs when one uses `select.select` against
+ `~paramiko.channel.Channel` objects (or otherwise calls `Channel.fileno
+ <paramiko.channel.Channel.fileno>` after the channel has closed). Thanks to
+ Przemysław Strzelczak for the report & reproduction case, and to Krzysztof
+ Rusek for the fix.
+* :release:`2.0.0 <2016-04-28>`
+* :release:`1.17.0 <2016-04-28>`
+* :release:`1.16.1 <2016-04-28>`
+* :release:`1.15.5 <2016-04-28>`
+* :feature:`731` (working off the earlier :issue:`611`) Add support for 384-
+  and 521-bit elliptic curve groups in ECDSA key types (aka
+ ``ecdsa-sha2-nistp384`` / ``ecdsa-sha2-nistp521``). Thanks to Michiel Tiller
+ and ``@CrazyCasta`` for the patches.
+* :bug:`670` Due to an earlier bugfix, less-specific ``Host`` blocks'
+ ``ProxyCommand`` values were overriding ``ProxyCommand none`` in
+ more-specific ``Host`` blocks. This has been fixed in a backwards compatible
+ manner (i.e. ``ProxyCommand none`` continues to appear as a total lack of any
+ ``proxycommand`` key in parsed config structures). Thanks to Pat Brisbin for
+ the catch.
+* :bug:`676` (via :issue:`677`) Fix a backwards incompatibility issue that
+ cropped up in `SFTPFile.prefetch <paramiko.sftp_file.SFTPFile.prefetch>` re:
+ the erroneously non-optional ``file_size`` parameter. Should only affect
+ users who manually call ``prefetch``. Thanks to ``@stevevanhooser`` for catch
+ & patch.
* :feature:`394` Replace PyCrypto with the Python Cryptographic Authority
(PyCA) 'Cryptography' library suite. This improves security, installability,
and performance; adds PyPy support; and much more.
@@ -34,12 +181,12 @@ Changelog
to Stephen C. Pope for the patch.
* :bug:`716` Fix a Python 3 compatibility issue when handling two-factor
authentication. Thanks to Mateusz Kowalski for the catch & original patch.
-* :support:`729 backported` Clean up ``setup.py`` to always use ``setuptools``,
- not doing so was a historical artifact from bygone days. Thanks to Alex
- Gaynor.
-* :bug:`649 major` Update the module in charge of handling SSH moduli so it's
- consistent with OpenSSH behavior re: prime number selection. Thanks to Damien
- Tournoud for catch & patch.
+* :support:`729 backported (>=1.15,<2.0)` Clean up ``setup.py`` to always use
+ ``setuptools``, not doing so was a historical artifact from bygone days.
+ Thanks to Alex Gaynor.
+* :bug:`649 major (==1.17)` Update the module in charge of handling SSH moduli
+ so it's consistent with OpenSSH behavior re: prime number selection. Thanks
+ to Damien Tournoud for catch & patch.
* :bug:`617` (aka `fabric/fabric#1429
<https://github.com/fabric/fabric/issues/1429>`_; via :issue:`679`; related:
:issue:`678`, :issue:`685`, :issue:`615` & :issue:`616`) Fix up
@@ -48,30 +195,32 @@ Changelog
for the report & Marius Gedminas for the patch.
* :bug:`613` (via :issue:`619`) Update to ``jaraco.windows`` 3.4.1 to fix some
errors related to ``ctypes`` on Windows platforms. Credit to Jason R. Coombs.
-* :support:`621 backported` Annotate some public attributes on
+* :support:`621 backported (>=1.15,<2.0)` Annotate some public attributes on
`~paramiko.channel.Channel` such as ``.closed``. Thanks to Sergey Vasilyev
for the report.
* :bug:`632` Fix logic bug in the SFTP client's callback-calling functionality;
previously there was a chance the given callback would fire twice at the end
of a transfer. Thanks to ``@ab9-er`` for catch & original patch.
-* :support:`612` Identify & work around a race condition in the test for
- handshake timeouts, which was causing frequent test failures for a subset of
- contributors as well as Travis-CI (usually, but not always, limited to Python
- 3.5). Props to Ed Kellett for assistance during some of the troubleshooting.
-* :support:`697` Remove whitespace in our ``setup.py``'s ``install_requires``
- as it triggers occasional bugs in some versions of ``setuptools``. Thanks to
- Justin Lecher for catch & original patch.
+* :support:`612 backported (>=1.15,<2.0)` Identify & work around a race
+ condition in the test for handshake timeouts, which was causing frequent test
+ failures for a subset of contributors as well as Travis-CI (usually, but not
+ always, limited to Python 3.5). Props to Ed Kellett for assistance during
+ some of the troubleshooting.
+* :support:`697 backported (>=1.15,<2.0)` Remove whitespace in our
+ ``setup.py``'s ``install_requires`` as it triggers occasional bugs in some
+ versions of ``setuptools``. Thanks to Justin Lecher for catch & original
+ patch.
* :bug:`499` Strip trailing/leading whitespace from lines when parsing SSH
config files - this brings things in line with OpenSSH behavior. Thanks to
Alfredo Esteban for the original report and Nick Pillitteri for the patch.
* :bug:`652` Fix behavior of ``gssapi-with-mic`` auth requests so they fail
gracefully (allowing followup via other auth methods) instead of raising an
exception. Patch courtesy of ``@jamercee``.
-* :feature:`588` Add missing file-like object methods for
+* :feature:`588 (==1.17)` Add missing file-like object methods for
`~paramiko.file.BufferedFile` and `~paramiko.sftp_file.SFTPFile`. Thanks to
Adam Meily for the patch.
-* :support:`636` Clean up and enhance the README (and rename it to
- ``README.rst`` from just ``README``). Thanks to ``@LucasRMehl``.
+* :support:`636 backported (>=1.15,<2.0)` Clean up and enhance the README (and
+ rename it to ``README.rst`` from just ``README``). Thanks to ``@LucasRMehl``.
* :release:`1.16.0 <2015-11-04>`
* :bug:`194 major` (also :issue:`562`, :issue:`530`, :issue:`576`) Streamline
use of ``stat`` when downloading SFTP files via `SFTPClient.get
@@ -94,7 +243,7 @@ Changelog
* :release:`1.15.4 <2015-11-02>`
* :release:`1.14.3 <2015-11-02>`
* :release:`1.13.4 <2015-11-02>`
-* :bug:`366` Fix `~paramiko.sftp_attributes.SFTPAttributes` so its string
+* :bug:`366` Fix `~paramiko.sftp_attr.SFTPAttributes` so its string
representation doesn't raise exceptions on empty/initialized instances. Patch
by Ulrich Petri.
* :bug:`359` Use correct attribute name when trying to use Python 3's
@@ -127,8 +276,8 @@ Changelog
class. Thanks to Jared Hance for the patch.
* :support:`516 backported` Document `~paramiko.agent.AgentRequestHandler`.
Thanks to ``@toejough`` for report & suggestions.
-* :bug:`496` Fix a handful of small but critical bugs in Paramiko's GSSAPI
- support (note: this includes switching from PyCrypo's Random to
+* :bug:`496 (1.15+)` Fix a handful of small but critical bugs in Paramiko's
+  GSSAPI support (note: this includes switching from PyCrypto's Random to
`os.urandom`). Thanks to Anselm Kruis for catch & patch.
* :bug:`491` (combines :issue:`62` and :issue:`439`) Implement timeout
functionality to address hangs from dropped network connections and/or failed
@@ -205,8 +354,9 @@ Changelog
* :release:`1.15.1 <2014-09-22>`
* :bug:`399` SSH agent forwarding (potentially other functionality as
well) would hang due to incorrect values passed into the new window size
- arguments for `.Transport` (thanks to a botched merge). This has been
- corrected. Thanks to Dylan Thacker-Smith for the report & patch.
+ arguments for `~paramiko.transport.Transport` (thanks to a botched merge).
+ This has been corrected. Thanks to Dylan Thacker-Smith for the report &
+ patch.
* :feature:`167` Add `~paramiko.config.SSHConfig.get_hostnames` for easier
introspection of a loaded SSH config file or object. Courtesy of Søren
Løvborg.
@@ -218,10 +368,10 @@ Changelog
(:ref:`installation docs here <gssapi>`). Mega thanks to Sebastian Deiß, with
assist by Torsten Landschoff.
- .. note::
- Unix users should be aware that the ``python-gssapi`` library (a
- requirement for using this functionality) only appears to support
- Python 2.7 and up at this time.
+ .. note::
+ Unix users should be aware that the ``python-gssapi`` library (a
+ requirement for using this functionality) only appears to support
+ Python 2.7 and up at this time.
* :bug:`346 major` Fix an issue in private key files' encryption salts that
could cause tracebacks and file corruption if keys were re-encrypted. Credit
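
The :feature:`167` entry above mentions ``SSHConfig.get_hostnames``; a minimal
sketch against a made-up config snippet::

    import paramiko
    from paramiko.py3compat import StringIO

    config_text = """
    Host web*.example.com
        User deploy
    Host bastion
        HostName 203.0.113.1
    """

    config = paramiko.SSHConfig()
    config.parse(StringIO(config_text))

    # All Host patterns seen in the parsed config.
    print(config.get_hostnames())                # {'bastion', 'web*.example.com'} (order may vary)
    # Per-host lookup behaves as before.
    print(config.lookup('bastion')['hostname'])  # '203.0.113.1'
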
diff --git a/sites/www/conf.py b/sites/www/conf.py
index 0b0fb85c..c7ba0a86 100644
--- a/sites/www/conf.py
+++ b/sites/www/conf.py
@@ -8,8 +8,7 @@ from shared_conf import *
# Releases changelog extension
extensions.append('releases')
-# Paramiko 1.x tags start with 'v'. Meh.
-releases_release_uri = "https://github.com/paramiko/paramiko/tree/v%s"
+releases_release_uri = "https://github.com/paramiko/paramiko/tree/%s"
releases_issue_uri = "https://github.com/paramiko/paramiko/issues/%s"
# Default is 'local' building, but reference the public docs site when building
diff --git a/sites/www/faq.rst b/sites/www/faq.rst
index a5d9b383..74b7501e 100644
--- a/sites/www/faq.rst
+++ b/sites/www/faq.rst
@@ -24,3 +24,13 @@ However, **closed does not imply locked** - affected users can still post
comments on such tickets - and **we will always consider actual patch
submissions for these issues**, provided they can get +1s from similarly
affected users and are proven to not break existing functionality.
+
+I'm having strange issues with my code hanging at shutdown!
+===========================================================
+
+Make sure you explicitly ``.close()`` your connection objects (usually
+``SSHClient``) if you're having any sort of hang/freeze at shutdown time!
+
+Doing so isn't strictly necessary 100% of the time, but it is almost always the
+right solution if you run into the various corner cases that cause race
+conditions, etc.
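
A minimal sketch of the advice above; the host, credentials and command are
placeholders::

    import paramiko

    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.set_missing_host_key_policy(paramiko.WarningPolicy())
    try:
        client.connect('host.example.com', username='user', password='secret')
        stdin, stdout, stderr = client.exec_command('uptime')
        print(stdout.read())
    finally:
        # Explicit close avoids the interpreter-shutdown races described
        # above.
        client.close()
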
diff --git a/sites/www/installing-1.x.rst b/sites/www/installing-1.x.rst
index 0c2424bb..356fac49 100644
--- a/sites/www/installing-1.x.rst
+++ b/sites/www/installing-1.x.rst
@@ -48,6 +48,32 @@ Tools on the Mac, or the ``build-essential`` package on Ubuntu or Debian Linux
-- basically, anything with ``gcc``, ``make`` and so forth) as well as the
Python development libraries, often named ``python-dev`` or similar.
+Slow vs fast crypto math
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+PyCrypto attempts to use the ``gmp`` C math library if it is present on your
+system, which enables what it internally calls "fastmath" (``_fastmath.so``).
+When those headers are not available, it falls back to "slowmath"
+(``_slowmath.py``) which is a pure-Python implementation.
+
+Real-world tests have shown significant benefits to using the C version of this
+code; thus we strongly recommend you install the ``gmp`` development headers
+**before** installing Paramiko/PyCrypto. E.g.::
+
+ $ apt-get install libgmp-dev # or just apt
+ $ yum install gmp-devel # or dnf
+ $ brew install gmp
+
+If you're unsure which version of math you've ended up with, a quick way to
+check is to examine whether ``_fastmath.so`` or ``_slowmath.py`` appears in the
+output of::
+
+ from Crypto.PublicKey import RSA
+ print(RSA._impl._math)
+
+Windows
+~~~~~~~
+
For **Windows** users we recommend using :ref:`pypm`, installing a C
development environment such as `Cygwin <http://cygwin.com>`_ or obtaining a
precompiled Win32 PyCrypto package from `voidspace's Python modules page
diff --git a/sites/www/installing.rst b/sites/www/installing.rst
index 5a41a76b..6537b850 100644
--- a/sites/www/installing.rst
+++ b/sites/www/installing.rst
@@ -19,10 +19,6 @@ via `pip <http://pip-installer.org>`_::
$ pip install paramiko
-.. note::
- Users who want the bleeding edge can install the development version via
- ``pip install paramiko==dev``.
-
We currently support **Python 2.6, 2.7, 3.3+, and PyPy**. Users on Python 2.5
or older (or 3.2 or older) are urged to upgrade.
diff --git a/tasks.py b/tasks.py
index d2bed606..42c18bd0 100644
--- a/tasks.py
+++ b/tasks.py
@@ -1,10 +1,9 @@
-from os import mkdir
from os.path import join
from shutil import rmtree, copytree
-from invoke import Collection, ctask as task
+from invoke import Collection, task
from invocations.docs import docs, www, sites
-from invocations.packaging import publish
+from invocations.packaging.release import ns as release_coll, publish
# Until we move to spec-based testing
@@ -24,22 +23,43 @@ def coverage(ctx):
# Until we stop bundling docs w/ releases. Need to discover use cases first.
+# TODO: would be nice to tie this into our own version of build() too, but
+# still have publish() use that build()...really need to try out classes!
@task
-def release(ctx, sdist=True, wheel=True):
+def release(ctx, sdist=True, wheel=True, sign=True, dry_run=False):
"""
- Wraps invocations.packaging.release to add baked-in docs folder.
+ Wraps invocations.packaging.publish to add baked-in docs folder.
"""
# Build docs first. Use terribad workaround pending invoke #146
- ctx.run("inv docs")
+ ctx.run("inv docs", pty=True, hide=False)
# Move the built docs into where Epydocs used to live
target = 'docs'
rmtree(target, ignore_errors=True)
# TODO: make it easier to yank out this config val from the docs coll
copytree('sites/docs/_build', target)
# Publish
- publish(ctx, sdist=sdist, wheel=wheel)
+ publish(ctx, sdist=sdist, wheel=wheel, sign=sign, dry_run=dry_run)
# Remind
- print("\n\nDon't forget to update RTD's versions page for new minor releases!")
+ print("\n\nDon't forget to update RTD's versions page for new minor "
+ "releases!")
-ns = Collection(test, coverage, release, docs, www, sites)
+# TODO: "replace one task with another" needs a better public API, this is
+# using unpublished internals & skips all the stuff add_task() does re:
+# aliasing, defaults etc.
+release_coll.tasks['publish'] = release
+
+ns = Collection(test, coverage, release_coll, docs, www, sites)
+ns.configure({
+ 'packaging': {
+ # NOTE: many of these are also set in kwarg defaults above; but having
+ # them here too means once we get rid of our custom release(), the
+ # behavior stays.
+ 'sign': True,
+ 'wheel': True,
+ 'changelog_file': join(
+ www.configuration()['sphinx']['source'],
+ 'changelog.rst',
+ ),
+ },
+})
diff --git a/test.py b/test.py
index a1f13d85..7849c149 100755
--- a/test.py
+++ b/test.py
@@ -22,6 +22,7 @@
do the unit tests!
"""
+# flake8: noqa
import os
import re
import sys
diff --git a/tests/loop.py b/tests/loop.py
index 4f5dc163..e805ad96 100644
--- a/tests/loop.py
+++ b/tests/loop.py
@@ -37,9 +37,11 @@ class LoopSocket (object):
self.__cv = threading.Condition(self.__lock)
self.__timeout = None
self.__mate = None
+ self._closed = False
def close(self):
self.__unlink()
+ self._closed = True
try:
self.__lock.acquire()
self.__in_buffer = bytes()
diff --git a/tests/stub_sftp.py b/tests/stub_sftp.py
index 24380ba1..334af561 100644
--- a/tests/stub_sftp.py
+++ b/tests/stub_sftp.py
@@ -22,8 +22,10 @@ A stub SFTP server for loopback SFTP testing.
import os
import sys
-from paramiko import ServerInterface, SFTPServerInterface, SFTPServer, SFTPAttributes, \
- SFTPHandle, SFTP_OK, AUTH_SUCCESSFUL, OPEN_SUCCEEDED
+from paramiko import (
+ ServerInterface, SFTPServerInterface, SFTPServer, SFTPAttributes,
+ SFTPHandle, SFTP_OK, AUTH_SUCCESSFUL, OPEN_SUCCEEDED,
+)
from paramiko.common import o666
@@ -55,7 +57,7 @@ class StubSFTPHandle (SFTPHandle):
class StubSFTPServer (SFTPServerInterface):
# assume current folder is a fine root
- # (the tests always create and eventualy delete a subfolder, so there shouldn't be any mess)
+ # (the tests always create and eventually delete a subfolder, so there shouldn't be any mess)
ROOT = os.getcwd()
def _realpath(self, path):
diff --git a/tests/test_auth.py b/tests/test_auth.py
index 23517790..96f7611c 100644
--- a/tests/test_auth.py
+++ b/tests/test_auth.py
@@ -24,9 +24,10 @@ import sys
import threading
import unittest
-from paramiko import Transport, ServerInterface, RSAKey, DSSKey, \
- BadAuthenticationType, InteractiveQuery, \
- AuthenticationException
+from paramiko import (
+ Transport, ServerInterface, RSAKey, DSSKey, BadAuthenticationType,
+ InteractiveQuery, AuthenticationException,
+)
from paramiko import AUTH_FAILED, AUTH_PARTIALLY_SUCCESSFUL, AUTH_SUCCESSFUL
from paramiko.py3compat import u
from tests.loop import LoopSocket
diff --git a/tests/test_client.py b/tests/test_client.py
index f42d79d9..9c5761d6 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -182,7 +182,7 @@ class SSHClientTest (unittest.TestCase):
"""
verify that SSHClient works with an ECDSA key.
"""
- self._test_connection(key_filename=test_path('test_ecdsa.key'))
+ self._test_connection(key_filename=test_path('test_ecdsa_256.key'))
def test_3_multiple_key_files(self):
"""
@@ -199,8 +199,8 @@ class SSHClientTest (unittest.TestCase):
for attempt, accept in (
(['rsa', 'dss'], ['dss']), # Original test #3
(['dss', 'rsa'], ['dss']), # Ordering matters sometimes, sadly
- (['dss', 'rsa', 'ecdsa'], ['dss']), # Try ECDSA but fail
- (['rsa', 'ecdsa'], ['ecdsa']), # ECDSA success
+ (['dss', 'rsa', 'ecdsa_256'], ['dss']), # Try ECDSA but fail
+ (['rsa', 'ecdsa_256'], ['ecdsa']), # ECDSA success
):
try:
self._test_connection(
@@ -357,7 +357,7 @@ class SSHClientTest (unittest.TestCase):
# NOTE: re #387, re #394
# If pkey module used within Client._auth isn't correctly handling auth
# errors (e.g. if it allows things like ValueError to bubble up as per
- # midway thru #394) client.connect() will fail (at key load step)
+ # midway through #394) client.connect() will fail (at key load step)
# instead of succeeding (at password step)
kwargs = dict(
# Password-protected key whose passphrase is not 'pygmalion' (it's
diff --git a/tests/test_ecdsa.key b/tests/test_ecdsa_256.key
index 42d44734..42d44734 100644
--- a/tests/test_ecdsa.key
+++ b/tests/test_ecdsa_256.key
diff --git a/tests/test_ecdsa_384.key b/tests/test_ecdsa_384.key
new file mode 100644
index 00000000..796bf417
--- /dev/null
+++ b/tests/test_ecdsa_384.key
@@ -0,0 +1,6 @@
+-----BEGIN EC PRIVATE KEY-----
+MIGkAgEBBDBDdO8IXvlLJgM7+sNtPl7tI7FM5kzuEUEEPRjXIPQM7mISciwJPBt+
+y43EuG8nL4mgBwYFK4EEACKhZANiAAQWxom0C1vQAGYhjdoREMVmGKBWlisDdzyk
+mgyUjKpiJ9WfbIEVLsPGP8OdNjhr1y/8BZNIts+dJd6VmYw+4HzB+4F+U1Igs8K0
+JEvh59VNkvWheViadDXCM2MV8Nq+DNg=
+-----END EC PRIVATE KEY-----
diff --git a/tests/test_ecdsa_521.key b/tests/test_ecdsa_521.key
new file mode 100644
index 00000000..b87dc90f
--- /dev/null
+++ b/tests/test_ecdsa_521.key
@@ -0,0 +1,7 @@
+-----BEGIN EC PRIVATE KEY-----
+MIHcAgEBBEIAprQtAS3OF6iVUkT8IowTHWicHzShGgk86EtuEXvfQnhZFKsWm6Jo
+iqAr1yEaiuI9LfB3Xs8cjuhgEEfbduYr/f6gBwYFK4EEACOhgYkDgYYABACaOaFL
+ZGuxa5AW16qj6VLypFbLrEWrt9AZUloCMefxO8bNLjK/O5g0rAVasar1TnyHE9qj
+4NwzANZASWjQNbc4MAG8vzqezFwLIn/kNyNTsXNfqEko9OgHZknlj2Z79dwTJcRA
+L4QLcT5aND0EHZLB2fAUDXiWIb2j4rg1mwPlBMiBXA==
+-----END EC PRIVATE KEY-----
diff --git a/tests/test_ecdsa_password.key b/tests/test_ecdsa_password_256.key
index eb7910ed..eb7910ed 100644
--- a/tests/test_ecdsa_password.key
+++ b/tests/test_ecdsa_password_256.key
diff --git a/tests/test_ecdsa_password_384.key b/tests/test_ecdsa_password_384.key
new file mode 100644
index 00000000..eba33c14
--- /dev/null
+++ b/tests/test_ecdsa_password_384.key
@@ -0,0 +1,9 @@
+-----BEGIN EC PRIVATE KEY-----
+Proc-Type: 4,ENCRYPTED
+DEK-Info: AES-128-CBC,7F7B5DBE4CE040D822441AFE7A023A1D
+
+y/d6tGonAXYgJniQoFCdto+CuT1y1s41qzwNLN9YdNq/+R/dtQvZAaOuGtHJRFE6
+wWabhY1bSjavVPT2z1Zw1jhDJX5HGrf9LDoyORKtUWtUJoUvGdYLHbcg8Q+//WRf
+R0A01YuSw1SJX0a225S1aRcsDAk1k5F8EMb8QzSSDgjAOI8ldQF35JI+ofNSGjgS
+BPOlorQXTJxDOGmokw/Wql6MbhajXKPO39H2Z53W88U=
+-----END EC PRIVATE KEY-----
diff --git a/tests/test_ecdsa_password_521.key b/tests/test_ecdsa_password_521.key
new file mode 100644
index 00000000..5986b930
--- /dev/null
+++ b/tests/test_ecdsa_password_521.key
@@ -0,0 +1,10 @@
+-----BEGIN EC PRIVATE KEY-----
+Proc-Type: 4,ENCRYPTED
+DEK-Info: AES-128-CBC,AEB2DE62C65D1A88C4940A3476B2F10A
+
+5kNk/FFPbHa0402QTrgpIT28uirJ4Amvb2/ryOEyOCe0NPbTLCqlQekj2RFYH2Un
+pgCLUDkelKQv4pyuK8qWS7R+cFjE/gHHCPUWkK3djZUC8DKuA9lUKeQIE+V1vBHc
+L5G+MpoYrPgaydcGx/Uqnc/kVuZx1DXLwrGGtgwNROVBtmjXC9EdfeXHLL1y0wvH
+paNgacJpUtgqJEmiehf7eL/eiReegG553rZK3jjfboGkREUaKR5XOgamiKUtgKoc
+sMpImVYCsRKd/9RI+VOqErZaEvy/9j0Ye3iH32wGOaA=
+-----END EC PRIVATE KEY-----
diff --git a/tests/test_gssapi.py b/tests/test_gssapi.py
index 96c268d9..bc220108 100644
--- a/tests/test_gssapi.py
+++ b/tests/test_gssapi.py
@@ -104,9 +104,11 @@ class GSSAPITest(unittest.TestCase):
status = gss_srv_ctxt.verify_mic(mic_msg, mic_token)
self.assertEquals(0, status)
else:
- gss_flags = sspicon.ISC_REQ_INTEGRITY |\
- sspicon.ISC_REQ_MUTUAL_AUTH |\
- sspicon.ISC_REQ_DELEGATE
+ gss_flags = (
+ sspicon.ISC_REQ_INTEGRITY |
+ sspicon.ISC_REQ_MUTUAL_AUTH |
+ sspicon.ISC_REQ_DELEGATE
+ )
# Initialize a GSS-API context.
target_name = "host/" + socket.getfqdn(targ_name)
gss_ctxt = sspi.ClientAuth("Kerberos",
diff --git a/tests/test_hostkeys.py b/tests/test_hostkeys.py
index 2bdcad9c..2c7ceeb9 100644
--- a/tests/test_hostkeys.py
+++ b/tests/test_hostkeys.py
@@ -115,3 +115,15 @@ class HostKeysTest (unittest.TestCase):
self.assertEqual(b'7EC91BB336CB6D810B124B1353C32396', fp)
fp = hexlify(hostdict['secure.example.com']['ssh-dss'].get_fingerprint()).upper()
self.assertEqual(b'4478F0B9A23CC5182009FF755BC1D26C', fp)
+
+ def test_delitem(self):
+ hostdict = paramiko.HostKeys('hostfile.temp')
+ target = 'happy.example.com'
+ entry = hostdict[target] # will KeyError if not present
+ del hostdict[target]
+ try:
+ entry = hostdict[target]
+ except KeyError:
+ pass # Good
+ else:
+ assert False, "Entry was not deleted from HostKeys on delitem!"
diff --git a/tests/test_packetizer.py b/tests/test_packetizer.py
index ccfe26bd..02173292 100644
--- a/tests/test_packetizer.py
+++ b/tests/test_packetizer.py
@@ -20,6 +20,7 @@
Some unit tests for the ssh2 protocol in Transport.
"""
+import sys
import unittest
from hashlib import sha1
@@ -34,7 +35,6 @@ from paramiko.common import byte_chr, zero_byte
x55 = byte_chr(0x55)
x1f = byte_chr(0x1f)
-
class PacketizerTest (unittest.TestCase):
def test_1_write(self):
@@ -85,6 +85,8 @@ class PacketizerTest (unittest.TestCase):
self.assertEqual(900, m.get_int())
def test_3_closed(self):
+ if sys.platform.startswith("win"): # no SIGALRM on windows
+ return
rsock = LoopSocket()
wsock = LoopSocket()
rsock.link(wsock)
@@ -112,9 +114,13 @@ class PacketizerTest (unittest.TestCase):
import signal
class TimeoutError(Exception):
- pass
+ def __init__(self, error_message):
+ if hasattr(errno, 'ETIME'):
+                    self.message = os.strerror(errno.ETIME)
+ else:
+                    self.message = error_message
- def timeout(seconds=1, error_message=os.strerror(errno.ETIME)):
+ def timeout(seconds=1, error_message='Timer expired'):
def decorator(func):
def _handle_timeout(signum, frame):
raise TimeoutError(error_message)
diff --git a/tests/test_pkey.py b/tests/test_pkey.py
index ec128140..24d78c3e 100644
--- a/tests/test_pkey.py
+++ b/tests/test_pkey.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
# Copyright (C) 2003-2009 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
@@ -24,20 +25,25 @@ import unittest
import os
from binascii import hexlify
from hashlib import md5
+import base64
from paramiko import RSAKey, DSSKey, ECDSAKey, Message, util
-from paramiko.py3compat import StringIO, byte_chr, b, bytes
+from paramiko.py3compat import StringIO, byte_chr, b, bytes, PY2
from tests.util import test_path
# from openssh's ssh-keygen
PUB_RSA = 'ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEA049W6geFpmsljTwfvI1UmKWWJPNFI74+vNKTk4dmzkQY2yAMs6FhlvhlI8ysU4oj71ZsRYMecHbBbxdN79+JRFVYTKaLqjwGENeTd+yv4q+V2PvZv3fLnzApI3l7EJCqhWwJUHJ1jAkZzqDx0tyOL4uoZpww3nmE0kb3y21tH4c='
PUB_DSS = 'ssh-dss AAAAB3NzaC1kc3MAAACBAOeBpgNnfRzr/twmAQRu2XwWAp3CFtrVnug6s6fgwj/oLjYbVtjAy6pl/h0EKCWx2rf1IetyNsTxWrniA9I6HeDj65X1FyDkg6g8tvCnaNB8Xp/UUhuzHuGsMIipRxBxw9LF608EqZcj1E3ytktoW5B5OcjrkEoz3xG7C+rpIjYvAAAAFQDwz4UnmsGiSNu5iqjn3uTzwUpshwAAAIEAkxfFeY8P2wZpDjX0MimZl5wkoFQDL25cPzGBuB4OnB8NoUk/yjAHIIpEShw8V+LzouMK5CTJQo5+Ngw3qIch/WgRmMHy4kBq1SsXMjQCte1So6HBMvBPIW5SiMTmjCfZZiw4AYHK+B/JaOwaG9yRg2Ejg4Ok10+XFDxlqZo8Y+wAAACARmR7CCPjodxASvRbIyzaVpZoJ/Z6x7dAumV+ysrV1BVYd0lYukmnjO1kKBWApqpH1ve9XDQYN8zgxM4b16L21kpoWQnZtXrY3GZ4/it9kUgyB7+NwacIBlXa8cMDL7Q/69o0d54U0X/NeX5QxuYR6OMJlrkQB7oiW/P/1mwjQgE='
-PUB_ECDSA = 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJSPZm3ZWkvk/Zx8WP+fZRZ5/NBBHnGQwR6uIC6XHGPDIHuWUzIjAwA0bzqkOUffEsbLe+uQgKl5kbc/L8KA/eo='
+PUB_ECDSA_256 = 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJSPZm3ZWkvk/Zx8WP+fZRZ5/NBBHnGQwR6uIC6XHGPDIHuWUzIjAwA0bzqkOUffEsbLe+uQgKl5kbc/L8KA/eo='
+PUB_ECDSA_384 = 'ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBBbGibQLW9AAZiGN2hEQxWYYoFaWKwN3PKSaDJSMqmIn1Z9sgRUuw8Y/w502OGvXL/wFk0i2z50l3pWZjD7gfMH7gX5TUiCzwrQkS+Hn1U2S9aF5WJp0NcIzYxXw2r4M2A=='
+PUB_ECDSA_521 = 'ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBACaOaFLZGuxa5AW16qj6VLypFbLrEWrt9AZUloCMefxO8bNLjK/O5g0rAVasar1TnyHE9qj4NwzANZASWjQNbc4MAG8vzqezFwLIn/kNyNTsXNfqEko9OgHZknlj2Z79dwTJcRAL4QLcT5aND0EHZLB2fAUDXiWIb2j4rg1mwPlBMiBXA=='
FINGER_RSA = '1024 60:73:38:44:cb:51:86:65:7f:de:da:a2:2b:5a:57:d5'
FINGER_DSS = '1024 44:78:f0:b9:a2:3c:c5:18:20:09:ff:75:5b:c1:d2:6c'
-FINGER_ECDSA = '256 25:19:eb:55:e6:a1:47:ff:4f:38:d2:75:6f:a5:d5:60'
+FINGER_ECDSA_256 = '256 25:19:eb:55:e6:a1:47:ff:4f:38:d2:75:6f:a5:d5:60'
+FINGER_ECDSA_384 = '384 c1:8d:a0:59:09:47:41:8e:a8:a6:07:01:29:23:b4:65'
+FINGER_ECDSA_521 = '521 44:58:22:52:12:33:16:0e:ce:0e:be:2c:7c:7e:cc:1e'
SIGNED_RSA = '20:d7:8a:31:21:cb:f7:92:12:f2:a4:89:37:f5:78:af:e6:16:b6:25:b9:97:3d:a2:cd:5f:ca:20:21:73:4c:ad:34:73:8f:20:77:28:e2:94:15:08:d8:91:40:7a:85:83:bf:18:37:95:dc:54:1a:9b:88:29:6c:73:ca:38:b4:04:f1:56:b9:f2:42:9d:52:1b:29:29:b4:4f:fd:c9:2d:af:47:d2:40:76:30:f3:63:45:0c:d9:1d:43:86:0f:1c:70:e2:93:12:34:f3:ac:c5:0a:2f:14:50:66:59:f1:88:ee:c1:4a:e9:d1:9c:4e:46:f0:0e:47:6f:38:74:f1:44:a8'
RSA_PRIVATE_OUT = """\
@@ -73,7 +79,7 @@ h9pT9XHqn+1rZ4bK+QGA
-----END DSA PRIVATE KEY-----
"""
-ECDSA_PRIVATE_OUT = """\
+ECDSA_PRIVATE_OUT_256 = """\
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIKB6ty3yVyKEnfF/zprx0qwC76MsMlHY4HXCnqho2eKioAoGCCqGSM49
AwEHoUQDQgAElI9mbdlaS+T9nHxY/59lFnn80EEecZDBHq4gLpccY8Mge5ZTMiMD
@@ -81,8 +87,30 @@ ADRvOqQ5R98Sxst765CAqXmRtz8vwoD96g==
-----END EC PRIVATE KEY-----
"""
+ECDSA_PRIVATE_OUT_384 = """\
+-----BEGIN EC PRIVATE KEY-----
+MIGkAgEBBDBDdO8IXvlLJgM7+sNtPl7tI7FM5kzuEUEEPRjXIPQM7mISciwJPBt+
+y43EuG8nL4mgBwYFK4EEACKhZANiAAQWxom0C1vQAGYhjdoREMVmGKBWlisDdzyk
+mgyUjKpiJ9WfbIEVLsPGP8OdNjhr1y/8BZNIts+dJd6VmYw+4HzB+4F+U1Igs8K0
+JEvh59VNkvWheViadDXCM2MV8Nq+DNg=
+-----END EC PRIVATE KEY-----
+"""
+
+ECDSA_PRIVATE_OUT_521 = """\
+-----BEGIN EC PRIVATE KEY-----
+MIHcAgEBBEIAprQtAS3OF6iVUkT8IowTHWicHzShGgk86EtuEXvfQnhZFKsWm6Jo
+iqAr1yEaiuI9LfB3Xs8cjuhgEEfbduYr/f6gBwYFK4EEACOhgYkDgYYABACaOaFL
+ZGuxa5AW16qj6VLypFbLrEWrt9AZUloCMefxO8bNLjK/O5g0rAVasar1TnyHE9qj
+4NwzANZASWjQNbc4MAG8vzqezFwLIn/kNyNTsXNfqEko9OgHZknlj2Z79dwTJcRA
+L4QLcT5aND0EHZLB2fAUDXiWIb2j4rg1mwPlBMiBXA==
+-----END EC PRIVATE KEY-----
+"""
+
x1234 = b'\x01\x02\x03\x04'
+TEST_KEY_BYTESTR_2 = '\x00\x00\x00\x07ssh-rsa\x00\x00\x00\x01#\x00\x00\x00\x81\x00\xd3\x8fV\xea\x07\x85\xa6k%\x8d<\x1f\xbc\x8dT\x98\xa5\x96$\xf3E#\xbe>\xbc\xd2\x93\x93\x87f\xceD\x18\xdb \x0c\xb3\xa1a\x96\xf8e#\xcc\xacS\x8a#\xefVlE\x83\x1epv\xc1o\x17M\xef\xdf\x89DUXL\xa6\x8b\xaa<\x06\x10\xd7\x93w\xec\xaf\xe2\xaf\x95\xd8\xfb\xd9\xbfw\xcb\x9f0)#y{\x10\x90\xaa\x85l\tPru\x8c\t\x19\xce\xa0\xf1\xd2\xdc\x8e/\x8b\xa8f\x9c0\xdey\x84\xd2F\xf7\xcbmm\x1f\x87'
+TEST_KEY_BYTESTR_3 = '\x00\x00\x00\x07ssh-rsa\x00\x00\x00\x01#\x00\x00\x00\x00ӏV\x07k%<\x1fT$E#>ғfD\x18 \x0cae#̬S#VlE\x1epvo\x17M߉DUXL<\x06\x10דw\u2bd5ٿw˟0)#y{\x10l\tPru\t\x19Π\u070e/f0yFmm\x1f'
+
class KeyTest (unittest.TestCase):
@@ -205,43 +233,72 @@ class KeyTest (unittest.TestCase):
msg.rewind()
self.assertTrue(key.verify_ssh_sig(b'jerri blank', msg))
- def test_10_load_ecdsa(self):
- key = ECDSAKey.from_private_key_file(test_path('test_ecdsa.key'))
+ def test_C_generate_ecdsa(self):
+ key = ECDSAKey.generate()
+ msg = key.sign_ssh_data(b'jerri blank')
+ msg.rewind()
+ self.assertTrue(key.verify_ssh_sig(b'jerri blank', msg))
+ self.assertEqual(key.get_bits(), 256)
+ self.assertEqual(key.get_name(), 'ecdsa-sha2-nistp256')
+
+ key = ECDSAKey.generate(bits=256)
+ msg = key.sign_ssh_data(b'jerri blank')
+ msg.rewind()
+ self.assertTrue(key.verify_ssh_sig(b'jerri blank', msg))
+ self.assertEqual(key.get_bits(), 256)
+ self.assertEqual(key.get_name(), 'ecdsa-sha2-nistp256')
+
+ key = ECDSAKey.generate(bits=384)
+ msg = key.sign_ssh_data(b'jerri blank')
+ msg.rewind()
+ self.assertTrue(key.verify_ssh_sig(b'jerri blank', msg))
+ self.assertEqual(key.get_bits(), 384)
+ self.assertEqual(key.get_name(), 'ecdsa-sha2-nistp384')
+
+ key = ECDSAKey.generate(bits=521)
+ msg = key.sign_ssh_data(b'jerri blank')
+ msg.rewind()
+ self.assertTrue(key.verify_ssh_sig(b'jerri blank', msg))
+ self.assertEqual(key.get_bits(), 521)
+ self.assertEqual(key.get_name(), 'ecdsa-sha2-nistp521')
+
+ def test_10_load_ecdsa_256(self):
+ key = ECDSAKey.from_private_key_file(test_path('test_ecdsa_256.key'))
self.assertEqual('ecdsa-sha2-nistp256', key.get_name())
- exp_ecdsa = b(FINGER_ECDSA.split()[1].replace(':', ''))
+ exp_ecdsa = b(FINGER_ECDSA_256.split()[1].replace(':', ''))
my_ecdsa = hexlify(key.get_fingerprint())
self.assertEqual(exp_ecdsa, my_ecdsa)
- self.assertEqual(PUB_ECDSA.split()[1], key.get_base64())
+ self.assertEqual(PUB_ECDSA_256.split()[1], key.get_base64())
self.assertEqual(256, key.get_bits())
s = StringIO()
key.write_private_key(s)
- self.assertEqual(ECDSA_PRIVATE_OUT, s.getvalue())
+ self.assertEqual(ECDSA_PRIVATE_OUT_256, s.getvalue())
s.seek(0)
key2 = ECDSAKey.from_private_key(s)
self.assertEqual(key, key2)
- def test_11_load_ecdsa_password(self):
- key = ECDSAKey.from_private_key_file(test_path('test_ecdsa_password.key'), b'television')
+ def test_11_load_ecdsa_password_256(self):
+ key = ECDSAKey.from_private_key_file(test_path('test_ecdsa_password_256.key'), b'television')
self.assertEqual('ecdsa-sha2-nistp256', key.get_name())
- exp_ecdsa = b(FINGER_ECDSA.split()[1].replace(':', ''))
+ exp_ecdsa = b(FINGER_ECDSA_256.split()[1].replace(':', ''))
my_ecdsa = hexlify(key.get_fingerprint())
self.assertEqual(exp_ecdsa, my_ecdsa)
- self.assertEqual(PUB_ECDSA.split()[1], key.get_base64())
+ self.assertEqual(PUB_ECDSA_256.split()[1], key.get_base64())
self.assertEqual(256, key.get_bits())
- def test_12_compare_ecdsa(self):
+ def test_12_compare_ecdsa_256(self):
# verify that the private & public keys compare equal
- key = ECDSAKey.from_private_key_file(test_path('test_ecdsa.key'))
+ key = ECDSAKey.from_private_key_file(test_path('test_ecdsa_256.key'))
self.assertEqual(key, key)
pub = ECDSAKey(data=key.asbytes())
self.assertTrue(key.can_sign())
self.assertTrue(not pub.can_sign())
self.assertEqual(key, pub)
- def test_13_sign_ecdsa(self):
+ def test_13_sign_ecdsa_256(self):
# verify that the rsa private key can sign and verify
- key = ECDSAKey.from_private_key_file(test_path('test_ecdsa.key'))
+ key = ECDSAKey.from_private_key_file(test_path('test_ecdsa_256.key'))
msg = key.sign_ssh_data(b'ice weasels')
self.assertTrue(type(msg) is Message)
msg.rewind()
@@ -255,6 +312,109 @@ class KeyTest (unittest.TestCase):
pub = ECDSAKey(data=key.asbytes())
self.assertTrue(pub.verify_ssh_sig(b'ice weasels', msg))
+ def test_14_load_ecdsa_384(self):
+ key = ECDSAKey.from_private_key_file(test_path('test_ecdsa_384.key'))
+ self.assertEqual('ecdsa-sha2-nistp384', key.get_name())
+ exp_ecdsa = b(FINGER_ECDSA_384.split()[1].replace(':', ''))
+ my_ecdsa = hexlify(key.get_fingerprint())
+ self.assertEqual(exp_ecdsa, my_ecdsa)
+ self.assertEqual(PUB_ECDSA_384.split()[1], key.get_base64())
+ self.assertEqual(384, key.get_bits())
+
+ s = StringIO()
+ key.write_private_key(s)
+ self.assertEqual(ECDSA_PRIVATE_OUT_384, s.getvalue())
+ s.seek(0)
+ key2 = ECDSAKey.from_private_key(s)
+ self.assertEqual(key, key2)
+
+ def test_15_load_ecdsa_password_384(self):
+ key = ECDSAKey.from_private_key_file(test_path('test_ecdsa_password_384.key'), b'television')
+ self.assertEqual('ecdsa-sha2-nistp384', key.get_name())
+ exp_ecdsa = b(FINGER_ECDSA_384.split()[1].replace(':', ''))
+ my_ecdsa = hexlify(key.get_fingerprint())
+ self.assertEqual(exp_ecdsa, my_ecdsa)
+ self.assertEqual(PUB_ECDSA_384.split()[1], key.get_base64())
+ self.assertEqual(384, key.get_bits())
+
+ def test_16_compare_ecdsa_384(self):
+ # verify that the private & public keys compare equal
+ key = ECDSAKey.from_private_key_file(test_path('test_ecdsa_384.key'))
+ self.assertEqual(key, key)
+ pub = ECDSAKey(data=key.asbytes())
+ self.assertTrue(key.can_sign())
+ self.assertTrue(not pub.can_sign())
+ self.assertEqual(key, pub)
+
+ def test_17_sign_ecdsa_384(self):
+        # verify that the ecdsa private key can sign and verify
+ key = ECDSAKey.from_private_key_file(test_path('test_ecdsa_384.key'))
+ msg = key.sign_ssh_data(b'ice weasels')
+ self.assertTrue(type(msg) is Message)
+ msg.rewind()
+ self.assertEqual('ecdsa-sha2-nistp384', msg.get_text())
+ # ECDSA signatures, like DSS signatures, tend to be different
+ # each time, so we can't compare against a "known correct"
+ # signature.
+ # Even the length of the signature can change.
+
+ msg.rewind()
+ pub = ECDSAKey(data=key.asbytes())
+ self.assertTrue(pub.verify_ssh_sig(b'ice weasels', msg))
+
+ def test_18_load_ecdsa_521(self):
+ key = ECDSAKey.from_private_key_file(test_path('test_ecdsa_521.key'))
+ self.assertEqual('ecdsa-sha2-nistp521', key.get_name())
+ exp_ecdsa = b(FINGER_ECDSA_521.split()[1].replace(':', ''))
+ my_ecdsa = hexlify(key.get_fingerprint())
+ self.assertEqual(exp_ecdsa, my_ecdsa)
+ self.assertEqual(PUB_ECDSA_521.split()[1], key.get_base64())
+ self.assertEqual(521, key.get_bits())
+
+ s = StringIO()
+ key.write_private_key(s)
+ # Different versions of OpenSSL (SSLeay versions 0x1000100f and
+ # 0x1000207f for instance) use different apparently valid (as far as
+ # ssh-keygen is concerned) padding. So we can't check the actual value
+ # of the pem encoded key.
+ s.seek(0)
+ key2 = ECDSAKey.from_private_key(s)
+ self.assertEqual(key, key2)
+
+ def test_19_load_ecdsa_password_521(self):
+ key = ECDSAKey.from_private_key_file(test_path('test_ecdsa_password_521.key'), b'television')
+ self.assertEqual('ecdsa-sha2-nistp521', key.get_name())
+ exp_ecdsa = b(FINGER_ECDSA_521.split()[1].replace(':', ''))
+ my_ecdsa = hexlify(key.get_fingerprint())
+ self.assertEqual(exp_ecdsa, my_ecdsa)
+ self.assertEqual(PUB_ECDSA_521.split()[1], key.get_base64())
+ self.assertEqual(521, key.get_bits())
+
+ def test_20_compare_ecdsa_521(self):
+ # verify that the private & public keys compare equal
+ key = ECDSAKey.from_private_key_file(test_path('test_ecdsa_521.key'))
+ self.assertEqual(key, key)
+ pub = ECDSAKey(data=key.asbytes())
+ self.assertTrue(key.can_sign())
+ self.assertTrue(not pub.can_sign())
+ self.assertEqual(key, pub)
+
+ def test_21_sign_ecdsa_521(self):
+        # verify that the ecdsa private key can sign and verify
+ key = ECDSAKey.from_private_key_file(test_path('test_ecdsa_521.key'))
+ msg = key.sign_ssh_data(b'ice weasels')
+ self.assertTrue(type(msg) is Message)
+ msg.rewind()
+ self.assertEqual('ecdsa-sha2-nistp521', msg.get_text())
+ # ECDSA signatures, like DSS signatures, tend to be different
+ # each time, so we can't compare against a "known correct"
+ # signature.
+ # Even the length of the signature can change.
+
+ msg.rewind()
+ pub = ECDSAKey(data=key.asbytes())
+ self.assertTrue(pub.verify_ssh_sig(b'ice weasels', msg))
+
def test_salt_size(self):
# Read an existing encrypted private key
file_ = test_path('test_rsa_password.key')
@@ -271,3 +431,8 @@ class KeyTest (unittest.TestCase):
self.assertEqual(key, key2)
finally:
os.remove(newfile)
+
+ def test_stringification(self):
+ key = RSAKey.from_private_key_file(test_path('test_rsa.key'))
+ comparable = TEST_KEY_BYTESTR_2 if PY2 else TEST_KEY_BYTESTR_3
+ self.assertEqual(str(key), comparable)
diff --git a/tests/test_sftp.py b/tests/test_sftp.py
index e4c2c3a3..d3064fff 100755
--- a/tests/test_sftp.py
+++ b/tests/test_sftp.py
@@ -413,7 +413,7 @@ class SFTPTest (unittest.TestCase):
def test_A_readline_seek(self):
"""
create a text file and write a bunch of text into it. then count the lines
- in the file, and seek around to retreive particular lines. this should
+ in the file, and seek around to retrieve particular lines. this should
verify that read buffering and 'tell' work well together, and that read
buffering is reset on 'seek'.
"""
diff --git a/tests/test_ssh_gss.py b/tests/test_ssh_gss.py
index e20d348f..967b3b81 100644
--- a/tests/test_ssh_gss.py
+++ b/tests/test_ssh_gss.py
@@ -43,9 +43,7 @@ class NullServer (paramiko.ServerInterface):
return paramiko.AUTH_FAILED
def enable_auth_gssapi(self):
- UseGSSAPI = True
- GSSAPICleanupCredentials = True
- return UseGSSAPI
+ return True
def check_channel_request(self, kind, chanid):
return paramiko.OPEN_SUCCEEDED
diff --git a/tests/test_transport.py b/tests/test_transport.py
index 5069e5b0..c426cef1 100644
--- a/tests/test_transport.py
+++ b/tests/test_transport.py
@@ -31,13 +31,16 @@ import random
from hashlib import sha1
import unittest
-from paramiko import Transport, SecurityOptions, ServerInterface, RSAKey, DSSKey, \
- SSHException, ChannelException, Packetizer
+from paramiko import (
+ Transport, SecurityOptions, ServerInterface, RSAKey, DSSKey, SSHException,
+ ChannelException, Packetizer,
+)
from paramiko import AUTH_FAILED, AUTH_SUCCESSFUL
from paramiko import OPEN_SUCCEEDED, OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
-from paramiko.common import MSG_KEXINIT, cMSG_CHANNEL_WINDOW_ADJUST, \
- MIN_PACKET_SIZE, MIN_WINDOW_SIZE, MAX_WINDOW_SIZE, \
- DEFAULT_WINDOW_SIZE, DEFAULT_MAX_PACKET_SIZE
+from paramiko.common import (
+ MSG_KEXINIT, cMSG_CHANNEL_WINDOW_ADJUST, MIN_PACKET_SIZE, MIN_WINDOW_SIZE,
+ MAX_WINDOW_SIZE, DEFAULT_WINDOW_SIZE, DEFAULT_MAX_PACKET_SIZE,
+)
from paramiko.py3compat import bytes
from paramiko.message import Message
from tests.loop import LoopSocket
@@ -162,6 +165,15 @@ class TransportTest(unittest.TestCase):
except TypeError:
pass
+ def test_1b_security_options_reset(self):
+ o = self.tc.get_security_options()
+ # should not throw any exceptions
+ o.ciphers = o.ciphers
+ o.digests = o.digests
+ o.key_types = o.key_types
+ o.kex = o.kex
+ o.compression = o.compression
+
def test_2_compute_key(self):
self.tc.K = 123281095979686581523377256114209720774539068973101330872763622971399429481072519713536292772709507296759612401802191955568143056534122385270077606457721553469730659233569339356140085284052436697480759510519672848743794433460113118986816826624865291116513647975790797391795651716378444844877749505443714557929
self.tc.H = b'\x0C\x83\x07\xCD\xE6\x85\x6F\xF3\x0B\xA9\x36\x84\xEB\x0F\x04\xC2\x52\x0E\x9E\xD3'
@@ -828,3 +840,21 @@ class TransportTest(unittest.TestCase):
hostkey=public_host_key,
username='slowdive',
password='pygmalion')
+
+ def test_M_select_after_close(self):
+ """
+ verify that select works when a channel is already closed.
+ """
+ self.setup_test_server()
+ chan = self.tc.open_session()
+ chan.invoke_shell()
+ schan = self.ts.accept(1.0)
+ schan.close()
+
+ # give client a moment to receive close notification
+ time.sleep(0.1)
+
+ r, w, e = select.select([chan], [], [], 0.1)
+ self.assertEqual([chan], r)
+ self.assertEqual([], w)
+ self.assertEqual([], e)
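What test_M_select_after_close exercises: paramiko channels implement fileno(), so select() reports a channel the peer has already closed as readable instead of blocking forever. A rough sketch, assuming an already-connected, authenticated Transport named transport:

    import select

    chan = transport.open_session()
    chan.invoke_shell()
    # ... remote side closes its end of the channel ...
    r, w, e = select.select([chan], [], [], 0.1)  # channel objects are selectable via fileno()
    if chan in r:
        data = chan.recv(1024)                    # returns b'' once the channel is closed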
diff --git a/tests/test_util.py b/tests/test_util.py
index a6a2c30b..7880e156 100644
--- a/tests/test_util.py
+++ b/tests/test_util.py
@@ -66,7 +66,7 @@ from paramiko import *
class UtilTest(unittest.TestCase):
- def test_1_import(self):
+ def test_import(self):
"""
verify that all the classes can be imported from paramiko.
"""
@@ -104,7 +104,7 @@ class UtilTest(unittest.TestCase):
self.assertTrue('SSHConfig' in symbols)
self.assertTrue('util' in symbols)
- def test_2_parse_config(self):
+ def test_parse_config(self):
global test_config_file
f = StringIO(test_config_file)
config = paramiko.util.parse_ssh_config(f)
@@ -114,7 +114,7 @@ class UtilTest(unittest.TestCase):
{'host': ['*'], 'config': {'crazy': 'something dumb'}},
{'host': ['spoo.example.com'], 'config': {'crazy': 'something else'}}])
- def test_3_host_config(self):
+ def test_host_config(self):
global test_config_file
f = StringIO(test_config_file)
config = paramiko.util.parse_ssh_config(f)
@@ -141,12 +141,12 @@ class UtilTest(unittest.TestCase):
values
)
- def test_4_generate_key_bytes(self):
+ def test_generate_key_bytes(self):
x = paramiko.util.generate_key_bytes(sha1, b'ABCDEFGH', 'This is my secret passphrase.', 64)
hex = ''.join(['%02x' % byte_ord(c) for c in x])
self.assertEqual(hex, '9110e2f6793b69363e58173e9436b13a5a4b339005741d5c680e505f57d871347b4239f14fb5c46e857d5e100424873ba849ac699cea98d729e57b3e84378e8b')
- def test_5_host_keys(self):
+ def test_host_keys(self):
with open('hostfile.temp', 'w') as f:
f.write(test_hosts_file)
try:
@@ -159,7 +159,7 @@ class UtilTest(unittest.TestCase):
finally:
os.unlink('hostfile.temp')
- def test_7_host_config_expose_issue_33(self):
+ def test_host_config_expose_issue_33(self):
test_config_file = """
Host www13.*
Port 22
@@ -178,7 +178,7 @@ Host *
{'hostname': host, 'port': '22'}
)
- def test_8_eintr_retry(self):
+ def test_eintr_retry(self):
self.assertEqual('foo', paramiko.util.retry_on_signal(lambda: 'foo'))
# Variables that are set by raises_intr
@@ -203,7 +203,7 @@ Host *
self.assertRaises(AssertionError,
lambda: paramiko.util.retry_on_signal(raises_other_exception))
- def test_9_proxycommand_config_equals_parsing(self):
+ def test_proxycommand_config_equals_parsing(self):
"""
ProxyCommand should not split on equals signs within the value.
"""
@@ -222,7 +222,7 @@ Host equals-delimited
'foo bar=biz baz'
)
- def test_10_proxycommand_interpolation(self):
+ def test_proxycommand_interpolation(self):
"""
ProxyCommand should perform interpolation on the value
"""
@@ -248,7 +248,20 @@ Host *
val
)
- def test_11_host_config_test_negation(self):
+ def test_proxycommand_tilde_expansion(self):
+ """
+ Tilde (~) should be expanded inside ProxyCommand
+ """
+ config = paramiko.util.parse_ssh_config(StringIO("""
+Host test
+ ProxyCommand ssh -F ~/.ssh/test_config bastion nc %h %p
+"""))
+ self.assertEqual(
+ 'ssh -F %s/.ssh/test_config bastion nc test 22' % os.path.expanduser('~'),
+ host_config('test', config)['proxycommand']
+ )
+
+ def test_host_config_test_negation(self):
test_config_file = """
Host www13.* !*.example.com
Port 22
@@ -270,7 +283,7 @@ Host *
{'hostname': host, 'port': '8080'}
)
- def test_12_host_config_test_proxycommand(self):
+ def test_host_config_test_proxycommand(self):
test_config_file = """
Host proxy-with-equal-divisor-and-space
ProxyCommand = foo=bar
@@ -298,7 +311,7 @@ ProxyCommand foo=bar:%h-%p
values
)
- def test_11_host_config_test_identityfile(self):
+ def test_host_config_test_identityfile(self):
test_config_file = """
IdentityFile id_dsa0
@@ -328,7 +341,7 @@ IdentityFile id_dsa22
values
)
- def test_12_config_addressfamily_and_lazy_fqdn(self):
+ def test_config_addressfamily_and_lazy_fqdn(self):
"""
Ensure the code path honoring non-'all' AddressFamily doesn't asplode
"""
@@ -344,13 +357,13 @@ IdentityFile something_%l_using_fqdn
self.assertEqual(32767, paramiko.util.clamp_value(32767, 32765, 32769))
self.assertEqual(32769, paramiko.util.clamp_value(32767, 32770, 32769))
- def test_13_config_dos_crlf_succeeds(self):
+ def test_config_dos_crlf_succeeds(self):
config_file = StringIO("host abcqwerty\r\nHostName 127.0.0.1\r\n")
config = paramiko.SSHConfig()
config.parse(config_file)
self.assertEqual(config.lookup("abcqwerty")["hostname"], "127.0.0.1")
- def test_14_get_hostnames(self):
+ def test_get_hostnames(self):
f = StringIO(test_config_file)
config = paramiko.util.parse_ssh_config(f)
self.assertEqual(config.get_hostnames(), set(['*', '*.example.com', 'spoo.example.com']))
@@ -462,9 +475,10 @@ Host param3 parara
safe_has_bytes = safe_string(has_bytes)
expected_bytes = b("has %07%03 bytes")
err = "{0!r} != {1!r}"
- assert safe_vanilla == vanilla, err.format(safe_vanilla, vanilla)
- assert safe_has_bytes == expected_bytes, \
- err.format(safe_has_bytes, expected_bytes)
+ msg = err.format(safe_vanilla, vanilla)
+ assert safe_vanilla == vanilla, msg
+ msg = err.format(safe_has_bytes, expected_bytes)
+ assert safe_has_bytes == expected_bytes, msg
def test_proxycommand_none_issue_418(self):
test_config_file = """
@@ -485,3 +499,33 @@ Host proxycommand-with-equals-none
paramiko.util.lookup_ssh_host_config(host, config),
values
)
+
+ def test_proxycommand_none_masking(self):
+ # Re: https://github.com/paramiko/paramiko/issues/670
+ source_config = """
+Host specific-host
+ ProxyCommand none
+
+Host other-host
+ ProxyCommand other-proxy
+
+Host *
+ ProxyCommand default-proxy
+"""
+ config = paramiko.SSHConfig()
+ config.parse(StringIO(source_config))
+ # When bug is present, the full stripping-out of specific-host's
+ # ProxyCommand means it actually appears to pick up the default
+ # ProxyCommand value instead, due to cascading. It should (for
+ # backwards compatibility reasons in 1.x/2.x) appear completely blank,
+ # as if the host had no ProxyCommand whatsoever.
+ # Threw another unrelated host in there just for sanity reasons.
+ self.assertFalse('proxycommand' in config.lookup('specific-host'))
+ self.assertEqual(
+ config.lookup('other-host')['proxycommand'],
+ 'other-proxy'
+ )
+ self.assertEqual(
+ config.lookup('some-random-host')['proxycommand'],
+ 'default-proxy'
+ )
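For reference, the masking behavior asserted above (issue 670) in isolation: "ProxyCommand none" should strip the key for that host entirely rather than letting a wildcard value cascade onto it. A small sketch with hypothetical host names, using Python 3's io.StringIO:

    from io import StringIO
    import paramiko

    config = paramiko.SSHConfig()
    config.parse(StringIO(
        "Host internal\n"
        "    ProxyCommand none\n"
        "\n"
        "Host *\n"
        "    ProxyCommand default-proxy\n"
    ))
    # 'none' masks the wildcard: the key disappears for this host...
    assert 'proxycommand' not in config.lookup('internal')
    # ...while any other host still inherits the wildcard value.
    assert config.lookup('whatever-else')['proxycommand'] == 'default-proxy'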
diff --git a/tox-requirements.txt b/tox-requirements.txt
deleted file mode 100644
index 47ddd792..00000000
--- a/tox-requirements.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-# Not sure why tox can't just read setup.py?
-cryptography >= 0.8
-pyasn1 >= 0.1.7
diff --git a/tox.ini b/tox.ini
deleted file mode 100644
index d420c1a3..00000000
--- a/tox.ini
+++ /dev/null
@@ -1,6 +0,0 @@
-[tox]
-envlist = py26,py27,py33,py34,pypy
-
-[testenv]
-commands = pip install -q -r tox-requirements.txt
- python test.py