author     Jeff Forcier <jeff@bitprophet.org>    2017-06-06 15:15:40 -0700
committer  Jeff Forcier <jeff@bitprophet.org>    2017-06-06 15:15:40 -0700
commit     996fb6fd8ffb6df4f56c81e2ff199b9a600ecfc6 (patch)
tree       4571659cb5f9320275cfedccc3ae897b0f425343
parent     57394f5199ff75abc87b0373e18be2102540d50d (diff)
parent     ddb277d4e4989e914b67ff26c14c7c298e7fab9f (diff)
Merge branch 'master' into 471-int
89 files changed, 5088 insertions, 5094 deletions
diff --git a/.travis.yml b/.travis.yml
index 9a55dbb6..c8faf0a2 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,11 +1,16 @@
 language: python
 sudo: false
+cache:
+  directories:
+    - $HOME/.cache/pip
 python:
   - "2.6"
   - "2.7"
-  - "3.2"
   - "3.3"
   - "3.4"
+  - "3.5"
+  - "3.6"
+  - "pypy-5.4.1"
 install:
   # Self-install for setup.py-driven deps
   - pip install -e .
@@ -13,15 +18,12 @@ install:
   - pip install coveralls # For coveralls.io specifically
   - pip install -r dev-requirements.txt
 script:
-  # Main tests, with coverage!
+  # Main tests, w/ coverage!
   - inv test --coverage
-  # Ensure documentation & invoke pipeline run OK.
-  # Run 'docs' first since its objects.inv is referred to by 'www'.
-  # Also force warnings to be errors since most of them tend to be actual
-  # problems.
-  # Finally, skip them under Python 3.2 due to sphinx shenanigans
-  - "[[ $TRAVIS_PYTHON_VERSION != 3.2 ]] && invoke docs -o -W || true"
-  - "[[ $TRAVIS_PYTHON_VERSION != 3.2 ]] && invoke www -o -W || true"
+  # Ensure documentation builds, both sites, maxxed nitpicking
+  - inv sites
+  # flake8 is now possible!
+  - flake8
 notifications:
   irc:
     channels: "irc.freenode.org#paramiko"
diff --git a/ChangeLog.0 b/ChangeLog.0
deleted file mode 100644
index c151d251..00000000
--- a/ChangeLog.0
+++ /dev/null
@@ -1,42 +0,0 @@
-
-2003-08-24:
-    * implemented the other hashes: all 4 from the draft are working now
-    * added 'aes128-cbc' and '3des-cbc' cipher support
-    * fixed channel eof/close semantics
-2003-09-12: version "aerodactyl"
-    * implemented group-exchange kex ("kex-gex")
-    * implemented RSA/DSA private key auth
-2003-09-13:
-    * fixed inflate_long and deflate_long to handle negatives, even though
-      they're never used in the current ssh protocol
-2003-09-14:
-    * fixed session_id handling: re-keying works now
-    * added the ability for a Channel to have a fileno() for select/poll
-      purposes, although this will cause worse window performance if the
-      client app isn't careful
-2003-09-16: version "bulbasaur"
-    * fixed pipe (fileno) method to be nonblocking and it seems to work now
-    * fixed silly bug that caused large blocks to be truncated
-2003-10-08:
-    * patch to fix Channel.invoke_subsystem and add Channel.exec_command
-      [vaclav dvorak]
-    * patch to add Channel.sendall [vaclav dvorak]
-    * patch to add Channel.shutdown [vaclav dvorak]
-    * patch to add Channel.makefile and a ChannelFile class which emulates
-      a python file object [vaclav dvorak]
-2003-10-26:
-    * thread creation no longer happens during construction -- use the new
-      method "start_client(event)" to get things rolling
-    * re-keying now takes place after 1GB of data or 1 billion packets
-      (these limits can be easily changed per-session if needed)
-2003-11-06:
-    * added a demo server and host key
-2003-11-09:
-    * lots of changes to server mode
-    * ChannelFile supports universal newline mode; fixed readline
-    * fixed a bug with parsing the remote banner
-2003-11-10: version "charmander"
-    * renamed SSHException -> SecshException
-    * cleaned up server mode and the demo server
-
-*** for all subsequent changes, please see 'tla changelog'.
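The 2003-09-14 entry above notes that Channel gained a fileno() so it can be watched with select/poll. As a rough illustration of why that matters (a minimal sketch, not part of this commit; the host, credentials, and command below are placeholders), a channel can sit in an ordinary select() loop like any socket:

    import select
    import paramiko

    # Minimal sketch: the connection details are placeholders, not values
    # taken from the Paramiko source or this commit.
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect("ssh.example.com", username="user", password="secret")

    chan = client.get_transport().open_session()
    chan.exec_command("uptime")

    # Channel.fileno() lets the channel sit in select() alongside ordinary
    # sockets, instead of being polled in a busy loop.
    while not chan.exit_status_ready() or chan.recv_ready():
        readable, _, _ = select.select([chan], [], [], 1.0)
        if chan in readable and chan.recv_ready():
            print(chan.recv(4096).decode("utf-8", "replace"), end="")

    chan.close()
    client.close()

As the original entry warns, going through fileno() can cost some window performance if the client application isn't careful.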
diff --git a/ChangeLog.1 b/ChangeLog.1 deleted file mode 100644 index 2fdae5a0..00000000 --- a/ChangeLog.1 +++ /dev/null @@ -1,2928 +0,0 @@ -# do not edit -- automatically generated by arch changelog -# arch-tag: automatic-ChangeLog--robey@lag.net--2003-public/secsh--dev--1.0 -# - -2005-04-18 00:53:57 GMT Robey Pointer <robey@lag.net> patch-164 - - Summary: - fix some docs - Revision: - secsh--dev--1.0--patch-164 - - remove some epydoc comments about fileno() being non-portable. - - modified files: - paramiko/channel.py - - -2005-04-18 00:30:52 GMT Robey Pointer <robey@lag.net> patch-163 - - Summary: - add SFTPClient.close() - Revision: - secsh--dev--1.0--patch-163 - - add SFTPClient.close() and add a simple little unit test for it. - - - modified files: - paramiko/sftp_client.py tests/test_sftp.py - - -2005-04-18 00:11:34 GMT Robey Pointer <robey@lag.net> patch-162 - - Summary: - avoid os.environ['HOME'] in the demos - Revision: - secsh--dev--1.0--patch-162 - - avoid using os.environ['HOME'], which will never work on windows, and - use os.path.expanduser() instead. it's semi-moot because windows doesn't - have a standard location for ssh files, but i think paramiko should set a - good example anyway. - - modified files: - demo.py demo_simple.py - - -2005-04-16 23:38:22 GMT Robey Pointer <robey@lag.net> patch-161 - - Summary: - integrated laptop work (test commit) - Revision: - secsh--dev--1.0--patch-161 - - Patches applied: - - * robey@lag.net--2003-public-master-shake/secsh--dev--1.0--base-0 - tag of robey@lag.net--2003-public/secsh--dev--1.0--patch-160 - - * robey@lag.net--2003-public-master-shake/secsh--dev--1.0--patch-1 - test commit - - * robey@lag.net--2003-public/secsh--dev--1.0--base-0 - initial import - - * robey@lag.net--2003-public/secsh--dev--1.0--patch-1 - no changes - - - modified files: - README paramiko/server.py - - new patches: - robey@lag.net--2003-public-master-shake/secsh--dev--1.0--base-0 - robey@lag.net--2003-public-master-shake/secsh--dev--1.0--patch-1 - - -2005-04-10 00:46:41 GMT Robey Pointer <robey@lag.net> patch-160 - - Summary: - 1.3 marowak - Revision: - secsh--dev--1.0--patch-160 - - bump version to 1.3 / marowak - - modified files: - Makefile README paramiko/__init__.py paramiko/transport.py - setup.py - - -2005-04-10 00:39:18 GMT Robey Pointer <robey@lag.net> patch-159 - - Summary: - clean up SFTPAttributes.__repr__ - Revision: - secsh--dev--1.0--patch-159 - - clean up SFTPAttributes repr() a bit. - - modified files: - paramiko/sftp_attr.py - - -2005-04-10 00:13:54 GMT Robey Pointer <robey@lag.net> patch-158 - - Summary: - remove ChangeLog from MANIFEST.in - Revision: - secsh--dev--1.0--patch-158 - - remove ChangeLog from the dist list. - - modified files: - MANIFEST.in - - -2005-04-06 07:24:28 GMT Robey Pointer <robey@lag.net> patch-157 - - Summary: - change SubsystemHandler/SFTPServerInterface API - Revision: - secsh--dev--1.0--patch-157 - - change the API of SubsystemHandler to accept a reference to the - ServerInstance object during construction. this will break all code - that currently creates subsystem handlers (like sftp servers) -- sorry! - - lots of little doc fixups (mostly indenting). 
- - modified files: - paramiko/server.py paramiko/sftp_server.py paramiko/sftp_si.py - paramiko/transport.py tests/stub_sftp.py - - -2005-03-26 05:53:00 GMT Robey Pointer <robey@lag.net> patch-156 - - Summary: - rewrite channel pipes to work on windows - Revision: - secsh--dev--1.0--patch-156 - - the pipe system i was using for simulating an os-level FD (for select) was - retarded. i realized this week that i could just use a single byte in the - pipe to signal "data is ready" and not try to feed all incoming data thru - the pipe -- and then i don't have to try to make the pipe non-blocking (which - should make it work on windows). a lot of duplicate code got removed and now - it's all going thru the same code-path on read. - - there's still a slight penalty on incoming feeds and calling 'recv' when a - pipe has been opened (by calling 'fileno'), but it's tiny. - - removed a bunch of documentation and comments about things not working on - windows, since i think they probably do now. - - - removed files: - .arch-ids/demo_windows.py.id demo_windows.py - - modified files: - MANIFEST.in README paramiko/channel.py - - -2005-03-25 20:06:56 GMT Robey Pointer <robey@lag.net> patch-155 - - Summary: - fix sending of large sftp packet sizes - Revision: - secsh--dev--1.0--patch-155 - - fix a bug where packets larger than about 12KB would cause the session to - die on platforms other than osx. turns out that on most platforms, setting a - socket timeout also causes timeouts to occur on writes (but not on osx). so - on a huge write, once the os buffers were full, paramiko would get a - socket.timeout exception when writing, and bail. - - since the timeout is primarily so we can periodically poll to see if the - session has been killed from elsewhere, do that on a timeout but otherwise - continue trying to write. large packet sizes (in sftp) should now work. - - modified files: - paramiko/transport.py - - -2005-02-28 08:06:08 GMT Robey Pointer <robey@lag.net> patch-154 - - Summary: - even better 1.2 lapras - Revision: - secsh--dev--1.0--patch-154 - - re-bump the version # to 1.2 (with a new date since i added more stuff). - add 2005 to the copyright date in a bunch of files. - - - modified files: - Makefile README demo.py demo_server.py demo_simple.py - demo_windows.py forward.py paramiko/__init__.py - paramiko/auth_transport.py paramiko/ber.py paramiko/channel.py - paramiko/common.py paramiko/dsskey.py paramiko/file.py - paramiko/kex_gex.py paramiko/kex_group1.py - paramiko/logging22.py paramiko/message.py paramiko/pkey.py - paramiko/primes.py paramiko/rsakey.py paramiko/server.py - paramiko/sftp.py paramiko/sftp_attr.py paramiko/sftp_client.py - paramiko/sftp_handle.py paramiko/sftp_server.py - paramiko/sftp_si.py paramiko/ssh_exception.py - paramiko/transport.py paramiko/util.py setup.py test.py - tests/loop.py tests/stub_sftp.py tests/test_file.py - tests/test_kex.py tests/test_message.py tests/test_pkey.py - tests/test_sftp.py tests/test_transport.py - - -2005-02-28 07:49:56 GMT Robey Pointer <robey@lag.net> patch-153 - - Summary: - tweak sftp_file write behavior on large blocks of data - Revision: - secsh--dev--1.0--patch-153 - - BufferedFile.write() wasn't correctly dealing with the possibility that the - underlying write might not write the entire data block at once (even though - the docs said it would). now that it's working, make sftp_file take - advantage of it in order to chop up blocks larger than 32kB (the max allowed - on sftp) and add a unit test for it. 
- - - modified files: - paramiko/file.py paramiko/sftp_file.py tests/test_sftp.py - - -2005-02-28 07:17:21 GMT Robey Pointer <robey@lag.net> patch-152 - - Summary: - little doc fixes - Revision: - secsh--dev--1.0--patch-152 - - stupid little doc fixups that didn't fit with the other patches. - - modified files: - paramiko/auth_transport.py tests/loop.py - - -2005-02-28 07:16:22 GMT Robey Pointer <robey@lag.net> patch-151 - - Summary: - fix race in transport thread startup - Revision: - secsh--dev--1.0--patch-151 - - set active=True from the methods that start the main transport thread, right - before actually starting the thread. this avoids a race where the main - thread could be started, but the original thread could wake up from the - event.wait(0.1) before the new thread actually set the transport active. - impossible, you say? no machines so slow exist? au contraire, my sad - little linux box faced this problem earlier today. - - - modified files: - paramiko/transport.py - - -2005-02-28 07:14:11 GMT Robey Pointer <robey@lag.net> patch-150 - - Summary: - when combining stderr with stdout on a channel, merge the buffers too - Revision: - secsh--dev--1.0--patch-150 - - when turning on combine-stderr mode on a channel, grab the channel lock and - feed any existing stderr buffer into the normal buffer. this should help - applications (and my unit tests) avoid races between data coming in over - stderr and setting combine-stderr. - - _send_eof is now slightly safer too, although i don't think that really fixed - anything. it just makes me feel better. - - modified files: - paramiko/channel.py - - -2005-02-28 07:09:02 GMT Robey Pointer <robey@lag.net> patch-149 - - Summary: - add thread ids to logs - Revision: - secsh--dev--1.0--patch-149 - - add a logging filter that reports the thread-id of the logger, and use - that for all paramiko logging. since thread-local stuff didn't appear - until python 2.4, i hacked up my own little version to assign incrementing - numbers to threads as they log. - - - modified files: - paramiko/channel.py paramiko/sftp.py paramiko/sftp_client.py - paramiko/sftp_server.py paramiko/transport.py paramiko/util.py - - -2005-02-26 21:12:43 GMT Robey Pointer <robey@lag.net> patch-148 - - Summary: - forgot to check in stub_sftp - Revision: - secsh--dev--1.0--patch-148 - - yikes! don't forget to check this in: needed for unit tests. - - new files: - tests/.arch-ids/stub_sftp.py.id tests/stub_sftp.py - - -2005-02-26 21:11:04 GMT Robey Pointer <robey@lag.net> patch-147 - - Summary: - 1.2 (lapras) - Revision: - secsh--dev--1.0--patch-147 - - bump version stuff to 1.2 / lapras. - - modified files: - Makefile README paramiko/__init__.py paramiko/transport.py - setup.py - - -2005-02-15 15:48:47 GMT Robey Pointer <robey@lag.net> patch-146 - - Summary: - raise better exception on empty key - Revision: - secsh--dev--1.0--patch-146 - - raise a clearer exception when trying to create an empty key. - - - modified files: - README paramiko/dsskey.py paramiko/rsakey.py - tests/test_transport.py - - -2005-02-15 15:47:02 GMT Robey Pointer <robey@lag.net> patch-145 - - Summary: - add methods for sending/receiving a channel's exit status - Revision: - secsh--dev--1.0--patch-145 - - track a channel's exit status and provide a method (recv_exit_status) to - block waiting for it to arrive. also provide a convenience method for - servers to send it (send_exit_status). add shutdown_read and shutdown_write. - fix a bug in sending window change requests. 
- - - modified files: - README paramiko/channel.py paramiko/transport.py - - -2005-02-06 23:32:22 GMT Robey Pointer <robey@lag.net> patch-144 - - Summary: - fix docs - Revision: - secsh--dev--1.0--patch-144 - - clean up some of the docs. - - - modified files: - README paramiko/pkey.py paramiko/sftp_attr.py - - -2005-02-06 23:30:40 GMT Robey Pointer <robey@lag.net> patch-143 - - Summary: - fix an sftp unit test - Revision: - secsh--dev--1.0--patch-143 - - fix one of the sftp unit tests to actually work. - - - modified files: - tests/test_sftp.py - - -2005-02-05 07:45:20 GMT Robey Pointer <robey@lag.net> patch-142 - - Summary: - fix windows sample script's HOME - Revision: - secsh--dev--1.0--patch-142 - - fix the HOME environ var to work on windows too. - - modified files: - demo_windows.py - - -2005-01-25 05:17:55 GMT Robey Pointer <robey@lag.net> patch-141 - - Summary: - misc logging fixes - Revision: - secsh--dev--1.0--patch-141 - - change the level of some log messages so interesting stuff gets logged at - info instead of debug. fix an oops where channels defaulted to being in - ultra debug mode, and make this mode depend on a new Transport method: - "set_hexdump". - - - modified files: - paramiko/auth_transport.py paramiko/channel.py - paramiko/sftp.py paramiko/sftp_client.py - paramiko/sftp_server.py paramiko/transport.py - - -2005-01-17 10:09:09 GMT Robey Pointer <robey@lag.net> patch-140 - - Summary: - more flexible logging - Revision: - secsh--dev--1.0--patch-140 - - some tweaks to make channels etc follow the logger setting of their parent - transport, so that setting the log channel for a paramiko transport will - cause all sub-logging to branch out from that channel. - - also, close all open file handles when the sftp server ends. - - - modified files: - paramiko/channel.py paramiko/sftp_attr.py - paramiko/sftp_client.py paramiko/sftp_handle.py - paramiko/sftp_server.py paramiko/transport.py - - -2005-01-16 21:03:15 GMT Robey Pointer <robey@lag.net> patch-139 - - Summary: - make loopback sftp tests the default - Revision: - secsh--dev--1.0--patch-139 - - change the unit tests to default to always running the sftp tests locally, - and make a -R option to force the tests to run against a remote server. - the tests seem to work fine locally, and it helps test out server mode, - even though there's a danger that they could get isolated from reality - and only test that paramiko can talk to itself. - - - modified files: - test.py - - -2005-01-16 20:14:07 GMT Robey Pointer <robey@lag.net> patch-138 - - Summary: - doc fixups - Revision: - secsh--dev--1.0--patch-138 - - little doc fixups that i did obsessively on the train one morning. - - modified files: - paramiko/file.py - - -2005-01-09 05:27:07 GMT Robey Pointer <robey@lag.net> patch-137 - - Summary: - added listdir_attr() - Revision: - secsh--dev--1.0--patch-137 - - add SFTPClient.listdir_attr() to fetch a list of files & their attributes, - instead of just their filenames. artur piwko would find this useful. - - - modified files: - paramiko/sftp_attr.py paramiko/sftp_client.py - - -2004-12-19 19:56:48 GMT Robey Pointer <robey@lag.net> patch-136 - - Summary: - loopback sftp test - Revision: - secsh--dev--1.0--patch-136 - - add ability to turn off more tests, and a secret (for now) -X option to do - the sftp tests via loopback socket. added another symlink sftp test to see - what happens with absolute symlinks. 
- - - modified files: - test.py tests/test_sftp.py - - -2004-12-19 19:50:00 GMT Robey Pointer <robey@lag.net> patch-135 - - Summary: - more sftp cleanup - Revision: - secsh--dev--1.0--patch-135 - - oops, this should've been part of the last patch. - - - modified files: - paramiko/sftp_si.py - - -2004-12-19 19:43:27 GMT Robey Pointer <robey@lag.net> patch-134 - - Summary: - cleanup & docs in sftp - Revision: - secsh--dev--1.0--patch-134 - - add some more docs to SFTPHandle, and give a default implementation for - close() that's usually right. add a flush() to the default implementation - of write(). document that symlink's args in the sftp protocol are out of - order (the spec is wrong). - - - modified files: - paramiko/sftp_handle.py paramiko/sftp_server.py - - -2004-12-13 07:32:14 GMT Robey Pointer <robey@lag.net> patch-133 - - Summary: - unit test madness - Revision: - secsh--dev--1.0--patch-133 - - add some more testy bits and fix up some other bits. - - - modified files: - tests/test_transport.py - - -2004-12-13 07:31:01 GMT Robey Pointer <robey@lag.net> patch-132 - - Summary: - oops (continued) - Revision: - secsh--dev--1.0--patch-132 - - er, part 2 of that. - - - modified files: - paramiko/server.py - - -2004-12-13 07:29:38 GMT Robey Pointer <robey@lag.net> patch-131 - - Summary: - move check_global_request - Revision: - secsh--dev--1.0--patch-131 - - move check_global_request into the server interface -- i missed it during - the initial move (oops). - - - modified files: - paramiko/transport.py - - -2004-12-13 07:27:39 GMT Robey Pointer <robey@lag.net> patch-130 - - Summary: - small fixups - Revision: - secsh--dev--1.0--patch-130 - - move _wait_for_send_window into the right place in Channel. remove outdated - note from auth_transport. fix download url in setup.py. - - - - modified files: - paramiko/auth_transport.py paramiko/channel.py setup.py - - -2004-12-12 09:58:40 GMT Robey Pointer <robey@lag.net> patch-129 - - Summary: - 1.1 (kabuto) - Revision: - secsh--dev--1.0--patch-129 - - edit various files to bump the version to 1.1. - also fix to point to the new url. - - - modified files: - Makefile README paramiko/__init__.py paramiko/transport.py - setup.py - - -2004-12-12 09:38:24 GMT Robey Pointer <robey@lag.net> patch-128 - - Summary: - more unit tests - Revision: - secsh--dev--1.0--patch-128 - - added unit tests for multi-part auth, exec_command, and invoke_shell. - - - modified files: - tests/test_transport.py - - -2004-12-12 09:32:17 GMT Robey Pointer <robey@lag.net> patch-127 - - Summary: - doc fixups - Revision: - secsh--dev--1.0--patch-127 - - fix some typos in sftp_client docs - - - modified files: - paramiko/sftp_client.py - - -2004-12-12 09:25:15 GMT Robey Pointer <robey@lag.net> patch-126 - - Summary: - server support for stderr & exec_command - Revision: - secsh--dev--1.0--patch-126 - - for the server side of my stderr blunder, add send_stderr & sendall_stderr, - and make the sending side of makefile_stderr work correctly. - - also, call check_channel_exec_request on a server object for exec requests - on a channel. - - - modified files: - paramiko/channel.py paramiko/server.py - - -2004-12-12 09:16:03 GMT Robey Pointer <robey@lag.net> patch-125 - - Summary: - add client-side multi-part auth support - Revision: - secsh--dev--1.0--patch-125 - - added support for multi-part authentication (even though nobody supports it - that i've seen). on a successful "partial" auth, the auth_* method will - return a list of acceptable means to continue authenticating. 
- - - modified files: - paramiko/auth_transport.py paramiko/ssh_exception.py - - -2004-12-11 03:44:33 GMT Robey Pointer <robey@lag.net> patch-124 - - Summary: - docs fixup - Revision: - secsh--dev--1.0--patch-124 - - fix a comment typo, and add @since designators to a couple of new methods. - - - modified files: - paramiko/channel.py paramiko/sftp_server.py - - -2004-12-11 03:43:18 GMT Robey Pointer <robey@lag.net> patch-123 - - Summary: - clean up authentication - Revision: - secsh--dev--1.0--patch-123 - - add new exception "BadAuthenticationType", which is raised when auth fails - because your auth type (password or public-key) isn't valid on the server. - - used this as an excuse to clean up auth_password and auth_publickey so their - 'event' arg is optional, and if missing, they block until auth is finished, - raising an exception on error. - - also, don't close the session on failed auth -- the server may let you try - again. - - added some test cases for failed auth. - - - modified files: - paramiko/__init__.py paramiko/auth_transport.py - paramiko/ssh_exception.py paramiko/transport.py - tests/test_transport.py - - -2004-12-10 08:30:44 GMT Robey Pointer <robey@lag.net> patch-122 - - Summary: - symlink, readlink - Revision: - secsh--dev--1.0--patch-122 - - add support for symlink command, and finish support for readlink. (i guess - i started readlink a while ago but forgot to add the right method to the - SFTPServerInterface class.) - - - modified files: - paramiko/sftp_server.py paramiko/sftp_si.py tests/test_sftp.py - - -2004-12-10 08:27:43 GMT Robey Pointer <robey@lag.net> patch-121 - - Summary: - other part of that last patch - Revision: - secsh--dev--1.0--patch-121 - - oops, forgot this part. - - modified files: - paramiko/transport.py - - -2004-12-10 08:25:28 GMT Robey Pointer <robey@lag.net> patch-120 - - Summary: - add stderr support methods - Revision: - secsh--dev--1.0--patch-120 - - big embarrassment: i didn't read the ssh2 docs close enough, and all this - time paramiko wasn't handling "extended_data" packets, which contain stderr - output. - - so now, several new functions: recv_stderr_ready() and recv_stderr() to - mirror recv_ready() and recv(), and set_combined_stderr() to force stderr - to be combined into stdout. also, makefile_stderr() to create a fake file - object to represent stderr. - - - modified files: - paramiko/channel.py - - -2004-12-10 07:55:33 GMT Robey Pointer <robey@lag.net> patch-119 - - Summary: - reformat README - Revision: - secsh--dev--1.0--patch-119 - - reformatted the README to a slightly smaller margin, just because. - - - modified files: - README - - -2004-12-09 04:15:12 GMT Robey Pointer <robey@lag.net> patch-118 - - Summary: - fix SFTPFile gettimeout/settimeout - Revision: - secsh--dev--1.0--patch-118 - - i don't think the gettimeout/settimeout calls on SFTPFile ever worked. - also, simplify the implementation of _get_size() since it's nearly - identical to stat(). - - - modified files: - paramiko/sftp_file.py - - -2004-12-09 02:42:36 GMT Robey Pointer <robey@lag.net> patch-117 - - Summary: - readme comments - Revision: - secsh--dev--1.0--patch-117 - - add another fixme to the readme - - modified files: - README - - -2004-11-26 22:07:31 GMT Robey Pointer <robey@lag.net> patch-116 - - Summary: - doc fixups - Revision: - secsh--dev--1.0--patch-116 - - explain "recv_ready" better, and add debug descriptions for the kex codes. 
- - - modified files: - README paramiko/channel.py paramiko/common.py - - -2004-11-25 19:39:34 GMT Robey Pointer <robey@lag.net> patch-115 - - Summary: - fix CONNECTION_FAILED_CODE - Revision: - secsh--dev--1.0--patch-115 - - oops, fix typo in channel request failed. - - modified files: - paramiko/transport.py - - -2004-11-22 07:40:39 GMT Robey Pointer <robey@lag.net> patch-114 - - Summary: - fix typo in channel - Revision: - secsh--dev--1.0--patch-114 - - fix typo that alain found: pipd_wfd -> pipe_wfd. - - - modified files: - paramiko/channel.py - - -2004-11-22 07:27:21 GMT Robey Pointer <robey@lag.net> patch-113 - - Summary: - sftp server support! - Revision: - secsh--dev--1.0--patch-113 - - finally check in sftp_handle (file handle abstraction), sftp_si (server - interface), and sftp_server (server implementation) -- all of which make - a roughly 90% implementation of server-side sftp. - - - - new files: - paramiko/.arch-ids/sftp_handle.py.id - paramiko/.arch-ids/sftp_server.py.id - paramiko/.arch-ids/sftp_si.py.id paramiko/sftp_handle.py - paramiko/sftp_server.py paramiko/sftp_si.py - - modified files: - README demo_windows.py paramiko/__init__.py - - -2004-11-22 07:07:08 GMT Robey Pointer <robey@lag.net> patch-112 - - Summary: - add finish_subsystem() - Revision: - secsh--dev--1.0--patch-112 - - when a SubsystemHandler is being decomissioned (the client has closed the - channel or transport, or the socket went away), make a callback to let the - handler do any shutdown it needs to. - - - modified files: - paramiko/server.py - - -2004-11-22 07:04:31 GMT Robey Pointer <robey@lag.net> patch-111 - - Summary: - fix extremely unlikely channel counter wrapping - Revision: - secsh--dev--1.0--patch-111 - - Transport's channel counter can overflow after 4 billion some channels are - created. make it wrap back around after 16 million instead. also allow the - logging channel to be set manually. fix some comments elsewhere. - - - modified files: - paramiko/channel.py paramiko/primes.py paramiko/transport.py - - -2004-11-22 07:01:43 GMT Robey Pointer <robey@lag.net> patch-110 - - Summary: - fix Transport.get_username() to work in server mode too - Revision: - secsh--dev--1.0--patch-110 - - whenever i split the 'username' field into username and auth_username, - i guess that made get_username() stop working for server mode (because the - username was stored in a different field). this should fix it. - - modified files: - paramiko/auth_transport.py - - -2004-11-07 03:10:53 GMT Robey Pointer <robey@lag.net> patch-109 - - Summary: - v1.0 (jigglypuff) - Revision: - secsh--dev--1.0--patch-109 - - bump all the version numbers up to 1.0 (jigglypuff). - - modified files: - Makefile README paramiko/__init__.py paramiko/transport.py - setup.py - - -2004-11-07 02:51:42 GMT Robey Pointer <robey@lag.net> patch-108 - - Summary: - add filename to SFTPAttributes - Revision: - secsh--dev--1.0--patch-108 - - add filename to the attributes stored in an SFTPAttributes object. - - modified files: - paramiko/sftp_attr.py - - -2004-11-07 02:31:48 GMT Robey Pointer <robey@lag.net> patch-107 - - Summary: - fix kex_gex - Revision: - secsh--dev--1.0--patch-107 - - fix kex_gex (group-exchange key exchange) to, *cough*, work again, and also - layout kex_group1 a little more sanely. 
- - modified files: - paramiko/kex_gex.py paramiko/kex_group1.py - - -2004-11-07 02:29:54 GMT Robey Pointer <robey@lag.net> patch-106 - - Summary: - fix chmod +x on demo_windows.py - Revision: - secsh--dev--1.0--patch-106 - - forgot to make demo_windows +x - - -2004-11-07 02:29:20 GMT Robey Pointer <robey@lag.net> patch-105 - - Summary: - move ChangeLog - Revision: - secsh--dev--1.0--patch-105 - - move ChangeLog out of the way because tla can autogenerate any useful - ChangeLog. - - - renamed files: - .arch-ids/ChangeLog.id - ==> .arch-ids/ChangeLog-old.id - ChangeLog - ==> ChangeLog-old - - -2004-11-07 02:28:33 GMT Robey Pointer <robey@lag.net> patch-104 - - Summary: - fix location of SFTPError - Revision: - secsh--dev--1.0--patch-104 - - fix location of SFTPError. - - modified files: - paramiko/__init__.py paramiko/sftp_client.py - - -2004-11-07 02:17:18 GMT Robey Pointer <robey@lag.net> patch-103 - - Summary: - rename sftp constants - Revision: - secsh--dev--1.0--patch-103 - - replace oddly named sftp constants (FX_OK for example) with names that make - a bit more sense when sober (SFTP_OK). - - modified files: - paramiko/__init__.py paramiko/sftp.py paramiko/sftp_client.py - - -2004-11-07 02:08:11 GMT Robey Pointer <robey@lag.net> patch-102 - - Summary: - add key exchange tests + 1 more sftp test - Revision: - secsh--dev--1.0--patch-102 - - add test suite for key-exchange protocols, since i apparently broke the - "gex" protocol recently and never noticed. also add an sftp unit test for - mkdir/rmdir. - - new files: - tests/.arch-ids/test_kex.py.id tests/test_kex.py - - modified files: - test.py tests/test_sftp.py - - -2004-11-07 02:00:50 GMT Robey Pointer <robey@lag.net> patch-101 - - Summary: - remove old demo keys - Revision: - secsh--dev--1.0--patch-101 - - the keys are in tests/ now. - - removed files: - .arch-ids/demo_dss_key.id .arch-ids/demo_rsa_key.id - demo_dss_key demo_rsa_key - - -2004-11-06 20:32:08 GMT Robey Pointer <robey@lag.net> patch-100 - - Summary: - don't forget demo_windows.py - Revision: - secsh--dev--1.0--patch-100 - - update MANIFEST.in to include demo_windows.py and not include the demo - keys (they're in tests/ now). clean up the README to explain the demo - scripts better now, since there are so many of them. then fix up the - demo scripts to look in tests/ for the keys. - - demo_windows.py doesn't need to call get_pty() (in fact, i think that's - blowing openssh's mind) and was executing the wrong command. - - - modified files: - MANIFEST.in README demo_server.py demo_simple.py - demo_windows.py - - -2004-11-01 07:07:48 GMT Robey Pointer <robey@lag.net> patch-99 - - Summary: - use getpass - Revision: - secsh--dev--1.0--patch-99 - - convert raw_input to getpass as suggested many weeks ago. - - modified files: - forward.py - - -2004-11-01 03:54:01 GMT Robey Pointer <robey@lag.net> patch-98 - - Summary: - don't unlink a Channel until the server closes it too - Revision: - secsh--dev--1.0--patch-98 - - when close()'ing a Channel, don't immediately unlink it from the Transport. - instead, wait for the server to send a close message. - - this should fix a bug where doing close() on an EOF'd channel would cause - the entire transport to be killed, because the server would send an - 'exit-status' and 'close' message for a channel that we no longer had a - record of. 
- - - modified files: - paramiko/channel.py - - -2004-11-01 03:43:28 GMT Robey Pointer <robey@lag.net> patch-97 - - Summary: - better debugging, improve subsytem handler - Revision: - secsh--dev--1.0--patch-97 - - add a list of ssh packet names for debugging. improve the server-mode - subsystem handler so it can take extra parameters (list or keyword) and - pass them to the subsystem constructor. remove a misleading comment - about rekeying (which was already implemented). - - - modified files: - paramiko/common.py paramiko/server.py paramiko/transport.py - - -2004-11-01 03:37:42 GMT Robey Pointer <robey@lag.net> patch-96 - - Summary: - remove key.valid check - Revision: - secsh--dev--1.0--patch-96 - - oops! 'key.valid' no longer works -- catch the SSHException instead, and log - it. - - - modified files: - paramiko/auth_transport.py - - -2004-10-23 07:36:23 GMT Robey Pointer <robey@lag.net> patch-95 - - Summary: - ivysaur 0.9 - Revision: - secsh--dev--1.0--patch-95 - - update ivysaur release date, and add the list of changes to the README - file. - - - modified files: - Makefile README paramiko/__init__.py - - -2004-10-20 16:52:51 GMT Robey Pointer <robey@lag.net> patch-94 - - Summary: - start testing Transport - Revision: - secsh--dev--1.0--patch-94 - - the beginnings of tests for Transport. only the bare minimum is there right - now. - - also started doc'ing things up to ivysaur. - - new files: - .arch-ids/demo_windows.py.id demo_windows.py - tests/.arch-ids/loop.py.id - tests/.arch-ids/test_transport.py.id tests/loop.py - tests/test_transport.py - - modified files: - Makefile README paramiko/__init__.py setup.py test.py - - -2004-10-18 04:54:27 GMT Robey Pointer <robey@lag.net> patch-93 - - Summary: - switch Transport.connect() to using a Pkey object for the host key - Revision: - secsh--dev--1.0--patch-93 - - i suddenly realized that passing "hostkeytype" and "hostkey" as strings to - Transport.connect() was pretty silly since i went to all the effort of making - a class specifically for holding keys. so Transport.connect() now just takes - host-key argument: "hostkey" as a PKey object. - - updated the demos to use PKey objects when reading the host key file, and to - use the new "hostkey" argument. - - - modified files: - demo.py demo_simple.py paramiko/pkey.py paramiko/transport.py - - -2004-09-25 22:07:59 GMT Robey Pointer <robey@lag.net> patch-92 - - Summary: - add rsa/dss key object unit tests - Revision: - secsh--dev--1.0--patch-92 - - add tests for rsa/dss key objects -- yay! - - - new files: - tests/.arch-ids/test_dss.key.id - tests/.arch-ids/test_pkey.py.id - tests/.arch-ids/test_rsa.key.id tests/test_dss.key - tests/test_pkey.py tests/test_rsa.key - - -2004-09-25 22:03:48 GMT Robey Pointer <robey@lag.net> patch-91 - - Summary: - fix test.py to use options instead of env vars, sftp tests default off - Revision: - secsh--dev--1.0--patch-91 - - fix up the test framework so that the sftp unit tests aren't always run (you - have to ask for them explicitly) and they take their configuration from - command-line options. they still require a remote server. - - modified files: - test.py tests/test_sftp.py - - -2004-09-25 21:58:11 GMT Robey Pointer <robey@lag.net> patch-90 - - Summary: - fix __init__ - Revision: - secsh--dev--1.0--patch-90 - - fix __init__ to export BufferedFile and randpool, and to catch up with the - changes from a week or 2 ago where sftp_attr & friends were split off. 
- - modified files: - paramiko/__init__.py - - -2004-09-25 21:47:19 GMT Robey Pointer <robey@lag.net> patch-89 - - Summary: - fix some Transport docs - Revision: - secsh--dev--1.0--patch-89 - - document that Transport also would like close() and settimeout() to exist - on the socket-like object passed to the constructor. - - modified files: - paramiko/transport.py - - -2004-09-25 21:32:53 GMT Robey Pointer <robey@lag.net> patch-88 - - Summary: - add Message.rewind() - Revision: - secsh--dev--1.0--patch-88 - - add rewind() method to Message, which just resets the pointer so you can - start reading from the beginning again. this is useful for some tests. - - modified files: - paramiko/message.py tests/test_message.py - - -2004-09-25 21:28:23 GMT Robey Pointer <robey@lag.net> patch-87 - - Summary: - clean up pkey interface - Revision: - secsh--dev--1.0--patch-87 - - change the pkey interface so that it's no longer possible to have a pkey - that doesn't represent a valid key. (ie: no more "blank" key objects.) - also add "get_bits" and "can_sign" methods to determine the key bit length - and whether it can sign things (contains the "private parts") respectively. - - modified files: - paramiko/dsskey.py paramiko/pkey.py paramiko/rsakey.py - - -2004-09-11 21:01:32 GMT Robey Pointer <robey@lag.net> patch-86 - - Summary: - unit tests for Message - Revision: - secsh--dev--1.0--patch-86 - - spanking new unit tests for Message. i'm trying to fix the embarrassment - of having so little of paramiko testable. next up is Transport! - - new files: - tests/.arch-ids/test_message.py.id tests/test_message.py - - -2004-09-11 20:56:01 GMT Robey Pointer <robey@lag.net> patch-85 - - Summary: - move SFTPFile and SFTPAttributes into their own files - Revision: - secsh--dev--1.0--patch-85 - - move SFTPFile and SFTPAttributes into their own files. - - new files: - paramiko/.arch-ids/sftp_attr.py.id - paramiko/.arch-ids/sftp_file.py.id paramiko/sftp_attr.py - paramiko/sftp_file.py - - modified files: - paramiko/sftp.py paramiko/sftp_client.py - - -2004-09-11 20:50:39 GMT Robey Pointer <robey@lag.net> patch-84 - - Summary: - add sftp.normalize - Revision: - secsh--dev--1.0--patch-84 - - kevin c. dorff pointed out that it would be nice to expose a way to - determine the server's "current working directory", so this new method - (normalize) directly maps to REALPATH. - - modified files: - paramiko/sftp_client.py - - -2004-09-11 20:43:09 GMT Robey Pointer <robey@lag.net> patch-83 - - Summary: - tweak Message.add() in the key exchanges - Revision: - secsh--dev--1.0--patch-83 - - use the new Message.add() behavior to make a little code here much easier - to read. - - modified files: - paramiko/kex_gex.py paramiko/kex_group1.py - - -2004-09-11 20:40:08 GMT Robey Pointer <robey@lag.net> patch-82 - - Summary: - doc fixes - Revision: - secsh--dev--1.0--patch-82 - - fix "string" -> "str" in types when documenting BufferedFile. - - modified files: - paramiko/file.py - - -2004-09-11 20:37:59 GMT Robey Pointer <robey@lag.net> patch-81 - - Summary: - more unit tests - Revision: - secsh--dev--1.0--patch-81 - - add test for BufferedFile.read(-1) and sftp.normalize(). - - modified files: - tests/test_file.py tests/test_sftp.py - - -2004-09-11 20:36:49 GMT Robey Pointer <robey@lag.net> patch-80 - - Summary: - move SubsystemHandler to server.py - Revision: - secsh--dev--1.0--patch-80 - - move SubsystemHandler into server.py where it makes more sense (it's part of - the server interface). 
- - also fix up paramiko's "version string" used in ssh2 negotiation to stop - saying "pyssh" and start saying "paramiko". :) - - modified files: - paramiko/server.py paramiko/transport.py - - -2004-09-11 20:35:19 GMT Robey Pointer <robey@lag.net> patch-79 - - Summary: - Message.add() can take many args - Revision: - secsh--dev--1.0--patch-79 - - a bit of cleanup to Message: add() can now take any number of params, and - will add them all in order (using type guessing). - - modified files: - paramiko/message.py - - -2004-09-09 01:36:45 GMT Robey Pointer <robey@lag.net> patch-78 - - Summary: - fix rbuffer -> _rbuffer in 3 places i missed - Revision: - secsh--dev--1.0--patch-78 - - fix 3 places where "rbuffer" hadn't been converted to "_rbuffer". thanks to - kevin c. dorff for the bug report. - - modified files: - paramiko/file.py - - -2004-09-07 06:56:49 GMT Robey Pointer <robey@lag.net> patch-77 - - Summary: - docs for SubsystemHandler - Revision: - secsh--dev--1.0--patch-77 - - add documentation to constructor for SubsystemHandler. - - modified files: - paramiko/transport.py - - -2004-09-07 06:54:31 GMT Robey Pointer <robey@lag.net> patch-76 - - Summary: - add sftp_client.py - Revision: - secsh--dev--1.0--patch-76 - - i retardedly forgot to import this file a few days ago: it's the split-out - client mode for sftp. it now also has some changes to adapt it to the - improved SFTPAttributes object API. - - new files: - paramiko/.arch-ids/sftp_client.py.id paramiko/sftp_client.py - - -2004-09-07 06:51:03 GMT Robey Pointer <robey@lag.net> patch-75 - - Summary: - clean up SFTPAttributes - Revision: - secsh--dev--1.0--patch-75 - - add english descriptions to the FX_* error codes of sftp. clean up (and - document) SFTPAttributes since it's exported now, and make it simple to - generate one from a python os.stat object. make "_pythonize" the default -- - that is, just use the same field names as python does for os.stat. (i'm not - sure why i didn't do it that way in the first place; probably ignorance.) - also add str() method that converts the SFTPAttributes into a string suitable - for use in ls (used in an obscure way in sftp servers). - - modified files: - paramiko/sftp.py - - -2004-09-07 06:45:53 GMT Robey Pointer <robey@lag.net> patch-74 - - Summary: - note pycrypto 2.0 in README - Revision: - secsh--dev--1.0--patch-74 - - update the README to note that pycrypto 2.0 works (i just tried it). also - fix the name from pyCrypt back to pycrypto -- that project is having trouble - making up its mind about naming. :) - - modified files: - README - - -2004-09-05 07:44:03 GMT Robey Pointer <robey@lag.net> patch-73 - - Summary: - split sftp into sftp, sftp_client; renamed SFTP -> SFTPClient - Revision: - secsh--dev--1.0--patch-73 - - add sftp_client file, and split out the common code (sftp) from stuff specific - to client mode (sftp_client). renamed SFTP class to SFTPClient, but left an - alias so old code will still work. - - renamed a bunch of sftp constants now that they're better hidden from epydoc. - - modified files: - README paramiko/__init__.py paramiko/sftp.py - - -2004-09-05 07:41:45 GMT Robey Pointer <robey@lag.net> patch-72 - - Summary: - some framework for adding subsystem handlers in server mode - Revision: - secsh--dev--1.0--patch-72 - - you can now register a subsystem with a Transport by passing in the name - (like "sftp") and a class (like a hypothetical SFTPServer). 
the default - ServerInterface.check_channel_request_subsystem now checks this table in - Transport, and if it finds a match, it creates a new thread for the handler - and calls into it. a new class SubsystemHandler is added for this purpose - (to be subclassed). - - modified files: - paramiko/server.py paramiko/transport.py - - -2004-09-05 07:37:40 GMT Robey Pointer <robey@lag.net> patch-71 - - Summary: - remove redundant 'auth_complete' member - Revision: - secsh--dev--1.0--patch-71 - - remove the redundant 'auth_complete' field and just use 'authenticated' for - both client and server mode. this makes the repr() string look correct in - server mode instead of always claiming that the transport is un-auth'd. - - modified files: - paramiko/auth_transport.py - - -2004-09-03 22:39:20 GMT Robey Pointer <robey@lag.net> patch-70 - - Summary: - clean up server interface; no longer need to subclass Channel - Revision: - secsh--dev--1.0--patch-70 - - - export AUTH_*, OPEN_FAILED_*, and the new OPEN_SUCCEEDED into the paramiko - namespace instead of making people dig into paramiko.Transport.AUTH_* etc. - - move all of the check_* methods from Channel to ServerInterface so apps - don't need to subclass Channel anymore just to run an ssh server - - ServerInterface.check_channel_request() returns an error code now, not a - new Channel object - - fix demo_server.py to follow all these changes - - fix a bunch of places where i used "string" in docstrings but meant "str" - - added Channel.get_id() - - modified files: - README demo_server.py paramiko/__init__.py - paramiko/auth_transport.py paramiko/channel.py - paramiko/common.py paramiko/server.py paramiko/sftp.py - paramiko/transport.py - - -2004-08-31 02:44:56 GMT Robey Pointer <robey@lag.net> patch-69 - - Summary: - clean up SecurityOptions - Revision: - secsh--dev--1.0--patch-69 - - the preferences are now tuples in Transport, and passed as tuples out of - SecurityOptions, so that the options can't be modified without setting them - back to the options field again. the algorithm lists in Transport are used - to validate the fields. - - modified files: - paramiko/transport.py - - -2004-08-30 20:22:10 GMT Robey Pointer <robey@lag.net> patch-68 - - Summary: - added Transport.get_security_options() - Revision: - secsh--dev--1.0--patch-68 - - just something i wanted to play with: - added Transport.get_security_options() which returns a SecurityOptions object. - this object is a kind of proxy for the 4 "preferred_*" fields in Transport, - and lets me avoid exposing those fields directly in case i change my mind - later about how they should be stored. - - added some docs to Channel explaining that the request methods now return - True/False, and fixed up docs in a few other places. - - modified files: - paramiko/__init__.py paramiko/channel.py paramiko/server.py - paramiko/sftp.py paramiko/transport.py - - -2004-08-28 04:21:12 GMT Robey Pointer <robey@lag.net> patch-67 - - Summary: - replay patch 63 (missing channel changes) - Revision: - secsh--dev--1.0--patch-67 - - i'm still getting the hang of tla/arch, obviously. - - replay patch 63, which was meant to be part of the later mega-patch, but - apparently when i reversed it, i lost it entirely. - - modified files: - paramiko/channel.py - - -2004-08-27 00:57:40 GMT Robey Pointer <robey@lag.net> patch-66 - - Summary: - new ServerInterface class, outbound rekey works, etc. 
- Revision: - secsh--dev--1.0--patch-66 - - a bunch of changes that i'm too lazy to split out into individual patches: - * all the server overrides from transport.py have been moved into a separate - class ServerInterface, so server code doesn't have to subclass the whole - paramiko library - * updated demo_server to subclass ServerInterface - * when re-keying during a session, block other messages until the new keys - are activated (openssh doensn't like any other traffic during a rekey) - * re-key when outbound limits are tripped too (was only counting inbound - traffic) - * don't log scary things on EOF - - - new files: - paramiko/.arch-ids/server.py.id paramiko/server.py - - modified files: - README demo_server.py paramiko/__init__.py - paramiko/auth_transport.py paramiko/transport.py - - -2004-08-27 00:28:33 GMT Robey Pointer <robey@lag.net> patch-65 - - Summary: - add settimeout/gettimeout/setblocking, some bugfixes. - Revision: - secsh--dev--1.0--patch-65 - - hide the command and response codes in sftp so they aren't exported. - add settimeout/gettimeout/setblocking that just wrap calls to the underlying - socket or channel. fix _read_all to not catch timeout exceptions. - - - modified files: - paramiko/sftp.py - - -2004-08-27 00:26:35 GMT Robey Pointer <robey@lag.net> patch-64 - - Summary: - reverse messed-up patch - Revision: - secsh--dev--1.0--patch-64 - - Patches applied: - - * robey@lag.net--2003-public/secsh--dev--1.0--base-0 - initial import - - * robey@lag.net--2003-public/secsh--dev--1.0--patch-1 - no changes - - - modified files: - paramiko/channel.py {arch}/=tagging-method - - -2004-08-27 00:06:42 GMT Robey Pointer <robey@lag.net> patch-63 - - Summary: - add settimeout/gettimeout/setblocking, some bugfixes. - Revision: - secsh--dev--1.0--patch-63 - - hide the command and response codes in sftp so they aren't exported. - add settimeout/gettimeout/setblocking that just wrap calls to the underlying - socket or channel. fix _read_all to not catch timeout exceptions. - - modified files: - paramiko/channel.py - - -2004-06-27 20:14:15 GMT Robey Pointer <robey@lag.net> patch-62 - - Summary: - version -> horsea - Revision: - secsh--dev--1.0--patch-62 - - up version to horsea. - - modified files: - Makefile README paramiko/__init__.py setup.py - {arch}/secsh/secsh--dev/secsh--dev--1.0/robey@lag.net--2003-public/patch-log/patch-1 - - -2004-06-10 18:12:00 GMT Robey Pointer <robey@lag.net> patch-61 - - Summary: - no more Foobar - Revision: - secsh--dev--1.0--patch-61 - - fix "Foobar" to be "Paramiko" in the one place i missed it in all the gpl - headers. sigh. :) - - modified files: - paramiko/__init__.py paramiko/auth_transport.py - paramiko/ber.py paramiko/common.py paramiko/dsskey.py - paramiko/kex_gex.py paramiko/kex_group1.py - paramiko/logging22.py paramiko/message.py paramiko/pkey.py - paramiko/primes.py paramiko/rsakey.py - paramiko/ssh_exception.py paramiko/util.py test.py - - -2004-06-10 18:08:50 GMT Robey Pointer <robey@lag.net> patch-60 - - Summary: - limit read/write requests to 32KB, advertise 32KB max packet size - Revision: - secsh--dev--1.0--patch-60 - - one of the unit tests was failing because the openssh sftp server was dropping - the connection without any error. turns out they have a maximum allowed write - size (possibly around 64KB). the sftp rfcs have a small hint that some servers - may drop read/write requests of greater than 32KB. - - so, all reads are limited to 32KB, and all writes > 32KB are now chopped up - and sent in 32KB chunks. 
this seems to keep openssh happy. - - also, we now advertise 32KB max packet size instead of 8KB (the speed - improves a lot), and log when we read/write a packet. and sftp files are - flushed on seek. - - modified files: - paramiko/sftp.py paramiko/transport.py - - -2004-06-10 18:02:13 GMT Robey Pointer <robey@lag.net> patch-59 - - Summary: - speed up parts of BufferedFile - Revision: - secsh--dev--1.0--patch-59 - - BufferedFile uses cStringIO for the write buffer now (i don't actually notice - any speed difference so this might revert later) and the default buffer size - has been upped from 1KB to 8KB. - - when scanning for linefeeds (when writing to a line-buffered file), only scan - the newly-written bytes, since we know all the previously buffered data is - linefeed-free. this was the #1 slowdown on the 1MB-file unit test. - - also, limit the buffering on line-buffered files to whatever the default - buffer size is. there's no reason to buffer 1MB waiting for a linefeed. - - modified files: - paramiko/file.py - - -2004-06-10 17:55:17 GMT Robey Pointer <robey@lag.net> patch-58 - - Summary: - some Channel fixes for max packet size & blocking on zero window - Revision: - secsh--dev--1.0--patch-58 - - some clean-ups and fixes to channels: - * when send() is blocked on a zero-width window, check that the channel is - still open. this was causing some lockups. - * set a lower bound to the "maximum packet size" we accept from the remote - host. if they tell us anything less than 1KB, assume they meant 1KB. (it's - not reasonable to fragment below that.) - * leave a little padding instead of cutting right up to the maximum packet - size: some space will be taken up by protocol overhead. - * turn off some of the debug log lines unless "ultra_debug" is on (nobody - cares about the feed info) - - - modified files: - paramiko/channel.py - - -2004-06-10 17:35:30 GMT Robey Pointer <robey@lag.net> patch-57 - - Summary: - more unit tests - Revision: - secsh--dev--1.0--patch-57 - - add a unit test for sending a large (1MB) file with line buffering but no - linefeeds (this triggered several bugs and inefficiencies), and another test - to verify that the write buffer is flushed on seek. - - modified files: - tests/test_file.py tests/test_sftp.py - - -2004-05-31 23:48:10 GMT Robey Pointer <robey@lag.net> patch-56 - - Summary: - add forward.py demo script; bump to gyarados - Revision: - secsh--dev--1.0--patch-56 - - add a demo script to show how to do local port forwarding. - - add gyarados to all the docs and bump the version number everywhere. - - new files: - .arch-ids/forward.py.id forward.py - - modified files: - MANIFEST.in Makefile README paramiko/__init__.py setup.py - - -2004-05-29 18:58:11 GMT Robey Pointer <robey@lag.net> patch-55 - - Summary: - add an sftp unit test for making 100 files - Revision: - secsh--dev--1.0--patch-55 - - create 100 files on the remote server, set their mode with chmod, then verify - that they're all there and contain the right data. valeriy is reporting that - sometimes he's getting stuck after 20 and though i'm not seeing it, i want to - add a test to try to pin it down. - - modified files: - tests/test_sftp.py - - -2004-05-29 18:56:10 GMT Robey Pointer <robey@lag.net> patch-54 - - Summary: - add direct-tcpip ability to open_channel - Revision: - secsh--dev--1.0--patch-54 - - open_channel can now be given a dest_addr and src_addr, which are filled in - if the channel type is "forwarded-tcpip" or "direct-tcpip". 
these channel - types are used in remote & local port forwarding, respectively. i've only - tested "direct-tcpip" but i think if one works, they both should work. - - also fixed a bug in connect where it was still assuming the old meaning for - get_remove_server_key() (oops!) and changed the sense of a send() failure - from <= 0 to < 0 since it may be possible for send() to return 0 and it not - be an EOF error. - - modified files: - paramiko/transport.py - - -2004-05-29 18:48:23 GMT Robey Pointer <robey@lag.net> patch-53 - - Summary: - add note about utf8 encodings - Revision: - secsh--dev--1.0--patch-53 - - add info to the README about what to do if python complains about missing - encodings. veleriy pogrebitskiy ran into this and had advice. - - modified files: - README - - -2004-05-17 07:41:50 GMT Robey Pointer <robey@lag.net> patch-52 - - Summary: - fix deadlock in closing a channel - Revision: - secsh--dev--1.0--patch-52 - - closing a channel would enter an odd codepath where the lock was grabbed, - some stuff was done, then another function was called where the lock was - grabbed again. unfortunately python locks aren't monitors so this would - deadlock. instead, make the smaller function lock-free with an explicit - notice that you must be holding the lock before calling. - - modified files: - paramiko/channel.py - - -2004-05-17 00:43:43 GMT Robey Pointer <robey@lag.net> patch-51 - - Summary: - fix utf8, raise packet size, log exceptions, be more lax with sfp servers - Revision: - secsh--dev--1.0--patch-51 - - explicitly import utf8 encodings for "freezing" (and also because not all - platforms come with utf8, apparently). raise the max acceptable packet size - to 8kB, cuz 2kB was too low. log exceptions at error level instead of debug - level. and don't reject older sftp servers. - - modified files: - paramiko/auth_transport.py paramiko/sftp.py - paramiko/transport.py - - -2004-04-23 22:55:16 GMT Robey Pointer <robey@lag.net> patch-50 - - Summary: - fearow date and last-minute fixes - Revision: - secsh--dev--1.0--patch-50 - - update release date of fearow to 23apr. fix channel._set_closed() to grab - the lock before notifying the in/out buffers that the channel is closed. - try roger's trick for finding the home folder on windows. - - modified files: - Makefile README paramiko/__init__.py paramiko/channel.py - paramiko/common.py - - -2004-04-08 06:31:08 GMT Robey Pointer <robey@lag.net> patch-49 - - Summary: - fix doc typos - Revision: - secsh--dev--1.0--patch-49 - - - modified files: - paramiko/dsskey.py paramiko/rsakey.py paramiko/transport.py - - -2004-04-08 05:48:16 GMT Robey Pointer <robey@lag.net> patch-48 - - Summary: - set version number to fearow - Revision: - secsh--dev--1.0--patch-48 - - set version number to fearow. - - modified files: - Makefile README paramiko/__init__.py setup.py - - -2004-04-08 05:12:20 GMT Robey Pointer <robey@lag.net> patch-47 - - Summary: - add socket.timeout for py22 - Revision: - secsh--dev--1.0--patch-47 - - oops, forgot this vital part of the py22 patches. roger binns sent me a - code patch that included this snip. - - modified files: - paramiko/common.py - - -2004-04-07 16:05:48 GMT Robey Pointer <robey@lag.net> patch-46 - - Summary: - README update notes - Revision: - secsh--dev--1.0--patch-46 - - added notes on what's new, what to watch out for in py22. added a "since: - fearow" to all the relevant API calls that are new. 
- - modified files: - README paramiko/auth_transport.py paramiko/dsskey.py - paramiko/pkey.py paramiko/rsakey.py paramiko/transport.py - - -2004-04-07 15:52:07 GMT Robey Pointer <robey@lag.net> patch-45 - - Summary: - add set_keepalive() - Revision: - secsh--dev--1.0--patch-45 - - add set_keepalive() to set an automatic keepalive mechanism. (while waiting - for a packet on a connection, we periodically check if it's time to send a - keepalive packet.) - - modified files: - paramiko/transport.py - - -2004-04-07 06:07:29 GMT Robey Pointer <robey@lag.net> patch-44 - - Summary: - add get_username() method for remembering who you auth'd as - Revision: - secsh--dev--1.0--patch-44 - - add get_username() method for remembering who you auth'd as. also, fix these - bugs: - * "continue" auth response counted as a failure (in server mode). - * try to import 'logging' in py22 before falling back to the fake logger, - in case they have a backported version of 'logger' - * raise the right exception when told to read a private key from a file that - isn't a private key file - * tell channels to close when the transport dies - - modified files: - paramiko/auth_transport.py paramiko/channel.py - paramiko/common.py paramiko/pkey.py paramiko/transport.py - - -2004-04-06 22:03:21 GMT Robey Pointer <robey@lag.net> patch-43 - - Summary: - fix encrypted private key files - Revision: - secsh--dev--1.0--patch-43 - - the random byte padding on private key files' BER data was confusing openssh, - so switch to null-byte padding, which is slightly less secure but works with - crappy old openssh. also, enforce the mode when writing the private key - file. we really really want it to be 0600. (python seems to ignore the - mode normally.) - - modified files: - paramiko/pkey.py - - -2004-04-06 08:16:02 GMT Robey Pointer <robey@lag.net> patch-42 - - Summary: - support py22, more or less - Revision: - secsh--dev--1.0--patch-42 - - add roger binns' patches for supporting python 2.2. i hedged a bit on the - logging stuff and just added some trickery to let logging be stubbed out for - python 2.2. this changed a lot of import statements but i managed to avoid - hacking at any of the existing logging. - - socket timeouts are required for the threads to notice when they've been - deactivated. worked around it by using the 'select' module on py22. - - also fixed the sftp unit tests to cope with a password-protected private key. - - new files: - paramiko/.arch-ids/logging22.py.id paramiko/logging22.py - - modified files: - README demo.py demo_server.py demo_simple.py - paramiko/__init__.py paramiko/auth_transport.py - paramiko/channel.py paramiko/common.py paramiko/kex_gex.py - paramiko/kex_group1.py paramiko/message.py paramiko/sftp.py - paramiko/transport.py paramiko/util.py tests/test_sftp.py - - -2004-04-05 22:32:03 GMT Robey Pointer <robey@lag.net> patch-41 - - Summary: - make get_remote_server_key() return a PKey object - Revision: - secsh--dev--1.0--patch-41 - - a good suggestion from roger binns: make get_remote_server_key() just return - a pkey object instead of a tuple of strings. all the strings can be extracted - from the pkey object, as well as other potentially useful things. - - modified files: - demo.py paramiko/transport.py - - -2004-04-05 19:36:40 GMT Robey Pointer <robey@lag.net> patch-40 - - Summary: - add dss key generation too, and fix some bugs - Revision: - secsh--dev--1.0--patch-40 - - added the ability to generate dss keys and write private dss key files, - similar to rsa. 
in the process, fixed a couple of bugs with ber encoding - and writing password-encrypted key files. the key has to be padded to the - iblock size of the cipher -- it's very difficult to determine how the others - do this, so i just add random bytes to the end. - - fixed the simple demo to use Transport's (host, port) constructor for - simplicity, and fixed a bug where the standard demo's DSS login wouldn't - work. - - also, move the common logfile setup crap into util so all the demos can just - call that one. - - modified files: - demo.py demo_simple.py paramiko/ber.py paramiko/dsskey.py - paramiko/pkey.py paramiko/rsakey.py paramiko/util.py - - -2004-04-05 10:37:18 GMT Robey Pointer <robey@lag.net> patch-39 - - Summary: - add global request mechanism - Revision: - secsh--dev--1.0--patch-39 - - add transport.global_request() to make a global-style request (usually an - extension to the protocol -- like keepalives) and handle requests from the - remote host. incoming requests are now handled and responded to correctly, - which should make openssh-style keepalives work. (before, we would silently - ignore them, which was wrong.) - - modified files: - paramiko/common.py paramiko/message.py paramiko/transport.py - - -2004-04-05 10:24:33 GMT Robey Pointer <robey@lag.net> patch-38 - - Summary: - add common.py file - Revision: - secsh--dev--1.0--patch-38 - - missing from previous change because tla doesn't like to add files in some - situations. (frown) - - - new files: - paramiko/.arch-ids/common.py.id paramiko/common.py - - -2004-04-05 10:16:31 GMT Robey Pointer <robey@lag.net> patch-37 - - Summary: - can now generate rsa keys (not dss yet) - Revision: - secsh--dev--1.0--patch-37 - - added functionality to ber to create ber streams. added some common methods - to PKey to allow dumping the key to base64 (the format used by openssh for - public key files and host key lists), and a factory for creating a key from - a private key file, and a common way to save private keys. RSAKey luckily - didn't have to change that much. - - also added a factory method to RSAKey to generate a new key. - - - modified files: - paramiko/ber.py paramiko/pkey.py paramiko/rsakey.py - - -2004-04-05 10:12:59 GMT Robey Pointer <robey@lag.net> patch-36 - - Summary: - add common.py for commonly used constants and globals - Revision: - secsh--dev--1.0--patch-36 - - common.py now stores the constants and globals. - lots of renaming because of this. - - modified files: - paramiko/auth_transport.py paramiko/channel.py - paramiko/kex_gex.py paramiko/kex_group1.py - paramiko/transport.py paramiko/util.py - - -2004-04-02 02:41:43 GMT Robey Pointer <robey@lag.net> patch-35 - - Summary: - add send_ignore - Revision: - secsh--dev--1.0--patch-35 - - add send_ignore() call to allow for sending garbage ignored packets to the - remote side. - - modified files: - paramiko/transport.py - - -2004-03-16 07:33:09 GMT Robey Pointer <robey@lag.net> patch-34 - - Summary: - fix some arcana in unpacking private keys - Revision: - secsh--dev--1.0--patch-34 - - "!= type([])" is a pretty obscure way to say it. let's try "is not list" - which is a lot more readable. - - (mostly this is a test to make sure tla is working okay on my laptop.) 
- - modified files: - paramiko/dsskey.py paramiko/rsakey.py - - -2004-03-09 01:09:17 GMT Robey Pointer <robey@lag.net> patch-33 - - Summary: - include tests in manifest - Revision: - secsh--dev--1.0--patch-33 - - include the tests in the manifest for dist, and remove some outdated notes in - NOTES about the exported API (this is doc'd wayyy better in epydoc now). - - modified files: - MANIFEST.in NOTES - - -2004-03-08 17:54:19 GMT Robey Pointer <robey@lag.net> patch-32 - - Summary: - add unit tests - Revision: - secsh--dev--1.0--patch-32 - - add unit tests for BufferedFile and SFTP (it's a start). remove the demo sftp - client because it was 99% copied from the other demos, which makes it kinda - confusing. the unit tests are a much better example. - - new files: - .arch-ids/test.py.id test.py tests/.arch-ids/=id - tests/.arch-ids/test_file.py.id - tests/.arch-ids/test_sftp.py.id tests/test_file.py - tests/test_sftp.py - - removed files: - .arch-ids/demo_sftp.py.id demo_sftp.py - - new directories: - tests tests/.arch-ids - - -2004-03-08 17:52:25 GMT Robey Pointer <robey@lag.net> patch-31 - - Summary: - bump version number to eevee - Revision: - secsh--dev--1.0--patch-31 - - bump the version number to eevee in a few places and talk about the unit - tests. - - modified files: - Makefile README paramiko/__init__.py setup.py - - -2004-03-08 17:50:49 GMT Robey Pointer <robey@lag.net> patch-30 - - Summary: - finish up client sftp support - Revision: - secsh--dev--1.0--patch-30 - - added 'stat' to SFTPFile and SFTP, documented 'open' and 'listdir', and added - 'rmdir', 'lstat', 'symlink', 'chmod', 'chown', 'utime', 'readlink'. - - turned off ultra debugging now that the unit tests are all working. - - modified files: - paramiko/sftp.py - - -2004-03-08 17:45:44 GMT Robey Pointer <robey@lag.net> patch-29 - - Summary: - fix some docs and BufferedFile.readline - Revision: - secsh--dev--1.0--patch-29 - - fix some documentation and fix readline()'s universal newline support to - always return strings ending with '\n', regardless of how they were in the - original file. (this is an obvious feature of python's universal newline - support that i somehow missed before.) - - modified files: - paramiko/file.py paramiko/message.py - - -2004-03-08 09:47:47 GMT Robey Pointer <robey@lag.net> patch-28 - - Summary: - fix lingering thread bug - Revision: - secsh--dev--1.0--patch-28 - - this bug has been in there forever and i could never figure out a workaround - till now. - - when the python interpreter exits, it doesn't necessarily destroy the - remaining objects or call __del__ on anything, and it will lock up until all - threads finish running. how the threads are supposed to notice the exiting - interpreter has always been sort of a mystery to me. - - tonight i figured out how to use the 'atexit' module to register a handler - that runs when the interpreter exits. now we keep a list of active threads - and ask them all to exit on shutdown. no more going to another shell to - kill -9 python! yeah!! - - modified files: - paramiko/transport.py - - -2004-03-04 08:21:45 GMT Robey Pointer <robey@lag.net> patch-27 - - Summary: - add BufferedFile abstraction - Revision: - secsh--dev--1.0--patch-27 - - SFTP client mode is mostly functional. there are probably still some bugs - but most of the operations on "file" objects have survived my simple tests. - - BufferedFile wraps a simpler stream in something that looks like a python - file (and can even handle seeking if the stream underneath supports it). 
- it's meant to be subclassed. most of it is ripped out of what used to be - ChannelFile so i can reuse it for sftp -- ChannelFile is now tiny. - - SFTP and Message are now exported. - - fixed util.format_binary_line to not quote spaces. - - new files: - .arch-ids/demo_sftp.py.id demo_sftp.py - paramiko/.arch-ids/file.py.id paramiko/.arch-ids/sftp.py.id - paramiko/file.py paramiko/sftp.py - - modified files: - paramiko/__init__.py paramiko/channel.py paramiko/message.py - paramiko/util.py - - -2004-01-27 02:04:59 GMT Robey Pointer <robey@lag.net> patch-26 - - Summary: - Transport constructor can take hostname or address tuple - Revision: - secsh--dev--1.0--patch-26 - - part of an ongoing attempt to make "simple" versions of some of the API calls, - so you can do common-case operations with just a few calls: - - Transport's constructor will now let you pass in a string or tuple instead - of a socket-like object. if you pass in a string, it assumes the string is - a hostname (with optional ":port" segment) and turns that into an address - tuple. if you pass in a tuple, it assumes it's an address tuple. in both - cases, it then creates a socket, connects to the given address, and then - continues as if that was the socket passed in. - - the idea being that you can call Transport('example.com') and it will do - the right thing. - - modified files: - paramiko/transport.py - - -2004-01-27 02:00:19 GMT Robey Pointer <robey@lag.net> patch-25 - - Summary: - pkey no longer raises binascii.Error - Revision: - secsh--dev--1.0--patch-25 - - catch binascii.Error in the private key decoder and convert it into an - SSHException. there's no reason people should have to care that it was a - decoding error vs. any of the other million things that could be wrong in - a corrupt key file. - - modified files: - paramiko/pkey.py - - -2004-01-27 01:45:44 GMT Robey Pointer <robey@lag.net> patch-24 - - Summary: - document more of Message; add get_int64 - Revision: - secsh--dev--1.0--patch-24 - - all of the get_* methods are now documented, but there's a bit more to do. - get_int64 added for eventual sftp support. - - modified files: - paramiko/message.py - - -2004-01-04 10:33:05 GMT Robey Pointer <robey@lag.net> patch-23 - - Summary: - quick doc fix. - Revision: - secsh--dev--1.0--patch-23 - - fix broken cross-link in kex_gex docs. - - modified files: - paramiko/kex_gex.py - - -2004-01-04 10:26:00 GMT Robey Pointer <robey@lag.net> patch-22 - - Summary: - fix MANIFEST.in, change version numbers to 0.9-doduo, fix LPGL notices - Revision: - secsh--dev--1.0--patch-22 - - fixed MANIFEST.in to include the demo scripts, LICENSE, and ChangeLog. - upped everything to version 0.9-doduo. - - fixed the copyright notice, and added the LGPL banner to the top of every - python file. - - modified files: - MANIFEST.in Makefile NOTES README paramiko/__init__.py - paramiko/auth_transport.py paramiko/ber.py paramiko/channel.py - paramiko/dsskey.py paramiko/kex_gex.py paramiko/kex_group1.py - paramiko/message.py paramiko/pkey.py paramiko/primes.py - paramiko/rsakey.py paramiko/ssh_exception.py - paramiko/transport.py paramiko/util.py setup.py - - -2004-01-04 10:07:35 GMT Robey Pointer <robey@lag.net> patch-21 - - Summary: - MANIFEST -> MANIFEST.in, fix setup.py. - Revision: - secsh--dev--1.0--patch-21 - - out with MANIFEST, in with MANIFEST.in. 
- - new files: - .arch-ids/MANIFEST.in.id MANIFEST.in - - removed files: - .arch-ids/MANIFEST.id MANIFEST - - modified files: - setup.py - - -2004-01-04 09:29:13 GMT Robey Pointer <robey@lag.net> patch-20 - - Summary: - more docs, and password-protected key files can now be read - Revision: - secsh--dev--1.0--patch-20 - - lots more documentation, some of it moved out of the README file, which is - now much smaller and less rambling. - - repr(Transport) now reports the number of bits used in the cipher. - - cleaned up BER to use util functions, and throw a proper exception (the new - BERException) on error. it doesn't ever have to be a full BER decoder, but - it can at least comb its hair and tuck in its shirt. - - lots of stuff added to PKey.read_private_key_file so it can try to decode - password-protected key files. right now it only understands "DES-EDE3-CBC" - format, but this is the only format i've seen openssh make so far. if the - key is password-protected, but no password was given, a new exception - (PasswordRequiredException) is raised so an outer layer can ask for a password - and try again. - - modified files: - README demo.py demo_server.py paramiko/__init__.py - paramiko/auth_transport.py paramiko/ber.py paramiko/channel.py - paramiko/dsskey.py paramiko/pkey.py paramiko/primes.py - paramiko/rsakey.py paramiko/ssh_exception.py - paramiko/transport.py paramiko/util.py - - -2003-12-31 06:31:43 GMT Robey Pointer <robey@lag.net> patch-19 - - Summary: - renamed auth_key -> auth_publickey; more docs. - Revision: - secsh--dev--1.0--patch-19 - - renamed Transport.auth_key to auth_publickey for consistency. and lots more - documentation. - - modified files: - README demo.py demo_server.py paramiko/__init__.py - paramiko/auth_transport.py paramiko/transport.py - - -2003-12-30 22:24:21 GMT Robey Pointer <robey@lag.net> patch-18 - - Summary: - added public-key support to server mode, more docs - Revision: - secsh--dev--1.0--patch-18 - - added public-key support to server mode (it can now verify a client signature) - and added a demo of that to the demo_server.py script (user_rsa_key). in the - process, cleaned up the API of PKey so that now it only has to know about - signing and verifying ssh2 blobs, and can be hashed and compared with other - keys (comparing & hashing only the public parts of the key). keys can also - be created from strings now too. - - some more documentation and hiding private methods. - - new files: - .arch-ids/user_rsa_key.id .arch-ids/user_rsa_key.pub.id - user_rsa_key user_rsa_key.pub - - modified files: - Makefile demo_server.py paramiko/__init__.py - paramiko/auth_transport.py paramiko/dsskey.py - paramiko/kex_gex.py paramiko/kex_group1.py paramiko/pkey.py - paramiko/rsakey.py paramiko/transport.py - - -2003-12-30 07:18:20 GMT Robey Pointer <robey@lag.net> patch-17 - - Summary: - lots more documentation, and added Transport.connect() - Revision: - secsh--dev--1.0--patch-17 - - renamed demo_host_key to demo_rsa_key. moved changelog to a separate file, - and indicated that future changelog entries should be fetched from tla. - tried to clean up "__all__" in a way that makes epydoc still work. - - added lots more documentation, and renamed many methods and vars to hide - them as private non-exported API. - - Transport's ModulusPack is now a static member, so it only has to be loaded - once, and can then be used by any future Transport object. - - added Transport.connect(), which tries to wrap all the SSH2 negotiation and - authentication into one method. 
you should be able to create a Transport, - call connect(), and then create channels. - - new files: - .arch-ids/ChangeLog.id .arch-ids/demo_simple.py.id ChangeLog - demo_simple.py paramiko/.arch-ids/pkey.py.id paramiko/pkey.py - - removed files: - .arch-ids/paramiko.py.id paramiko.py - - modified files: - Makefile NOTES README demo.py demo_server.py - paramiko/__init__.py paramiko/auth_transport.py - paramiko/channel.py paramiko/dsskey.py paramiko/kex_gex.py - paramiko/kex_group1.py paramiko/rsakey.py - paramiko/transport.py setup.py {arch}/=tagging-method - - renamed files: - .arch-ids/demo_host_key.id - ==> .arch-ids/demo_rsa_key.id - demo_host_key - ==> demo_rsa_key - - -2003-12-28 03:20:42 GMT Robey Pointer <robey@lag.net> patch-16 - - Summary: - hook up server-side kex-gex; add more documentation - Revision: - secsh--dev--1.0--patch-16 - - group-exchange kex should work now on the server side. it will only be - advertised if a "moduli" file has been loaded (see the -gasp- docs) so we - don't spend hours (literally. hours.) computing primes. some of the logic - was previously wrong, too, since it had never been tested. - - fixed repr() string for Transport/BaseTransport. moved is_authenticated to - Transport where it belongs. - - added lots of documentation (but still only about 10% documented). lots of - methods were made private finally. - - new files: - paramiko/.arch-ids/primes.py.id paramiko/primes.py - - modified files: - NOTES demo.py demo_server.py paramiko/__init__.py - paramiko/auth_transport.py paramiko/channel.py - paramiko/kex_gex.py paramiko/kex_group1.py - paramiko/transport.py paramiko/util.py - - -2003-12-27 02:03:44 GMT Robey Pointer <robey@lag.net> patch-15 - - Summary: - fix up new paramiko/ folder. - Revision: - secsh--dev--1.0--patch-15 - - moved SSHException to a new file (ssh_exception.py) and turned paramiko.py - into an __init__.py file. i'm still not entirely sure how this normally - works, so i may have done something wrong, but it's supposed to work the - same as before. - - new files: - paramiko/.arch-ids/__init__.py.id - paramiko/.arch-ids/ssh_exception.py.id paramiko/__init__.py - paramiko/ssh_exception.py - - modified files: - paramiko/auth_transport.py paramiko/channel.py - paramiko/dsskey.py paramiko/kex_gex.py paramiko/kex_group1.py - paramiko/transport.py - - -2003-12-27 01:49:19 GMT Robey Pointer <robey@lag.net> patch-14 - - Summary: - move the paramiko files into a paramiko/ folder. - Revision: - secsh--dev--1.0--patch-14 - - just moving the files into a folder. it won't build this way yet. 
- - new files: - paramiko/.arch-ids/=id - - renamed files: - .arch-ids/auth_transport.py.id - ==> paramiko/.arch-ids/auth_transport.py.id - .arch-ids/ber.py.id - ==> paramiko/.arch-ids/ber.py.id - .arch-ids/channel.py.id - ==> paramiko/.arch-ids/channel.py.id - .arch-ids/dsskey.py.id - ==> paramiko/.arch-ids/dsskey.py.id - .arch-ids/kex_gex.py.id - ==> paramiko/.arch-ids/kex_gex.py.id - .arch-ids/kex_group1.py.id - ==> paramiko/.arch-ids/kex_group1.py.id - .arch-ids/message.py.id - ==> paramiko/.arch-ids/message.py.id - .arch-ids/rsakey.py.id - ==> paramiko/.arch-ids/rsakey.py.id - .arch-ids/transport.py.id - ==> paramiko/.arch-ids/transport.py.id - .arch-ids/util.py.id - ==> paramiko/.arch-ids/util.py.id - auth_transport.py - ==> paramiko/auth_transport.py - ber.py - ==> paramiko/ber.py - channel.py - ==> paramiko/channel.py - dsskey.py - ==> paramiko/dsskey.py - kex_gex.py - ==> paramiko/kex_gex.py - kex_group1.py - ==> paramiko/kex_group1.py - message.py - ==> paramiko/message.py - rsakey.py - ==> paramiko/rsakey.py - transport.py - ==> paramiko/transport.py - util.py - ==> paramiko/util.py - - new directories: - paramiko paramiko/.arch-ids - - -2003-12-24 22:09:43 GMT Robey Pointer <robey@lag.net> patch-13 - - Summary: - fix a deadlock/race in handle_eof & close - Revision: - secsh--dev--1.0--patch-13 - - (patch from fred gansevles) - add locking around the eof handler and the close() call, so we can't be in - both simultaneously. - - modified files: - channel.py - - -2003-12-24 20:49:38 GMT Robey Pointer <robey@lag.net> patch-12 - - Summary: - fix dss key signing - Revision: - secsh--dev--1.0--patch-12 - - (expanded on a patch from fred gansevles) - add a demo dss key for server mode, and fix some bugs that had caused the dss - signing stuff to never work before. the demo_server is a bit more verbose - now, too. both key types (RSAKey & DSSKey) now have a function to return the - fingerprint of the key, and both versions of read_private_key_file() now raise - exceptions on failure, instead of just silently setting "valid" to false. - - new files: - .arch-ids/demo_dss_key.id demo_dss_key - - modified files: - demo_server.py dsskey.py kex_gex.py kex_group1.py paramiko.py - rsakey.py transport.py - - -2003-12-23 06:44:56 GMT Robey Pointer <robey@lag.net> patch-11 - - Summary: - in server mode, don't offer keys we don't have - Revision: - secsh--dev--1.0--patch-11 - - (from Paolo Losi) in server mode, when advertising which key methods we - support, don't list methods that we don't have any existing keys for. - - modified files: - transport.py - - -2003-12-23 06:36:27 GMT Robey Pointer <robey@lag.net> patch-10 - - Summary: - add logfiles and .pyc files to the "junk" list - Revision: - secsh--dev--1.0--patch-10 - - add *.log and *.pyc to the explicit junk list. - - modified files: - {arch}/=tagging-method - - -2003-11-10 08:49:50 GMT Robey Pointer <robey@lag.net> patch-9 - - Summary: - rename secsh -> paramiko - Revision: - secsh--dev--1.0--patch-9 - - also, rename SecshException back to SSHException. sigh. 
:) - - modified files: - ./MANIFEST ./Makefile ./NOTES ./README ./auth_transport.py - ./channel.py ./demo.py ./demo_server.py ./kex_gex.py - ./kex_group1.py ./message.py ./paramiko.py ./setup.py - ./transport.py - - renamed files: - ./.arch-ids/secsh.py.id - ==> ./.arch-ids/paramiko.py.id - ./secsh.py - ==> ./paramiko.py - - -2003-11-10 06:52:35 GMT Robey Pointer <robey@lag.net> patch-8 - - Summary: - doc changes - Revision: - secsh--dev--1.0--patch-8 - - - modified files: - ./README ./demo_server.py ./secsh.py - - -2003-11-10 04:54:02 GMT Robey Pointer <robey@lag.net> patch-7 - - Summary: - cleaned up server code, renamed some files & classes - Revision: - secsh--dev--1.0--patch-7 - - renamed demo-server.py and demo-host-key to demo_server.py and - demo_host_key, just to be consistent. - - renamed SSHException -> SecshException. - - generalized the mechanism where Channel decides whether to allow - different channel requests: 4 of the main ones (pty, window-change, - shell, and subsystem) go through easily override-able methods now. - you could probably make an actual ssh shell server. - - gave ChannelFile a repr(). - - turned off ultra debugging in the demos. demo_server creates a - subclass of Channel to allow pty/shell and sets an event when the - shell request is made, so that it knows when it can start sending - the fake bbs. - - renamed to charmander and updated some of the distutils files. - - modified files: - ./MANIFEST ./NOTES ./auth_transport.py ./channel.py ./demo.py - ./demo_server.py ./kex_gex.py ./kex_group1.py ./secsh.py - ./setup.py ./transport.py - - renamed files: - ./.arch-ids/demo-host-key.id - ==> ./.arch-ids/demo_host_key.id - ./.arch-ids/demo-server.py.id - ==> ./.arch-ids/demo_server.py.id - ./demo-host-key - ==> ./demo_host_key - ./demo-server.py - ==> ./demo_server.py - - -2003-11-09 21:16:35 GMT Robey Pointer <robey@lag.net> patch-6 - - Summary: - notes about the exported api - Revision: - secsh--dev--1.0--patch-6 - - just wrote some quick notes (for a few of the classes) about which - methods are intended to be the exported API. python has no decent - way of distinguishing private vs public. - - - modified files: - ./NOTES - - -2003-11-09 21:14:21 GMT Robey Pointer <robey@lag.net> patch-5 - - Summary: - big chunk of work which makes server code 95% done - Revision: - secsh--dev--1.0--patch-5 - - fixed auth check methods to return just a result (failed, succeeded, - partially succeeded) and always use get_allowed_auths to determine the - list of allowed auth methods to return. - - channel's internal API changed a bit to allow for client-side vs. - server-side channels. we now honor the "want-reply" bit from channel - requests. in server mode (for now), we automatically allow pty-req - and shell requests without doing anything. - - ChannelFile was fixed up a bit to support universal newlines. readline - got rewritten: the old way used the "greedy" read call from ChannelFile, - which won't work if the socket doesn't have that much data buffered and - ready. now it uses recv directly, and tracks the different newlines. - - demo-server.py now answers to a single shell request (like a CLI ssh - tool will make) and does a very simple demo pretending to be a BBS. - - transport: fixed a bug with parsing the remote side's banner. channel - requests are passed to another method in server mode, to determine if - we should allow it. 
new allowed channels are added to an accept queue, - and a new method 'accept' (with timeout) will block until the next - incoming channel is ready. - - - modified files: - ./auth_transport.py ./channel.py ./demo-server.py ./demo.py - ./transport.py - - -2003-11-09 20:59:51 GMT Robey Pointer <robey@lag.net> patch-4 - - Summary: - change kex-gex server code to generate primes by hand - Revision: - secsh--dev--1.0--patch-4 - - added a util function "generate_prime" to compare to the incredibly slow C - version, but it's no faster of course. i think kex-gex from the server is - just not going to be feasible without having a separate thread generate some - primes in the background to have handy when a request comes in. so in short, - this still doesn't work. - - also i put bit_length into util and a tb_strings function which gets stack - traceback info and splits it into a list of strings. - - - modified files: - ./kex_gex.py ./util.py - - -2003-11-07 10:36:42 GMT Robey Pointer <robey@lag.net> patch-3 - - Summary: - remove some leftover garbage from dsskey - Revision: - secsh--dev--1.0--patch-3 - - leftover from a cut & paste i was doing a few days ago. bad robey. - - modified files: - ./dsskey.py - - -2003-11-06 07:34:27 GMT Robey Pointer <robey@lag.net> patch-2 - - Summary: - add a demo host key and point demo-server at it. - Revision: - secsh--dev--1.0--patch-2 - - also, temporarily comment out the nonfunctional kex-gex method. - - new files: - ./.arch-ids/demo-host-key.id ./demo-host-key - - modified files: - ./demo-server.py ./transport.py - - -2003-11-04 08:50:22 GMT Robey Pointer <robey@lag.net> patch-1 - - Summary: - no changes - Revision: - secsh--dev--1.0--patch-1 - - why aren't my log messages kept? - - modified files: - ./kex_gex.py - - new patches: - robey@lag.net--2003/secsh--dev--1.0--patch-1 - - -2003-11-04 08:34:24 GMT Robey Pointer <robey@lag.net> base-0 - - Summary: - initial import - Revision: - secsh--dev--1.0--base-0 - - - (automatically generated log message) - - new files: - ./LICENSE ./MANIFEST ./Makefile ./NOTES ./README - ./auth_transport.py ./ber.py ./channel.py ./demo-server.py - ./demo.py ./dsskey.py ./kex_gex.py ./kex_group1.py - ./message.py ./rsakey.py ./secsh.py ./setup.py ./transport.py - ./util.py - - new patches: - robey@lag.net--2003/secsh--dev--1.0--base-0 - - diff --git a/README b/README deleted file mode 100644 index 57512604..00000000 --- a/README +++ /dev/null @@ -1,142 +0,0 @@ - -======== -paramiko -======== - -:Paramiko: Python SSH module -:Copyright: Copyright (c) 2003-2009 Robey Pointer <robeypointer@gmail.com> -:Copyright: Copyright (c) 2013-2015 Jeff Forcier <jeff@bitprophet.org> -:License: LGPL -:Homepage: https://github.com/paramiko/paramiko/ -:API docs: http://docs.paramiko.org - - -What ----- - -"paramiko" is a combination of the esperanto words for "paranoid" and -"friend". it's a module for python 2.6+ that implements the SSH2 protocol -for secure (encrypted and authenticated) connections to remote machines. -unlike SSL (aka TLS), SSH2 protocol does not require hierarchical -certificates signed by a powerful central authority. you may know SSH2 as -the protocol that replaced telnet and rsh for secure access to remote -shells, but the protocol also includes the ability to open arbitrary -channels to remote services across the encrypted tunnel (this is how sftp -works, for example). - -it is written entirely in python (no C or platform-dependent code) and is -released under the GNU LGPL (lesser GPL). 
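
as a small illustration of the "arbitrary channels across the encrypted tunnel" idea described above, here is a minimal, untested sketch (not part of the original README; the hostname, credentials and destination address are placeholders) of opening a "direct-tcpip" channel over a Transport::

    import paramiko

    # placeholders: point these at a real SSH server and credentials
    transport = paramiko.Transport(('ssh.example.com', 22))
    transport.connect(username='strongbad', password='thecheat')

    # open an arbitrary channel to a remote service through the tunnel;
    # here, a "direct-tcpip" channel to a host reachable from the SSH server
    channel = transport.open_channel(
        'direct-tcpip',
        dest_addr=('intranet.example.com', 80),
        src_addr=('127.0.0.1', 0),
    )
    channel.sendall(b'GET / HTTP/1.0\r\n\r\n')
    print(channel.recv(1024))
    channel.close()
    transport.close()
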
- -the package and its API is fairly well documented in the "doc/" folder -that should have come with this archive. - - -Requirements ------------- - - - Python 2.6 or better <http://www.python.org/> - this includes Python - 3.2 and higher as well. - - pycrypto 2.1 or better <https://www.dlitz.net/software/pycrypto/> - - ecdsa 0.9 or better <https://pypi.python.org/pypi/ecdsa> - -If you have setuptools, you can build and install paramiko and all its -dependencies with this command (as root):: - - easy_install ./ - - -Portability ------------ - -i code and test this library on Linux and MacOS X. for that reason, i'm -pretty sure that it works for all posix platforms, including MacOS. it -should also work on Windows, though i don't test it as frequently there. -if you run into Windows problems, send me a patch: portability is important -to me. - -some python distributions don't include the utf-8 string encodings, for -reasons of space (misdirected as that is). if your distribution is -missing encodings, you'll see an error like this:: - - LookupError: no codec search functions registered: can't find encoding - -this means you need to copy string encodings over from a working system. -(it probably only happens on embedded systems, not normal python -installs.) Valeriy Pogrebitskiy says the best place to look is -``.../lib/python*/encodings/__init__.py``. - - -Bugs & Support --------------- - -Please file bug reports at https://github.com/paramiko/paramiko/. There is currently no mailing list but we plan to create a new one ASAP. - - -Kerberos Support ----------------- - -Paramiko ships with optional Kerberos/GSSAPI support; for info on the extra -dependencies for this, see the 'GSS-API' section on the 'Installation' page of -our main website, http://paramiko.org . - - -Demo ----- - -several demo scripts come with paramiko to demonstrate how to use it. -probably the simplest demo of all is this:: - - import paramiko, base64 - key = paramiko.RSAKey(data=base64.decodestring('AAA...')) - client = paramiko.SSHClient() - client.get_host_keys().add('ssh.example.com', 'ssh-rsa', key) - client.connect('ssh.example.com', username='strongbad', password='thecheat') - stdin, stdout, stderr = client.exec_command('ls') - for line in stdout: - print '... ' + line.strip('\n') - client.close() - -...which prints out the results of executing ``ls`` on a remote server. -(the host key 'AAA...' should of course be replaced by the actual base64 -encoding of the host key. if you skip host key verification, the -connection is not secure!) - -the following example scripts (in demos/) get progressively more detailed: - -:demo_simple.py: - calls invoke_shell() and emulates a terminal/tty through which you can - execute commands interactively on a remote server. think of it as a - poor man's ssh command-line client. - -:demo.py: - same as demo_simple.py, but allows you to authenticiate using a - private key, attempts to use an SSH-agent if present, and uses the long - form of some of the API calls. - -:forward.py: - command-line script to set up port-forwarding across an ssh transport. - (requires python 2.3.) - -:demo_sftp.py: - opens an sftp session and does a few simple file operations. - -:demo_server.py: - an ssh server that listens on port 2200 and accepts a login for - 'robey' (password 'foo'), and pretends to be a BBS. meant to be a - very simple demo of writing an ssh server. - -:demo_keygen.py: - an key generator similar to openssh ssh-keygen(1) program with - paramiko keys generation and progress functions. 
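
for readers who only need the sftp piece described in the demo list above, a minimal sketch (this is not one of the shipped demos; the host, credentials and file paths are placeholders) looks roughly like this::

    import paramiko

    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.connect('ssh.example.com', username='strongbad', password='thecheat')

    # open an SFTP session over the existing SSH transport
    sftp = client.open_sftp()
    print(sftp.listdir('.'))             # list the remote working directory
    sftp.put('local.txt', 'remote.txt')  # upload a file
    sftp.get('remote.txt', 'copy.txt')   # download it again
    sftp.close()
    client.close()
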
- -Use ---- - -the demo scripts are probably the best example of how to use this package. -there is also a lot of documentation, generated with Sphinx autodoc, in the doc/ folder. - -there are also unit tests here:: - - $ python ./test.py - -which will verify that most of the core components are working correctly. diff --git a/README.rst b/README.rst new file mode 100644 index 00000000..e267f69a --- /dev/null +++ b/README.rst @@ -0,0 +1,137 @@ +======== +Paramiko +======== + +.. Continuous integration and code coverage badges + +.. image:: https://travis-ci.org/paramiko/paramiko.svg?branch=master + :target: https://travis-ci.org/paramiko/paramiko +.. image:: https://coveralls.io/repos/paramiko/paramiko/badge.svg?branch=master&service=github + :target: https://coveralls.io/github/paramiko/paramiko?branch=master + +:Paramiko: Python SSH module +:Copyright: Copyright (c) 2003-2009 Robey Pointer <robeypointer@gmail.com> +:Copyright: Copyright (c) 2013-2017 Jeff Forcier <jeff@bitprophet.org> +:License: `LGPL <https://www.gnu.org/copyleft/lesser.html>`_ +:Homepage: http://www.paramiko.org/ +:API docs: http://docs.paramiko.org +:Development: https://github.com/paramiko/paramiko + + +What +---- + +"Paramiko" is a combination of the Esperanto words for "paranoid" and +"friend". It's a module for Python 2.6+/3.3+ that implements the SSH2 protocol +for secure (encrypted and authenticated) connections to remote machines. Unlike +SSL (aka TLS), SSH2 protocol does not require hierarchical certificates signed +by a powerful central authority. You may know SSH2 as the protocol that +replaced Telnet and rsh for secure access to remote shells, but the protocol +also includes the ability to open arbitrary channels to remote services across +the encrypted tunnel (this is how SFTP works, for example). + +It is written entirely in Python (though it depends on third-party C wrappers +for low level crypto; these are often available precompiled) and is released +under the GNU Lesser General Public License (`LGPL +<https://www.gnu.org/copyleft/lesser.html>`_). + +The package and its API is fairly well documented in the ``docs`` folder that +should have come with this repository. + + +Installation +------------ + +For most users, the recommended method to install is via pip:: + + pip install paramiko + +For more detailed instructions, see the `Installing +<http://www.paramiko.org/installing.html>`_ page on the main Paramiko website. + + +Portability Issues +------------------ + +Paramiko primarily supports POSIX platforms with standard OpenSSH +implementations, and is most frequently tested on Linux and OS X. Windows is +supported as well, though it may not be as straightforward. + +Bugs & Support +-------------- + +:Bug Reports: `Github <https://github.com/paramiko/paramiko/issues/>`_ +:Mailing List: ``paramiko@librelist.com`` (see the `LibreList website + <http://librelist.com/>`_ for usage details). +:IRC: ``#paramiko`` on Freenode + + +Kerberos Support +---------------- + +Paramiko ships with optional Kerberos/GSSAPI support; for info on the extra +dependencies for this, see the `GSS-API section +<http://www.paramiko.org/installing.html#gssapi>`_ +on the main Paramiko website. + + +Demo +---- + +Several demo scripts come with Paramiko to demonstrate how to use it. 
+Probably the simplest demo is this:: + + import base64 + import paramiko + key = paramiko.RSAKey(data=base64.b64decode(b'AAA...')) + client = paramiko.SSHClient() + client.get_host_keys().add('ssh.example.com', 'ssh-rsa', key) + client.connect('ssh.example.com', username='strongbad', password='thecheat') + stdin, stdout, stderr = client.exec_command('ls') + for line in stdout: + print('... ' + line.strip('\n')) + client.close() + +This prints out the results of executing ``ls`` on a remote server. The host +key ``b'AAA...'`` should of course be replaced by the actual base64 encoding of the +host key. If you skip host key verification, the connection is not secure! + +The following example scripts (in demos/) get progressively more detailed: + +:demo_simple.py: + Calls invoke_shell() and emulates a terminal/TTY through which you can + execute commands interactively on a remote server. Think of it as a + poor man's SSH command-line client. + +:demo.py: + Same as demo_simple.py, but allows you to authenticate using a private + key, attempts to use an SSH agent if present, and uses the long form of + some of the API calls. + +:forward.py: + Command-line script to set up port-forwarding across an SSH transport. + +:demo_sftp.py: + Opens an SFTP session and does a few simple file operations. + +:demo_server.py: + An SSH server that listens on port 2200 and accepts a login for + 'robey' (password 'foo'), and pretends to be a BBS. Meant to be a + very simple demo of writing an SSH server. + +:demo_keygen.py: + A key generator similar to OpenSSH ``ssh-keygen(1)`` program with + Paramiko keys generation and progress functions. + +Use +--- + +The demo scripts are probably the best example of how to use this package. +Also a lot of documentation is generated by Sphinx autodoc, in the +doc/ folder. + +There are also unit tests here:: + + $ python ./test.py + +Which will verify that most of the core components are working correctly. diff --git a/demos/demo_server.py b/demos/demo_server.py index c4af9b10..3a7ec854 100644 --- a/demos/demo_server.py +++ b/demos/demo_server.py @@ -40,7 +40,7 @@ print('Read key: ' + u(hexlify(host_key.get_fingerprint()))) class Server (paramiko.ServerInterface): - # 'data' is the output of base64.encodestring(str(key)) + # 'data' is the output of base64.b64encode(key) # (using the "user_rsa_key" files) data = (b'AAAAB3NzaC1yc2EAAAABIwAAAIEAyO4it3fHlmGZWJaGrfeHOVY7RWO3P9M7hp' b'fAu7jJ2d7eothvfeuoRFtJwhUmZDluRdFyhFY/hFAh76PJKGAusIqIQKlkJxMC' @@ -96,9 +96,7 @@ class Server (paramiko.ServerInterface): return paramiko.AUTH_FAILED def enable_auth_gssapi(self): - UseGSSAPI = True - GSSAPICleanupCredentials = False - return UseGSSAPI + return True def get_allowed_auths(self, username): return 'gssapi-keyex,gssapi-with-mic,password,publickey' diff --git a/dev-requirements.txt b/dev-requirements.txt index 7a0ccbc5..716f432d 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -1,9 +1,12 @@ # Older junk tox>=1.4,<1.5 # For newer tasks like building Sphinx docs. 
-invoke>=0.7.0,<0.8 -invocations>=0.5.0 -sphinx>=1.1.3 -alabaster>=0.6.1 -releases>=0.5.2 -wheel==0.23.0 +invoke>=0.13,<2.0 +invocations>=0.13,<2.0 +sphinx>=1.1.3,<1.5 +alabaster>=0.7.5,<2.0 +releases>=1.1.0,<2.0 +semantic_version<3.0 +wheel==0.24 +twine==1.5 +flake8==2.6.2 diff --git a/paramiko/__init__.py b/paramiko/__init__.py index 9e2ba013..d67ad62f 100644 --- a/paramiko/__init__.py +++ b/paramiko/__init__.py @@ -16,6 +16,7 @@ # along with Paramiko; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. +# flake8: noqa import sys from paramiko._version import __version__, __version_info__ @@ -28,17 +29,23 @@ __license__ = "GNU Lesser General Public License (LGPL)" from paramiko.transport import SecurityOptions, Transport -from paramiko.client import SSHClient, MissingHostKeyPolicy, AutoAddPolicy, RejectPolicy, WarningPolicy +from paramiko.client import ( + SSHClient, MissingHostKeyPolicy, AutoAddPolicy, RejectPolicy, + WarningPolicy, +) from paramiko.auth_handler import AuthHandler from paramiko.ssh_gss import GSSAuth, GSS_AUTH_AVAILABLE from paramiko.channel import Channel, ChannelFile -from paramiko.ssh_exception import SSHException, PasswordRequiredException, \ - BadAuthenticationType, ChannelException, BadHostKeyException, \ - AuthenticationException, ProxyCommandFailure +from paramiko.ssh_exception import ( + SSHException, PasswordRequiredException, BadAuthenticationType, + ChannelException, BadHostKeyException, AuthenticationException, + ProxyCommandFailure, +) from paramiko.server import ServerInterface, SubsystemHandler, InteractiveQuery from paramiko.rsakey import RSAKey from paramiko.dsskey import DSSKey from paramiko.ecdsakey import ECDSAKey +from paramiko.ed25519key import Ed25519Key from paramiko.sftp import SFTPError, BaseSFTP from paramiko.sftp_client import SFTP, SFTPClient from paramiko.sftp_server import SFTPServer @@ -55,49 +62,56 @@ from paramiko.hostkeys import HostKeys from paramiko.config import SSHConfig from paramiko.proxy import ProxyCommand -from paramiko.common import AUTH_SUCCESSFUL, AUTH_PARTIALLY_SUCCESSFUL, AUTH_FAILED, \ - OPEN_SUCCEEDED, OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED, OPEN_FAILED_CONNECT_FAILED, \ - OPEN_FAILED_UNKNOWN_CHANNEL_TYPE, OPEN_FAILED_RESOURCE_SHORTAGE +from paramiko.common import ( + AUTH_SUCCESSFUL, AUTH_PARTIALLY_SUCCESSFUL, AUTH_FAILED, OPEN_SUCCEEDED, + OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED, OPEN_FAILED_CONNECT_FAILED, + OPEN_FAILED_UNKNOWN_CHANNEL_TYPE, OPEN_FAILED_RESOURCE_SHORTAGE, +) -from paramiko.sftp import SFTP_OK, SFTP_EOF, SFTP_NO_SUCH_FILE, SFTP_PERMISSION_DENIED, SFTP_FAILURE, \ - SFTP_BAD_MESSAGE, SFTP_NO_CONNECTION, SFTP_CONNECTION_LOST, SFTP_OP_UNSUPPORTED +from paramiko.sftp import ( + SFTP_OK, SFTP_EOF, SFTP_NO_SUCH_FILE, SFTP_PERMISSION_DENIED, SFTP_FAILURE, + SFTP_BAD_MESSAGE, SFTP_NO_CONNECTION, SFTP_CONNECTION_LOST, + SFTP_OP_UNSUPPORTED, +) from paramiko.common import io_sleep -__all__ = [ 'Transport', - 'SSHClient', - 'MissingHostKeyPolicy', - 'AutoAddPolicy', - 'RejectPolicy', - 'WarningPolicy', - 'SecurityOptions', - 'SubsystemHandler', - 'Channel', - 'PKey', - 'RSAKey', - 'DSSKey', - 'Message', - 'SSHException', - 'AuthenticationException', - 'PasswordRequiredException', - 'BadAuthenticationType', - 'ChannelException', - 'BadHostKeyException', - 'ProxyCommand', - 'ProxyCommandFailure', - 'SFTP', - 'SFTPFile', - 'SFTPHandle', - 'SFTPClient', - 'SFTPServer', - 'SFTPError', - 'SFTPAttributes', - 'SFTPServerInterface', - 'ServerInterface', - 
'BufferedFile', - 'Agent', - 'AgentKey', - 'HostKeys', - 'SSHConfig', - 'util', - 'io_sleep' ] +__all__ = [ + 'Transport', + 'SSHClient', + 'MissingHostKeyPolicy', + 'AutoAddPolicy', + 'RejectPolicy', + 'WarningPolicy', + 'SecurityOptions', + 'SubsystemHandler', + 'Channel', + 'PKey', + 'RSAKey', + 'DSSKey', + 'Message', + 'SSHException', + 'AuthenticationException', + 'PasswordRequiredException', + 'BadAuthenticationType', + 'ChannelException', + 'BadHostKeyException', + 'ProxyCommand', + 'ProxyCommandFailure', + 'SFTP', + 'SFTPFile', + 'SFTPHandle', + 'SFTPClient', + 'SFTPServer', + 'SFTPError', + 'SFTPAttributes', + 'SFTPServerInterface', + 'ServerInterface', + 'BufferedFile', + 'Agent', + 'AgentKey', + 'HostKeys', + 'SSHConfig', + 'util', + 'io_sleep', +] diff --git a/paramiko/_version.py b/paramiko/_version.py index 3bf9dac7..2ad47eb4 100644 --- a/paramiko/_version.py +++ b/paramiko/_version.py @@ -1,2 +1,2 @@ -__version_info__ = (1, 15, 2) +__version_info__ = (2, 1, 2) __version__ = '.'.join(map(str, __version_info__)) diff --git a/paramiko/_winapi.py b/paramiko/_winapi.py index d6aabf76..a13d7e87 100644 --- a/paramiko/_winapi.py +++ b/paramiko/_winapi.py @@ -1,23 +1,16 @@ """ Windows API functions implemented as ctypes functions and classes as found -in jaraco.windows (2.10). +in jaraco.windows (3.4.1). If you encounter issues with this module, please consider reporting the issues in jaraco.windows and asking the author to port the fixes back here. """ -import ctypes +import sys import ctypes.wintypes -from paramiko.py3compat import u -try: - import builtins -except ImportError: - import __builtin__ as builtins -try: - USHORT = ctypes.wintypes.USHORT -except AttributeError: - USHORT = ctypes.c_ushort +from paramiko.py3compat import u, builtins + ###################### # jaraco.windows.error @@ -29,11 +22,7 @@ def format_system_message(errno): """ # first some flags used by FormatMessageW ALLOCATE_BUFFER = 0x100 - ARGUMENT_ARRAY = 0x2000 - FROM_HMODULE = 0x800 - FROM_STRING = 0x400 FROM_SYSTEM = 0x1000 - IGNORE_INSERTS = 0x200 # Let FormatMessageW allocate the buffer (we'll free it below) # Also, let it know we want a system error message. @@ -44,7 +33,7 @@ def format_system_message(errno): result_buffer = ctypes.wintypes.LPWSTR() buffer_size = 0 arguments = None - format_bytes = ctypes.windll.kernel32.FormatMessageW( + bytes = ctypes.windll.kernel32.FormatMessageW( flags, source, message_id, @@ -56,20 +45,25 @@ def format_system_message(errno): # note the following will cause an infinite loop if GetLastError # repeatedly returns an error that cannot be formatted, although # this should not happen. 
- handle_nonzero_success(format_bytes) + handle_nonzero_success(bytes) message = result_buffer.value ctypes.windll.kernel32.LocalFree(result_buffer) return message class WindowsError(builtins.WindowsError): - "more info about errors at http://msdn.microsoft.com/en-us/library/ms681381(VS.85).aspx" + """more info about errors at + http://msdn.microsoft.com/en-us/library/ms681381(VS.85).aspx""" def __init__(self, value=None): if value is None: value = ctypes.windll.kernel32.GetLastError() strerror = format_system_message(value) - super(WindowsError, self).__init__(value, strerror) + if sys.version_info > (3, 3): + args = 0, strerror, None, value + else: + args = value, strerror + super(WindowsError, self).__init__(*args) @property def message(self): @@ -85,11 +79,33 @@ class WindowsError(builtins.WindowsError): def __repr__(self): return '{self.__class__.__name__}({self.winerror})'.format(**vars()) + def handle_nonzero_success(result): if result == 0: raise WindowsError() +########################### +# jaraco.windows.api.memory + +GMEM_MOVEABLE = 0x2 + +GlobalAlloc = ctypes.windll.kernel32.GlobalAlloc +GlobalAlloc.argtypes = ctypes.wintypes.UINT, ctypes.c_size_t +GlobalAlloc.restype = ctypes.wintypes.HANDLE + +GlobalLock = ctypes.windll.kernel32.GlobalLock +GlobalLock.argtypes = ctypes.wintypes.HGLOBAL, +GlobalLock.restype = ctypes.wintypes.LPVOID + +GlobalUnlock = ctypes.windll.kernel32.GlobalUnlock +GlobalUnlock.argtypes = ctypes.wintypes.HGLOBAL, +GlobalUnlock.restype = ctypes.wintypes.BOOL + +GlobalSize = ctypes.windll.kernel32.GlobalSize +GlobalSize.argtypes = ctypes.wintypes.HGLOBAL, +GlobalSize.restype = ctypes.c_size_t + CreateFileMapping = ctypes.windll.kernel32.CreateFileMappingW CreateFileMapping.argtypes = [ ctypes.wintypes.HANDLE, @@ -104,6 +120,22 @@ CreateFileMapping.restype = ctypes.wintypes.HANDLE MapViewOfFile = ctypes.windll.kernel32.MapViewOfFile MapViewOfFile.restype = ctypes.wintypes.HANDLE +UnmapViewOfFile = ctypes.windll.kernel32.UnmapViewOfFile +UnmapViewOfFile.argtypes = ctypes.wintypes.HANDLE, + +RtlMoveMemory = ctypes.windll.kernel32.RtlMoveMemory +RtlMoveMemory.argtypes = ( + ctypes.c_void_p, + ctypes.c_void_p, + ctypes.c_size_t, +) + +ctypes.windll.kernel32.LocalFree.argtypes = ctypes.wintypes.HLOCAL, + +##################### +# jaraco.windows.mmap + + class MemoryMap(object): """ A memory map object which can have security attributes overridden. @@ -136,10 +168,13 @@ class MemoryMap(object): self.pos = pos def write(self, msg): + assert isinstance(msg, bytes) n = len(msg) if self.pos + n >= self.length: # A little safety. raise ValueError("Refusing to write %d bytes" % n) - ctypes.windll.kernel32.RtlMoveMemory(self.view + self.pos, msg, n) + dest = self.view + self.pos + length = ctypes.c_size_t(n) + ctypes.windll.kernel32.RtlMoveMemory(dest, msg, length) self.pos += n def read(self, n): @@ -147,7 +182,9 @@ class MemoryMap(object): Read n bytes from mapped view. 
""" out = ctypes.create_string_buffer(n) - ctypes.windll.kernel32.RtlMoveMemory(out, self.view + self.pos, n) + source = self.view + self.pos + length = ctypes.c_size_t(n) + ctypes.windll.kernel32.RtlMoveMemory(out, source, length) self.pos += n return out.raw @@ -155,12 +192,79 @@ class MemoryMap(object): ctypes.windll.kernel32.UnmapViewOfFile(self.view) ctypes.windll.kernel32.CloseHandle(self.filemap) -######################### -# jaraco.windows.security + +############################# +# jaraco.windows.api.security + +# from WinNT.h +READ_CONTROL = 0x00020000 +STANDARD_RIGHTS_REQUIRED = 0x000F0000 +STANDARD_RIGHTS_READ = READ_CONTROL +STANDARD_RIGHTS_WRITE = READ_CONTROL +STANDARD_RIGHTS_EXECUTE = READ_CONTROL +STANDARD_RIGHTS_ALL = 0x001F0000 + +# from NTSecAPI.h +POLICY_VIEW_LOCAL_INFORMATION = 0x00000001 +POLICY_VIEW_AUDIT_INFORMATION = 0x00000002 +POLICY_GET_PRIVATE_INFORMATION = 0x00000004 +POLICY_TRUST_ADMIN = 0x00000008 +POLICY_CREATE_ACCOUNT = 0x00000010 +POLICY_CREATE_SECRET = 0x00000020 +POLICY_CREATE_PRIVILEGE = 0x00000040 +POLICY_SET_DEFAULT_QUOTA_LIMITS = 0x00000080 +POLICY_SET_AUDIT_REQUIREMENTS = 0x00000100 +POLICY_AUDIT_LOG_ADMIN = 0x00000200 +POLICY_SERVER_ADMIN = 0x00000400 +POLICY_LOOKUP_NAMES = 0x00000800 +POLICY_NOTIFICATION = 0x00001000 + +POLICY_ALL_ACCESS = ( + STANDARD_RIGHTS_REQUIRED | + POLICY_VIEW_LOCAL_INFORMATION | + POLICY_VIEW_AUDIT_INFORMATION | + POLICY_GET_PRIVATE_INFORMATION | + POLICY_TRUST_ADMIN | + POLICY_CREATE_ACCOUNT | + POLICY_CREATE_SECRET | + POLICY_CREATE_PRIVILEGE | + POLICY_SET_DEFAULT_QUOTA_LIMITS | + POLICY_SET_AUDIT_REQUIREMENTS | + POLICY_AUDIT_LOG_ADMIN | + POLICY_SERVER_ADMIN | + POLICY_LOOKUP_NAMES) + + +POLICY_READ = ( + STANDARD_RIGHTS_READ | + POLICY_VIEW_AUDIT_INFORMATION | + POLICY_GET_PRIVATE_INFORMATION) + +POLICY_WRITE = ( + STANDARD_RIGHTS_WRITE | + POLICY_TRUST_ADMIN | + POLICY_CREATE_ACCOUNT | + POLICY_CREATE_SECRET | + POLICY_CREATE_PRIVILEGE | + POLICY_SET_DEFAULT_QUOTA_LIMITS | + POLICY_SET_AUDIT_REQUIREMENTS | + POLICY_AUDIT_LOG_ADMIN | + POLICY_SERVER_ADMIN) + +POLICY_EXECUTE = ( + STANDARD_RIGHTS_EXECUTE | + POLICY_VIEW_LOCAL_INFORMATION | + POLICY_LOOKUP_NAMES) + + +class TokenAccess: + TOKEN_QUERY = 0x8 + class TokenInformationClass: TokenUser = 1 + class TOKEN_USER(ctypes.Structure): num = 1 _fields_ = [ @@ -182,7 +286,7 @@ class SECURITY_DESCRIPTOR(ctypes.Structure): PACL Dacl; } SECURITY_DESCRIPTOR; """ - SECURITY_DESCRIPTOR_CONTROL = USHORT + SECURITY_DESCRIPTOR_CONTROL = ctypes.wintypes.USHORT REVISION = 1 _fields_ = [ @@ -195,6 +299,7 @@ class SECURITY_DESCRIPTOR(ctypes.Structure): ('Dacl', ctypes.c_void_p), ] + class SECURITY_ATTRIBUTES(ctypes.Structure): """ typedef struct _SECURITY_ATTRIBUTES { @@ -219,8 +324,19 @@ class SECURITY_ATTRIBUTES(ctypes.Structure): @descriptor.setter def descriptor(self, value): - self._descriptor = descriptor - self.lpSecurityDescriptor = ctypes.addressof(descriptor) + self._descriptor = value + self.lpSecurityDescriptor = ctypes.addressof(value) + + +ctypes.windll.advapi32.SetSecurityDescriptorOwner.argtypes = ( + ctypes.POINTER(SECURITY_DESCRIPTOR), + ctypes.c_void_p, + ctypes.wintypes.BOOL, +) + +######################### +# jaraco.windows.security + def GetTokenInformation(token, information_class): """ @@ -236,8 +352,6 @@ def GetTokenInformation(token, information_class): ctypes.byref(data_size))) return ctypes.cast(data, ctypes.POINTER(TOKEN_USER)).contents -class TokenAccess: - TOKEN_QUERY = 0x8 def OpenProcessToken(proc_handle, access): result = 
ctypes.wintypes.HANDLE() @@ -246,6 +360,7 @@ def OpenProcessToken(proc_handle, access): proc_handle, access, ctypes.byref(result))) return result + def get_current_user(): """ Return a TOKEN_USER for the owner of this process. @@ -256,6 +371,7 @@ def get_current_user(): ) return GetTokenInformation(process, TOKEN_USER) + def get_security_attributes_for_user(user=None): """ Return a SECURITY_ATTRIBUTES structure with the SID set to the diff --git a/paramiko/agent.py b/paramiko/agent.py index a75ac59e..bc857efa 100644 --- a/paramiko/agent.py +++ b/paramiko/agent.py @@ -32,7 +32,7 @@ from select import select from paramiko.common import asbytes, io_sleep from paramiko.py3compat import byte_chr -from paramiko.ssh_exception import SSHException +from paramiko.ssh_exception import SSHException, AuthenticationException from paramiko.message import Message from paramiko.pkey import PKey from paramiko.util import retry_on_signal @@ -109,12 +109,23 @@ class AgentProxyThread(threading.Thread): def run(self): try: (r, addr) = self.get_connection() + # Found that r should be either + # a socket from the socket library or None self.__inr = r + # The address should be an IP address as a string? or None self.__addr = addr self._agent.connect() + if ( + not isinstance(self._agent, int) and + ( + self._agent._conn is None or + not hasattr(self._agent._conn, 'fileno') + ) + ): + raise AuthenticationException("Unable to connect to SSH agent") self._communicate() except: - #XXX Not sure what to do here ... raise or pass ? + # XXX Not sure what to do here ... raise or pass ? raise def _communicate(self): @@ -210,7 +221,8 @@ class AgentClientProxy(object): if ('SSH_AUTH_SOCK' in os.environ) and (sys.platform != 'win32'): conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) try: - retry_on_signal(lambda: conn.connect(os.environ['SSH_AUTH_SOCK'])) + retry_on_signal( + lambda: conn.connect(os.environ['SSH_AUTH_SOCK'])) except: # probably a dangling env var: the ssh agent is gone return @@ -241,7 +253,7 @@ class AgentServerProxy(AgentSSH): """ :param .Transport t: Transport used for SSH Agent communication forwarding - :raises SSHException: mostly if we lost the agent + :raises: `.SSHException` -- mostly if we lost the agent """ def __init__(self, t): AgentSSH.__init__(self) @@ -287,6 +299,26 @@ class AgentServerProxy(AgentSSH): class AgentRequestHandler(object): + """ + Primary/default implementation of SSH agent forwarding functionality. + + Simply instantiate this class, handing it a live command-executing session + object, and it will handle forwarding any local SSH agent processes it + finds. + + For example:: + + # Connect + client = SSHClient() + client.connect(host, port, username) + # Obtain session + session = client.get_transport().open_session() + # Forward local agent + AgentRequestHandler(session) + # Commands executed after this point will see the forwarded agent on + # the remote end. + session.exec_command("git clone https://my.git.repository/") + """ def __init__(self, chanClient): self._conn = None self.__chanC = chanClient @@ -308,14 +340,14 @@ class Agent(AgentSSH): """ Client interface for using private keys from an SSH agent running on the local machine. If an SSH agent is running, this class can be used to - connect to it and retreive `.PKey` objects which can be used when + connect to it and retrieve `.PKey` objects which can be used when attempting to authenticate to remote SSH servers. Upon initialization, a session with the local machine's SSH agent is opened, if one is running. 
If no agent is running, initialization will succeed, but `get_keys` will return an empty tuple. - :raises SSHException: + :raises: `.SSHException` -- if an SSH agent is found, but speaks an incompatible protocol """ def __init__(self): diff --git a/paramiko/auth_handler.py b/paramiko/auth_handler.py index c001aeee..ae88179e 100644 --- a/paramiko/auth_handler.py +++ b/paramiko/auth_handler.py @@ -21,25 +21,28 @@ """ import weakref -from paramiko.common import cMSG_SERVICE_REQUEST, cMSG_DISCONNECT, \ - DISCONNECT_SERVICE_NOT_AVAILABLE, DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE, \ - cMSG_USERAUTH_REQUEST, cMSG_SERVICE_ACCEPT, DEBUG, AUTH_SUCCESSFUL, INFO, \ - cMSG_USERAUTH_SUCCESS, cMSG_USERAUTH_FAILURE, AUTH_PARTIALLY_SUCCESSFUL, \ - cMSG_USERAUTH_INFO_REQUEST, WARNING, AUTH_FAILED, cMSG_USERAUTH_PK_OK, \ - cMSG_USERAUTH_INFO_RESPONSE, MSG_SERVICE_REQUEST, MSG_SERVICE_ACCEPT, \ - MSG_USERAUTH_REQUEST, MSG_USERAUTH_SUCCESS, MSG_USERAUTH_FAILURE, \ - MSG_USERAUTH_BANNER, MSG_USERAUTH_INFO_REQUEST, MSG_USERAUTH_INFO_RESPONSE, \ - cMSG_USERAUTH_GSSAPI_RESPONSE, cMSG_USERAUTH_GSSAPI_TOKEN, \ - cMSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE, cMSG_USERAUTH_GSSAPI_ERROR, \ - cMSG_USERAUTH_GSSAPI_ERRTOK, cMSG_USERAUTH_GSSAPI_MIC,\ - MSG_USERAUTH_GSSAPI_RESPONSE, MSG_USERAUTH_GSSAPI_TOKEN, \ - MSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE, MSG_USERAUTH_GSSAPI_ERROR, \ - MSG_USERAUTH_GSSAPI_ERRTOK, MSG_USERAUTH_GSSAPI_MIC +import time +from paramiko.common import ( + cMSG_SERVICE_REQUEST, cMSG_DISCONNECT, DISCONNECT_SERVICE_NOT_AVAILABLE, + DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE, cMSG_USERAUTH_REQUEST, + cMSG_SERVICE_ACCEPT, DEBUG, AUTH_SUCCESSFUL, INFO, cMSG_USERAUTH_SUCCESS, + cMSG_USERAUTH_FAILURE, AUTH_PARTIALLY_SUCCESSFUL, + cMSG_USERAUTH_INFO_REQUEST, WARNING, AUTH_FAILED, cMSG_USERAUTH_PK_OK, + cMSG_USERAUTH_INFO_RESPONSE, MSG_SERVICE_REQUEST, MSG_SERVICE_ACCEPT, + MSG_USERAUTH_REQUEST, MSG_USERAUTH_SUCCESS, MSG_USERAUTH_FAILURE, + MSG_USERAUTH_BANNER, MSG_USERAUTH_INFO_REQUEST, MSG_USERAUTH_INFO_RESPONSE, + cMSG_USERAUTH_GSSAPI_RESPONSE, cMSG_USERAUTH_GSSAPI_TOKEN, + cMSG_USERAUTH_GSSAPI_MIC, MSG_USERAUTH_GSSAPI_RESPONSE, + MSG_USERAUTH_GSSAPI_TOKEN, MSG_USERAUTH_GSSAPI_ERROR, + MSG_USERAUTH_GSSAPI_ERRTOK, MSG_USERAUTH_GSSAPI_MIC, MSG_NAMES, +) from paramiko.message import Message from paramiko.py3compat import bytestring -from paramiko.ssh_exception import SSHException, AuthenticationException, \ - BadAuthenticationType, PartialAuthentication +from paramiko.ssh_exception import ( + SSHException, AuthenticationException, BadAuthenticationType, + PartialAuthentication, +) from paramiko.server import InteractiveQuery from paramiko.ssh_gss import GSSAuth @@ -149,7 +152,7 @@ class AuthHandler (object): if self.auth_event is not None: self.auth_event.set() - ### internals... + # ...internals... 
def _request_auth(self): m = Message() @@ -188,6 +191,9 @@ class AuthHandler (object): return m.asbytes() def wait_for_response(self, event): + max_ts = None + if self.transport.auth_timeout is not None: + max_ts = time.time() + self.transport.auth_timeout while True: event.wait(0.1) if not self.transport.is_active(): @@ -197,6 +203,9 @@ class AuthHandler (object): raise e if event.is_set(): break + if max_ts is not None and max_ts <= time.time(): + raise AuthenticationException('Authentication timeout.') + if not self.is_authenticated(): e = self.transport.get_exception() if e is None: @@ -237,7 +246,8 @@ class AuthHandler (object): m.add_boolean(True) m.add_string(self.private_key.get_name()) m.add_string(self.private_key) - blob = self._get_session_blob(self.private_key, 'ssh-connection', self.username) + blob = self._get_session_blob( + self.private_key, 'ssh-connection', self.username) sig = self.private_key.sign_ssh_data(blob) m.add_string(sig) elif self.auth_method == 'keyboard-interactive': @@ -267,10 +277,11 @@ class AuthHandler (object): ptype, m = self.transport.packetizer.read_message() if ptype == MSG_USERAUTH_GSSAPI_TOKEN: srv_token = m.get_string() - next_token = sshgss.ssh_init_sec_context(self.gss_host, - mech, - self.username, - srv_token) + next_token = sshgss.ssh_init_sec_context( + self.gss_host, + mech, + self.username, + srv_token) # After this step the GSSAPI should not return any # token. If it does, we keep sending the token to # the server until no more token is returned. @@ -282,7 +293,8 @@ class AuthHandler (object): m.add_string(next_token) self.transport.send_message(m) else: - raise SSHException("Received Package: %s" % MSG_NAMES[ptype]) + raise SSHException( + "Received Package: %s" % MSG_NAMES[ptype]) m = Message() m.add_byte(cMSG_USERAUTH_GSSAPI_MIC) # send the MIC to the server @@ -297,7 +309,7 @@ class AuthHandler (object): maj_status = m.get_int() min_status = m.get_int() err_msg = m.get_string() - lang_tag = m.get_string() # we don't care! + m.get_string() # Lang tag - discarded raise SSHException("GSS-API Error:\nMajor Status: %s\n\ Minor Status: %s\ \nError Message:\ %s\n") % (str(maj_status), @@ -307,9 +319,12 @@ class AuthHandler (object): self._parse_userauth_failure(m) return else: - raise SSHException("Received Package: %s" % MSG_NAMES[ptype]) - elif self.auth_method == 'gssapi-keyex' and\ - self.transport.gss_kex_used: + raise SSHException( + "Received Package: %s" % MSG_NAMES[ptype]) + elif ( + self.auth_method == 'gssapi-keyex' and + self.transport.gss_kex_used + ): kexgss = self.transport.kexgss_ctxt kexgss.set_username(self.username) mic_token = kexgss.ssh_get_mic(self.transport.session_id) @@ -317,10 +332,13 @@ class AuthHandler (object): elif self.auth_method == 'none': pass else: - raise SSHException('Unknown auth method "%s"' % self.auth_method) + raise SSHException( + 'Unknown auth method "%s"' % self.auth_method) self.transport._send_message(m) else: - self.transport._log(DEBUG, 'Service request "%s" accepted (?)' % service) + self.transport._log( + DEBUG, + 'Service request "%s" accepted (?)' % service) def _send_auth_result(self, username, method, result): # okay, send result @@ -332,7 +350,8 @@ class AuthHandler (object): else: self.transport._log(INFO, 'Auth rejected (%s).' 
% method) m.add_byte(cMSG_USERAUTH_FAILURE) - m.add_string(self.transport.server_object.get_allowed_auths(username)) + m.add_string( + self.transport.server_object.get_allowed_auths(username)) if result == AUTH_PARTIALLY_SUCCESSFUL: m.add_boolean(True) else: @@ -356,7 +375,7 @@ class AuthHandler (object): m.add_string(p[0]) m.add_boolean(p[1]) self.transport._send_message(m) - + def _parse_userauth_request(self, m): if not self.transport.server_mode: # er, uh... what? @@ -372,12 +391,19 @@ class AuthHandler (object): username = m.get_text() service = m.get_text() method = m.get_text() - self.transport._log(DEBUG, 'Auth request (type=%s) service=%s, username=%s' % (method, service, username)) + self.transport._log( + DEBUG, + 'Auth request (type=%s) service=%s, username=%s' % ( + method, service, username)) if service != 'ssh-connection': self._disconnect_service_not_available() return - if (self.auth_username is not None) and (self.auth_username != username): - self.transport._log(WARNING, 'Auth rejected because the client attempted to change username in mid-flight') + if ((self.auth_username is not None) and + (self.auth_username != username)): + self.transport._log( + WARNING, + 'Auth rejected because the client attempted to change username in mid-flight' # noqa + ) self._disconnect_no_more_auth() return self.auth_username = username @@ -396,9 +422,12 @@ class AuthHandler (object): # in this case, just return the raw byte string. pass if changereq: - # always treated as failure, since we don't support changing passwords, but collect - # the list of valid auth types from the callback anyway - self.transport._log(DEBUG, 'Auth request to change passwords (rejected)') + # always treated as failure, since we don't support changing + # passwords, but collect the list of valid auth types from + # the callback anyway + self.transport._log( + DEBUG, + 'Auth request to change passwords (rejected)') newpassword = m.get_binary() try: newpassword = newpassword.decode('UTF-8', 'replace') @@ -406,7 +435,8 @@ class AuthHandler (object): pass result = AUTH_FAILED else: - result = self.transport.server_object.check_auth_password(username, password) + result = self.transport.server_object.check_auth_password( + username, password) elif method == 'publickey': sig_attached = m.get_boolean() keytype = m.get_text() @@ -414,16 +444,21 @@ class AuthHandler (object): try: key = self.transport._key_info[keytype](Message(keyblob)) except SSHException as e: - self.transport._log(INFO, 'Auth rejected: public key: %s' % str(e)) + self.transport._log( + INFO, + 'Auth rejected: public key: %s' % str(e)) key = None except: - self.transport._log(INFO, 'Auth rejected: unsupported or mangled public key') + self.transport._log( + INFO, + 'Auth rejected: unsupported or mangled public key') key = None if key is None: self._disconnect_no_more_auth() return # first check if this key is okay... 
if not, we can skip the verify - result = self.transport.server_object.check_auth_publickey(username, key) + result = self.transport.server_object.check_auth_publickey( + username, key) if result != AUTH_FAILED: # key is okay, verify it if not sig_attached: @@ -438,12 +473,14 @@ class AuthHandler (object): sig = Message(m.get_binary()) blob = self._get_session_blob(key, service, username) if not key.verify_ssh_sig(blob, sig): - self.transport._log(INFO, 'Auth rejected: invalid signature') + self.transport._log( + INFO, + 'Auth rejected: invalid signature') result = AUTH_FAILED elif method == 'keyboard-interactive': - lang = m.get_string() submethods = m.get_string() - result = self.transport.server_object.check_auth_interactive(username, submethods) + result = self.transport.server_object.check_auth_interactive( + username, submethods) if isinstance(result, InteractiveQuery): # make interactive query instead of response self._interactive_query(result) @@ -457,15 +494,17 @@ class AuthHandler (object): # We can't accept more than one OID, so if the SSH client sends # more than one, disconnect. if mechs > 1: - self.transport._log(INFO, - 'Disconnect: Received more than one GSS-API OID mechanism') + self.transport._log( + INFO, + 'Disconnect: Received more than one GSS-API OID mechanism') self._disconnect_no_more_auth() desired_mech = m.get_string() mech_ok = sshgss.ssh_check_mech(desired_mech) # if we don't support the mechanism, disconnect. if not mech_ok: - self.transport._log(INFO, - 'Disconnect: Received an invalid GSS-API OID mechanism') + self.transport._log( + INFO, + 'Disconnect: Received an invalid GSS-API OID mechanism') self._disconnect_no_more_auth() # send the Kerberos V5 GSSAPI OID to the client supported_mech = sshgss.ssh_gss_oids("server") @@ -495,8 +534,9 @@ class AuthHandler (object): m.add_string(token) self.transport._send_message(m) else: - raise SSHException("Client asked to handle paket %s" - %MSG_NAMES[ptype]) + result = AUTH_FAILED + self._send_auth_result(username, method, result) + return # check MIC ptype, m = self.transport.packetizer.read_message() if ptype == MSG_USERAUTH_GSSAPI_MIC: @@ -510,15 +550,12 @@ class AuthHandler (object): result = AUTH_FAILED self._send_auth_result(username, method, result) raise - if retval == 0: - # TODO: Implement client credential saving. - # The OpenSSH server is able to create a TGT with the delegated - # client credentials, but this is not supported by GSS-API. - result = AUTH_SUCCESSFUL - self.transport.server_object.check_auth_gssapi_with_mic( - username, result) - else: - result = AUTH_FAILED + # TODO: Implement client credential saving. + # The OpenSSH server is able to create a TGT with the delegated + # client credentials, but this is not supported by GSS-API. 
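The server-side branches above ultimately defer to ServerInterface callbacks such as get_allowed_auths, check_auth_password and check_auth_publickey. A rough sketch of a server object implementing those hooks; DemoServer, the 'demo'/'secret' credentials and the allowed_key argument are made up for illustration.

    import paramiko
    from paramiko import AUTH_FAILED, AUTH_SUCCESSFUL


    class DemoServer(paramiko.ServerInterface):
        def __init__(self, allowed_key):
            # allowed_key is any paramiko.PKey instance loaded elsewhere.
            self.allowed_key = allowed_key

        def get_allowed_auths(self, username):
            # Comma-separated list returned to the client on auth failure.
            return 'password,publickey'

        def check_auth_password(self, username, password):
            if username == 'demo' and password == 'secret':
                return AUTH_SUCCESSFUL
            return AUTH_FAILED

        def check_auth_publickey(self, username, key):
            # Called before the signature check seen in _parse_userauth_request.
            if username == 'demo' and key.asbytes() == self.allowed_key.asbytes():
                return AUTH_SUCCESSFUL
            return AUTH_FAILED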
+ result = AUTH_SUCCESSFUL + self.transport.server_object.check_auth_gssapi_with_mic( + username, result) elif method == "gssapi-keyex" and gss_auth: mic_token = m.get_string() sshgss = self.transport.kexgss_ctxt @@ -534,19 +571,18 @@ class AuthHandler (object): result = AUTH_FAILED self._send_auth_result(username, method, result) raise - if retval == 0: - result = AUTH_SUCCESSFUL - self.transport.server_object.check_auth_gssapi_keyex(username, - result) - else: - result = AUTH_FAILED + result = AUTH_SUCCESSFUL + self.transport.server_object.check_auth_gssapi_keyex( + username, result) else: result = self.transport.server_object.check_auth_none(username) # okay, send result self._send_auth_result(username, method, result) def _parse_userauth_success(self, m): - self.transport._log(INFO, 'Authentication (%s) successful!' % self.auth_method) + self.transport._log( + INFO, + 'Authentication (%s) successful!' % self.auth_method) self.authenticated = True self.transport._auth_trigger() if self.auth_event is not None: @@ -560,11 +596,18 @@ class AuthHandler (object): self.transport._log(DEBUG, 'Methods: ' + str(authlist)) self.transport.saved_exception = PartialAuthentication(authlist) elif self.auth_method not in authlist: - self.transport._log(DEBUG, 'Authentication type (%s) not permitted.' % self.auth_method) - self.transport._log(DEBUG, 'Allowed methods: ' + str(authlist)) - self.transport.saved_exception = BadAuthenticationType('Bad authentication type', authlist) + self.transport._log( + DEBUG, + 'Authentication type (%s) not permitted.' % self.auth_method) + self.transport._log( + DEBUG, + 'Allowed methods: ' + str(authlist)) + self.transport.saved_exception = BadAuthenticationType( + 'Bad authentication type', authlist) else: - self.transport._log(INFO, 'Authentication (%s) failed.' % self.auth_method) + self.transport._log( + INFO, + 'Authentication (%s) failed.' % self.auth_method) self.authenticated = False self.username = None if self.auth_event is not None: @@ -573,10 +616,9 @@ class AuthHandler (object): def _parse_userauth_banner(self, m): banner = m.get_string() self.banner = banner - lang = m.get_string() self.transport._log(INFO, 'Auth banner: %s' % banner) # who cares. 
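On the client side, the authlist parsed by _parse_userauth_failure above surfaces as BadAuthenticationType.allowed_types, so a deliberate 'none' attempt can be used to discover which methods a server permits. Hostname and username below are placeholders.

    import socket

    import paramiko

    sock = socket.create_connection(('ssh.example.com', 22))
    transport = paramiko.Transport(sock)
    try:
        transport.start_client()
        transport.auth_none('someuser')
    except paramiko.BadAuthenticationType as exc:
        print('server allows: %s' % exc.allowed_types)
    except paramiko.SSHException as exc:
        print('negotiation or auth error: %s' % exc)
    finally:
        transport.close()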
- + def _parse_userauth_info_request(self, m): if self.auth_method != 'keyboard-interactive': raise SSHException('Illegal info request from server') @@ -587,15 +629,16 @@ class AuthHandler (object): prompt_list = [] for i in range(prompts): prompt_list.append((m.get_text(), m.get_boolean())) - response_list = self.interactive_handler(title, instructions, prompt_list) - + response_list = self.interactive_handler( + title, instructions, prompt_list) + m = Message() m.add_byte(cMSG_USERAUTH_INFO_RESPONSE) m.add_int(len(response_list)) for r in response_list: m.add_string(r) self.transport._send_message(m) - + def _parse_userauth_info_response(self, m): if not self.transport.server_mode: raise SSHException('Illegal info response from server') @@ -603,12 +646,14 @@ class AuthHandler (object): responses = [] for i in range(n): responses.append(m.get_text()) - result = self.transport.server_object.check_auth_interactive_response(responses) - if isinstance(type(result), InteractiveQuery): + result = self.transport.server_object.check_auth_interactive_response( + responses) + if isinstance(result, InteractiveQuery): # make interactive query instead of response self._interactive_query(result) return - self._send_auth_result(self.auth_username, 'keyboard-interactive', result) + self._send_auth_result( + self.auth_username, 'keyboard-interactive', result) _handler_table = { MSG_SERVICE_REQUEST: _parse_service_request, diff --git a/paramiko/ber.py b/paramiko/ber.py index a388df07..7725f944 100644 --- a/paramiko/ber.py +++ b/paramiko/ber.py @@ -71,7 +71,8 @@ class BER(object): t = size & 0x7f if self.idx + t > len(self.content): return None - size = util.inflate_long(self.content[self.idx: self.idx + t], True) + size = util.inflate_long( + self.content[self.idx: self.idx + t], True) self.idx += t if self.idx + size > len(self.content): # can't fit @@ -87,7 +88,8 @@ class BER(object): return util.inflate_long(data) else: # 1: boolean (00 false, otherwise true) - raise BERException('Unknown ber encoding type %d (robey is lazy)' % ident) + raise BERException( + 'Unknown ber encoding type %d (robey is lazy)' % ident) @staticmethod def decode_sequence(data): diff --git a/paramiko/buffered_pipe.py b/paramiko/buffered_pipe.py index ac35b3e1..d9f5149d 100644 --- a/paramiko/buffered_pipe.py +++ b/paramiko/buffered_pipe.py @@ -41,7 +41,7 @@ class BufferedPipe (object): file or socket, but is fed data from another thread. This is used by `.Channel`. """ - + def __init__(self): self._lock = threading.Lock() self._cv = threading.Condition(self._lock) @@ -67,21 +67,30 @@ class BufferedPipe (object): Set an event on this buffer. When data is ready to be read (or the buffer has been closed), the event will be set. When no data is ready, the event will be cleared. - + :param threading.Event event: the event to set/clear """ - self._event = event - if len(self._buffer) > 0: - event.set() - else: - event.clear() - + self._lock.acquire() + try: + self._event = event + # Make sure the event starts in `set` state if we appear to already + # be closed; otherwise, if we start in `clear` state & are closed, + # nothing will ever call `.feed` and the event (& OS pipe, if we're + # wrapping one - see `Channel.fileno`) will permanently stay in + # `clear`, causing deadlock if e.g. `select`ed upon. + if self._closed or len(self._buffer) > 0: + event.set() + else: + event.clear() + finally: + self._lock.release() + def feed(self, data): """ Feed new data into this pipe. 
This method is assumed to be called from a separate thread, so synchronization is done. - - :param data: the data to add, as a `str` + + :param data: the data to add, as a ``str`` or ``bytes`` """ self._lock.acquire() try: @@ -97,7 +106,7 @@ class BufferedPipe (object): Returns true if data is buffered and ready to be read from this feeder. A ``False`` result does not mean that the feeder has closed; it means you may need to wait before more data arrives. - + :return: ``True`` if a `read` call would immediately return at least one byte; ``False`` otherwise. @@ -125,11 +134,11 @@ class BufferedPipe (object): :param int nbytes: maximum number of bytes to read :param float timeout: maximum seconds to wait (or ``None``, the default, to wait forever) - :return: the read data, as a `str` - - :raises PipeTimeout: - if a timeout was specified and no data was ready before that - timeout + :return: the read data, as a ``str`` or ``bytes`` + + :raises: + `.PipeTimeout` -- if a timeout was specified and no data was ready + before that timeout """ out = bytes() self._lock.acquire() @@ -163,11 +172,11 @@ class BufferedPipe (object): self._lock.release() return out - + def empty(self): """ Clear out the buffer and return all data that was in it. - + :return: any data that was in the buffer prior to clearing it out, as a `str` @@ -181,7 +190,7 @@ class BufferedPipe (object): return out finally: self._lock.release() - + def close(self): """ Close this pipe object. Future calls to `read` after the buffer @@ -199,7 +208,7 @@ class BufferedPipe (object): def __len__(self): """ Return the number of bytes buffered. - + :return: number (`int`) of bytes buffered """ self._lock.acquire() diff --git a/paramiko/channel.py b/paramiko/channel.py index 8a97c974..c6016a0e 100644 --- a/paramiko/channel.py +++ b/paramiko/channel.py @@ -25,13 +25,15 @@ import os import socket import time import threading +# TODO: switch as much of py3compat.py to 'six' as possible, then use six.wraps from functools import wraps from paramiko import util -from paramiko.common import cMSG_CHANNEL_REQUEST, cMSG_CHANNEL_WINDOW_ADJUST, \ - cMSG_CHANNEL_DATA, cMSG_CHANNEL_EXTENDED_DATA, DEBUG, ERROR, \ - cMSG_CHANNEL_SUCCESS, cMSG_CHANNEL_FAILURE, cMSG_CHANNEL_EOF, \ - cMSG_CHANNEL_CLOSE +from paramiko.common import ( + cMSG_CHANNEL_REQUEST, cMSG_CHANNEL_WINDOW_ADJUST, cMSG_CHANNEL_DATA, + cMSG_CHANNEL_EXTENDED_DATA, DEBUG, ERROR, cMSG_CHANNEL_SUCCESS, + cMSG_CHANNEL_FAILURE, cMSG_CHANNEL_EOF, cMSG_CHANNEL_CLOSE, +) from paramiko.message import Message from paramiko.py3compat import bytes_types from paramiko.ssh_exception import SSHException @@ -45,16 +47,17 @@ def open_only(func): """ Decorator for `.Channel` methods which performs an openness check. - :raises SSHException: - If the wrapped method is called on an unopened `.Channel`. + :raises: + `.SSHException` -- If the wrapped method is called on an unopened + `.Channel`. """ @wraps(func) def _check(self, *args, **kwds): if ( - self.closed - or self.eof_received - or self.eof_sent - or not self.active + self.closed or + self.eof_received or + self.eof_sent or + not self.active ): raise SSHException('Channel is not open') return func(self, *args, **kwds) @@ -74,7 +77,7 @@ class Channel (ClosingContextManager): flow-controlled independently.) Similarly, if the server isn't reading data you send, calls to `send` may block, unless you set a timeout. This is exactly like a normal network socket, so it shouldn't be too surprising. - + Instances of this class may be used as context managers. 
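The @open_only decorator above is a small reusable pattern: functools.wraps preserves the wrapped method's metadata, and the wrapper raises before delegating whenever the object is not in a usable state. A generic, paramiko-independent sketch; NotOpen and Resource are illustrative names only.

    import functools


    class NotOpen(Exception):
        pass


    def open_only(func):
        @functools.wraps(func)
        def _check(self, *args, **kwargs):
            if self.closed or not self.active:
                raise NotOpen('%s requires an open resource' % func.__name__)
            return func(self, *args, **kwargs)
        return _check


    class Resource(object):
        def __init__(self):
            self.active = True
            self.closed = False

        @open_only
        def send(self, data):
            # Reached only while the resource is open.
            return len(data)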
""" @@ -88,15 +91,20 @@ class Channel (ClosingContextManager): :param int chanid: the ID of this channel, as passed by an existing `.Transport`. """ + #: Channel ID self.chanid = chanid + #: Remote channel ID self.remote_chanid = 0 + #: `.Transport` managing this channel self.transport = None + #: Whether the connection is presently active self.active = False self.eof_received = 0 self.eof_sent = 0 self.in_buffer = BufferedPipe() self.in_stderr_buffer = BufferedPipe() self.timeout = None + #: Whether the connection has been closed self.closed = False self.ultra_debug = False self.lock = threading.Lock() @@ -150,16 +158,18 @@ class Channel (ClosingContextManager): after creating a client channel, to ask the server to provide some basic terminal semantics for a shell invoked with `invoke_shell`. It isn't necessary (or desirable) to call this method if you're going - to exectue a single command with `exec_command`. + to execute a single command with `exec_command`. - :param str term: the terminal type to emulate (for example, ``'vt100'``) + :param str term: the terminal type to emulate + (for example, ``'vt100'``) :param int width: width (in characters) of the terminal screen :param int height: height (in characters) of the terminal screen :param int width_pixels: width (in pixels) of the terminal screen :param int height_pixels: height (in pixels) of the terminal screen - :raises SSHException: - if the request was rejected or the channel was closed + :raises: + `.SSHException` -- if the request was rejected or the channel was + closed """ m = Message() m.add_byte(cMSG_CHANNEL_REQUEST) @@ -190,7 +200,8 @@ class Channel (ClosingContextManager): When the shell exits, the channel will be closed and can't be reused. You must open a new channel if you wish to open another shell. - :raises SSHException: if the request was rejected or the channel was + :raises: + `.SSHException` -- if the request was rejected or the channel was closed """ m = Message() @@ -215,7 +226,8 @@ class Channel (ClosingContextManager): :param str command: a shell command to execute. - :raises SSHException: if the request was rejected or the channel was + :raises: + `.SSHException` -- if the request was rejected or the channel was closed """ m = Message() @@ -240,8 +252,9 @@ class Channel (ClosingContextManager): :param str subsystem: name of the subsystem being requested. - :raises SSHException: - if the request was rejected or the channel was closed + :raises: + `.SSHException` -- if the request was rejected or the channel was + closed """ m = Message() m.add_byte(cMSG_CHANNEL_REQUEST) @@ -264,8 +277,9 @@ class Channel (ClosingContextManager): :param int width_pixels: new width (in pixels) of the terminal screen :param int height_pixels: new height (in pixels) of the terminal screen - :raises SSHException: - if the request was rejected or the channel was closed + :raises: + `.SSHException` -- if the request was rejected or the channel was + closed """ m = Message() m.add_byte(cMSG_CHANNEL_REQUEST) @@ -278,6 +292,59 @@ class Channel (ClosingContextManager): m.add_int(height_pixels) self.transport._send_user_message(m) + @open_only + def update_environment(self, environment): + """ + Updates this channel's remote shell environment. + + .. note:: + This operation is additive - i.e. the current environment is not + reset before the given environment variables are set. + + .. warning:: + Servers may silently reject some environment variables; see the + warning in `set_environment_variable` for details. 
+ + :param dict environment: + a dictionary containing the name and respective values to set + :raises: + `.SSHException` -- if any of the environment variables was rejected + by the server or the channel was closed + """ + for name, value in environment.items(): + try: + self.set_environment_variable(name, value) + except SSHException as e: + err = "Failed to set environment variable \"{0}\"." + raise SSHException(err.format(name), e) + + @open_only + def set_environment_variable(self, name, value): + """ + Set the value of an environment variable. + + .. warning:: + The server may reject this request depending on its ``AcceptEnv`` + setting; such rejections will fail silently (which is common client + practice for this particular request type). Make sure you + understand your server's configuration before using! + + :param str name: name of the environment variable + :param str value: value of the environment variable + + :raises: + `.SSHException` -- if the request was rejected or the channel was + closed + """ + m = Message() + m.add_byte(cMSG_CHANNEL_REQUEST) + m.add_int(self.remote_chanid) + m.add_string('env') + m.add_boolean(False) + m.add_string(name) + m.add_string(value) + self.transport._send_user_message(m) + def exit_status_ready(self): """ Return true if the remote process has exited and returned an exit @@ -286,7 +353,8 @@ class Channel (ClosingContextManager): return an exit status in some cases (like bad servers). :return: - ``True`` if `recv_exit_status` will return immediately, else ``False``. + ``True`` if `recv_exit_status` will return immediately, else + ``False``. .. versionadded:: 1.7.3 """ @@ -300,6 +368,17 @@ class Channel (ClosingContextManager): it does, or until the channel is closed. If no exit status is provided by the server, -1 is returned. + .. warning:: + In some situations, receiving remote output larger than the current + `.Transport` or session's ``window_size`` (e.g. that set by the + ``default_window_size`` kwarg for `.Transport.__init__`) will cause + `.recv_exit_status` to hang indefinitely if it is called prior to a + sufficiently large `.Channel.recv` (or if there are no threads + calling `.Channel.recv` in the background). + + In these cases, ensuring that `.recv_exit_status` is called *after* + `.Channel.recv` (or, again, using threads) can avoid the hang. + :return: the exit code (as an `int`) of the process on the server. .. versionadded:: 1.2 @@ -330,14 +409,20 @@ class Channel (ClosingContextManager): self.transport._send_user_message(m) @open_only - def request_x11(self, screen_number=0, auth_protocol=None, auth_cookie=None, - single_connection=False, handler=None): + def request_x11( + self, + screen_number=0, + auth_protocol=None, + auth_cookie=None, + single_connection=False, + handler=None + ): """ Request an x11 session on this channel. If the server allows it, further x11 requests can be made from the server to the client, when an x11 application is run in a shell session. - From RFC4254:: + From :rfc:`4254`:: It is RECOMMENDED that the 'x11 authentication cookie' that is sent be a fake, random cookie, and that the cookie be checked and @@ -347,7 +432,7 @@ class Channel (ClosingContextManager): generated, used, and returned. You will need to use this value to verify incoming x11 requests and replace them with the actual local x11 cookie (which requires some knowledge of the x11 protocol). - + If a handler is passed in, the handler is called from another thread whenever a new x11 connection arrives. 
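A sketch of the ordering recommended by the recv_exit_status warning above: drain the channel's output first, then ask for the exit status, so output larger than the window cannot stall the session. Host, credentials and the command are placeholders.

    import paramiko

    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.connect('ssh.example.com', username='someuser')

    chan = client.get_transport().open_session()
    chan.exec_command('cat /var/log/syslog')    # potentially large output

    output = b''
    while True:
        chunk = chan.recv(32768)
        if not chunk:              # zero-length read: the remote stream closed
            break
        output += chunk

    print('exit status: %d' % chan.recv_exit_status())
    client.close()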
The default handler queues up incoming x11 connections, which may be retrieved using @@ -366,8 +451,8 @@ class Channel (ClosingContextManager): if True, only a single x11 connection will be forwarded (by default, any number of x11 connections can arrive over this session) - :param function handler: - an optional handler to use for incoming X11 connections + :param handler: + an optional callable handler to use for incoming X11 connections :return: the auth_cookie used """ if auth_protocol is None: @@ -396,10 +481,12 @@ class Channel (ClosingContextManager): Request for a forward SSH Agent on this channel. This is only valid for an ssh-agent from OpenSSH !!! - :param function handler: - a required handler to use for incoming SSH Agent connections + :param handler: + a required callable handler to use for incoming SSH Agent + connections - :return: True if we are ok, else False (at that time we always return ok) + :return: True if we are ok, else False + (at that time we always return ok) :raises: SSHException in case of channel problem. """ @@ -480,16 +567,16 @@ class Channel (ClosingContextManager): self._feed(data) return old - ### socket API + # ...socket API... def settimeout(self, timeout): """ Set a timeout on blocking read/write operations. The ``timeout`` - argument can be a nonnegative float expressing seconds, or ``None``. If - a float is given, subsequent channel read/write operations will raise - a timeout exception if the timeout period value has elapsed before the - operation has completed. Setting a timeout of ``None`` disables - timeouts on socket operations. + argument can be a nonnegative float expressing seconds, or ``None``. + If a float is given, subsequent channel read/write operations will + raise a timeout exception if the timeout period value has elapsed + before the operation has completed. Setting a timeout of ``None`` + disables timeouts on socket operations. ``chan.settimeout(0.0)`` is equivalent to ``chan.setblocking(0)``; ``chan.settimeout(None)`` is equivalent to ``chan.setblocking(1)``. @@ -583,11 +670,11 @@ class Channel (ClosingContextManager): """ Receive data from the channel. The return value is a string representing the data received. The maximum amount of data to be - received at once is specified by ``nbytes``. If a string of length zero - is returned, the channel stream has closed. + received at once is specified by ``nbytes``. If a string of + length zero is returned, the channel stream has closed. :param int nbytes: maximum number of bytes to read. - :return: received data, as a `str` + :return: received data, as a ``str``/``bytes``. :raises socket.timeout: if no data is ready before the timeout set by `settimeout`. @@ -734,7 +821,7 @@ class Channel (ClosingContextManager): if sending stalled for longer than the timeout set by `settimeout`. :raises socket.error: if an error occurred before the entire string was sent. - + .. note:: If the channel is closed while only part of the data has been sent, there is no way to determine how much data (if any) was sent. @@ -758,7 +845,7 @@ class Channel (ClosingContextManager): if sending stalled for longer than the timeout set by `settimeout`. :raises socket.error: if an error occurred before the entire string was sent. - + .. 
versionadded:: 1.1 """ while s: @@ -870,7 +957,13 @@ class Channel (ClosingContextManager): """ self.shutdown(1) - ### calls from Transport + @property + def _closed(self): + # Concession to Python 3's socket API, which has a private ._closed + # attribute instead of a semipublic .closed attribute. + return self.closed + + # ...calls from Transport def _set_transport(self, transport): self.transport = transport @@ -879,7 +972,8 @@ class Channel (ClosingContextManager): def _set_window(self, window_size, max_packet_size): self.in_window_size = window_size self.in_max_packet_size = max_packet_size - # threshold of bytes we receive before we bother to send a window update + # threshold of bytes we receive before we bother to send + # a window update self.in_window_threshold = window_size // 10 self.in_window_sofar = 0 self._log(DEBUG, 'Max packet in: %d bytes' % max_packet_size) @@ -887,8 +981,9 @@ class Channel (ClosingContextManager): def _set_remote_channel(self, chanid, window_size, max_packet_size): self.remote_chanid = chanid self.out_window_size = window_size - self.out_max_packet_size = self.transport. \ - _sanitize_packet_size(max_packet_size) + self.out_max_packet_size = self.transport._sanitize_packet_size( + max_packet_size + ) self.active = 1 self._log(DEBUG, 'Max packet out: %d bytes' % self.out_max_packet_size) @@ -920,7 +1015,10 @@ class Channel (ClosingContextManager): code = m.get_int() s = m.get_binary() if code != 1: - self._log(ERROR, 'unknown extended_data type %d; discarding' % code) + self._log( + ERROR, + 'unknown extended_data type %d; discarding' % code + ) return if self.combine_stderr: self._feed(s) @@ -960,8 +1058,15 @@ class Channel (ClosingContextManager): if server is None: ok = False else: - ok = server.check_channel_pty_request(self, term, width, height, pixelwidth, - pixelheight, modes) + ok = server.check_channel_pty_request( + self, + term, + width, + height, + pixelwidth, + pixelheight, + modes + ) elif key == 'shell': if server is None: ok = False @@ -975,7 +1080,7 @@ class Channel (ClosingContextManager): else: ok = server.check_channel_env_request(self, name, value) elif key == 'exec': - cmd = m.get_text() + cmd = m.get_string() if server is None: ok = False else: @@ -994,8 +1099,8 @@ class Channel (ClosingContextManager): if server is None: ok = False else: - ok = server.check_channel_window_change_request(self, width, height, pixelwidth, - pixelheight) + ok = server.check_channel_window_change_request( + self, width, height, pixelwidth, pixelheight) elif key == 'x11-req': single_connection = m.get_boolean() auth_proto = m.get_text() @@ -1004,8 +1109,13 @@ class Channel (ClosingContextManager): if server is None: ok = False else: - ok = server.check_channel_x11_request(self, single_connection, - auth_proto, auth_cookie, screen_number) + ok = server.check_channel_x11_request( + self, + single_connection, + auth_proto, + auth_cookie, + screen_number + ) elif key == 'auth-agent-req@openssh.com': if server is None: ok = False @@ -1047,14 +1157,15 @@ class Channel (ClosingContextManager): if m is not None: self.transport._send_user_message(m) - ### internals... + # ...internals... 
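Because a Channel mimics the socket API (note the _closed property above, and the BufferedPipe.set_event fix earlier in this diff, which exists precisely so that select()-style polling cannot deadlock), it can be multiplexed like an ordinary socket via fileno(). A hedged sketch; host, credentials and the command are placeholders, and any output arriving after the process exits is ignored for brevity.

    import select

    import paramiko

    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.connect('ssh.example.com', username='someuser')

    chan = client.get_transport().open_session()
    chan.exec_command('ping -c 3 localhost')

    while not chan.exit_status_ready():
        # select() wakes up when the channel's buffer has data (or hits EOF).
        readable, _, _ = select.select([chan], [], [], 1.0)
        if readable and chan.recv_ready():
            print(chan.recv(4096))

    client.close()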
def _send(self, s, m): size = len(s) self.lock.acquire() try: if self.closed: - # this doesn't seem useful, but it is the documented behavior of Socket + # this doesn't seem useful, but it is the documented behavior + # of Socket raise socket.error('Socket is closed') size = self._wait_for_send_window(size) if size == 0: @@ -1122,7 +1233,8 @@ class Channel (ClosingContextManager): return m1, m2 def _unlink(self): - # server connection could die before we become active: still signal the close! + # server connection could die before we become active: + # still signal the close! if self.closed: return self.lock.acquire() @@ -1165,7 +1277,8 @@ class Channel (ClosingContextManager): # should we block? if self.timeout == 0.0: raise socket.timeout() - # loop here in case we get woken up but a different thread has filled the buffer + # loop here in case we get woken up but a different thread has + # filled the buffer timeout = self.timeout while self.out_window_size == 0: if self.closed or self.eof_sent: diff --git a/paramiko/client.py b/paramiko/client.py index 393e3e09..33a9b6c3 100644 --- a/paramiko/client.py +++ b/paramiko/client.py @@ -22,20 +22,25 @@ SSH client & key policies from binascii import hexlify import getpass +import inspect import os import socket import warnings +from errno import ECONNREFUSED, EHOSTUNREACH from paramiko.agent import Agent from paramiko.common import DEBUG from paramiko.config import SSH_PORT from paramiko.dsskey import DSSKey from paramiko.ecdsakey import ECDSAKey +from paramiko.ed25519key import Ed25519Key from paramiko.hostkeys import HostKeys from paramiko.py3compat import string_types from paramiko.resource import ResourceManager from paramiko.rsakey import RSAKey -from paramiko.ssh_exception import SSHException, BadHostKeyException +from paramiko.ssh_exception import ( + SSHException, BadHostKeyException, NoValidConnectionsError +) from paramiko.transport import Transport from paramiko.util import retry_on_signal, ClosingContextManager @@ -88,7 +93,7 @@ class SSHClient (ClosingContextManager): :param str filename: the filename to read, or ``None`` - :raises IOError: + :raises: ``IOError`` -- if a filename was provided and the file could not be read """ if filename is None: @@ -115,7 +120,7 @@ class SSHClient (ClosingContextManager): :param str filename: the filename to read - :raises IOError: if the filename could not be read + :raises: ``IOError`` -- if the filename could not be read """ self._host_keys_filename = filename self._host_keys.load(filename) @@ -128,7 +133,7 @@ class SSHClient (ClosingContextManager): :param str filename: the filename to save to - :raises IOError: if the file could not be written + :raises: ``IOError`` -- if the file could not be written """ # update local host keys from file (in case other SSH clients @@ -139,7 +144,8 @@ class SSHClient (ClosingContextManager): with open(filename, 'w') as f: for hostname, keys in self._host_keys.items(): for keytype, key in keys.items(): - f.write('%s %s %s\n' % (hostname, keytype, key.get_base64())) + f.write('%s %s %s\n' % ( + hostname, keytype, key.get_base64())) def get_host_keys(self): """ @@ -161,21 +167,69 @@ class SSHClient (ClosingContextManager): def set_missing_host_key_policy(self, policy): """ - Set the policy to use when connecting to a server that doesn't have a - host key in either the system or local `.HostKeys` objects. The - default policy is to reject all unknown servers (using `.RejectPolicy`). - You may substitute `.AutoAddPolicy` or write your own policy class. 
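set_missing_host_key_policy (documented just below) accepts RejectPolicy, AutoAddPolicy, WarningPolicy, or any other MissingHostKeyPolicy subclass, and with the inspect.isclass change just below it may be handed either the class or an instance. A small illustrative subclass that records the fingerprint and then accepts the key; LogAndAcceptPolicy is a made-up name.

    from binascii import hexlify

    import paramiko


    class LogAndAcceptPolicy(paramiko.MissingHostKeyPolicy):
        """Accept unknown host keys after printing their fingerprints."""

        def missing_host_key(self, client, hostname, key):
            print('accepting %s key for %s: %s' % (
                key.get_name(), hostname, hexlify(key.get_fingerprint())))
            client.get_host_keys().add(hostname, key.get_name(), key)


    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(LogAndAcceptPolicy)  # class or instance both work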
+ Set policy to use when connecting to servers without a known host key. + + Specifically: + + * A **policy** is a "policy class" (or instance thereof), namely some + subclass of `.MissingHostKeyPolicy` such as `.RejectPolicy` (the + default), `.AutoAddPolicy`, `.WarningPolicy`, or a user-created + subclass. + * A host key is **known** when it appears in the client object's cached + host keys structures (those manipulated by `load_system_host_keys` + and/or `load_host_keys`). :param .MissingHostKeyPolicy policy: the policy to use when receiving a host key from a previously-unknown server """ + if inspect.isclass(policy): + policy = policy() self._policy = policy - def connect(self, hostname, port=SSH_PORT, username=None, password=None, pkey=None, - key_filename=None, timeout=None, allow_agent=True, look_for_keys=True, - compress=False, sock=None, gss_auth=False, gss_kex=False, - gss_deleg_creds=True, gss_host=None, banner_timeout=None): + def _families_and_addresses(self, hostname, port): + """ + Yield pairs of address families and addresses to try for connecting. + + :param str hostname: the server to connect to + :param int port: the server port to connect to + :returns: Yields an iterable of ``(family, address)`` tuples + """ + guess = True + addrinfos = socket.getaddrinfo( + hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM) + for (family, socktype, proto, canonname, sockaddr) in addrinfos: + if socktype == socket.SOCK_STREAM: + yield family, sockaddr + guess = False + + # some OS like AIX don't indicate SOCK_STREAM support, so just + # guess. :( We only do this if we did not get a single result marked + # as socktype == SOCK_STREAM. + if guess: + for family, _, _, _, sockaddr in addrinfos: + yield family, sockaddr + + def connect( + self, + hostname, + port=SSH_PORT, + username=None, + password=None, + pkey=None, + key_filename=None, + timeout=None, + allow_agent=True, + look_for_keys=True, + compress=False, + sock=None, + gss_auth=False, + gss_kex=False, + gss_deleg_creds=True, + gss_host=None, + banner_timeout=None, + auth_timeout=None, + ): """ Connect to an SSH server and authenticate to it. The server's host key is checked against the system host keys (see `load_system_host_keys`) @@ -206,8 +260,10 @@ class SSHClient (ClosingContextManager): :param str key_filename: the filename, or list of filenames, of optional private key(s) to try for authentication - :param float timeout: an optional timeout (in seconds) for the TCP connect - :param bool allow_agent: set to False to disable connecting to the SSH agent + :param float timeout: + an optional timeout (in seconds) for the TCP connect + :param bool allow_agent: + set to False to disable connecting to the SSH agent :param bool look_for_keys: set to False to disable searching for discoverable private key files in ``~/.ssh/`` @@ -215,17 +271,24 @@ class SSHClient (ClosingContextManager): :param socket sock: an open socket or socket-like object (such as a `.Channel`) to use for communication to the target host - :param bool gss_auth: ``True`` if you want to use GSS-API authentication - :param bool gss_kex: Perform GSS-API Key Exchange and user authentication + :param bool gss_auth: + ``True`` if you want to use GSS-API authentication + :param bool gss_kex: + Perform GSS-API Key Exchange and user authentication :param bool gss_deleg_creds: Delegate GSS-API client credentials or not - :param str gss_host: The targets name in the kerberos database. default: hostname + :param str gss_host: + The targets name in the kerberos database. 
default: hostname :param float banner_timeout: an optional timeout (in seconds) to wait for the SSH banner to be presented. + :param float auth_timeout: an optional timeout (in seconds) to wait for + an authentication response. - :raises BadHostKeyException: if the server's host key could not be + :raises: + `.BadHostKeyException` -- if the server's host key could not be verified - :raises AuthenticationException: if authentication failed - :raises SSHException: if there was any other error connecting or + :raises: `.AuthenticationException` -- if authentication failed + :raises: + `.SSHException` -- if there was any other error connecting or establishing an SSH session :raises socket.error: if a socket error occurred while connecting @@ -234,23 +297,40 @@ class SSHClient (ClosingContextManager): ``gss_deleg_creds`` and ``gss_host`` arguments. """ if not sock: - for (family, socktype, proto, canonname, sockaddr) in socket.getaddrinfo(hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM): - if socktype == socket.SOCK_STREAM: - af = family - addr = sockaddr - break - else: - # some OS like AIX don't indicate SOCK_STREAM support, so just guess. :( - af, _, _, _, addr = socket.getaddrinfo(hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM) - sock = socket.socket(af, socket.SOCK_STREAM) - if timeout is not None: + errors = {} + # Try multiple possible address families (e.g. IPv4 vs IPv6) + to_try = list(self._families_and_addresses(hostname, port)) + for af, addr in to_try: try: - sock.settimeout(timeout) - except: - pass - retry_on_signal(lambda: sock.connect(addr)) - - t = self._transport = Transport(sock, gss_kex=gss_kex, gss_deleg_creds=gss_deleg_creds) + sock = socket.socket(af, socket.SOCK_STREAM) + if timeout is not None: + try: + sock.settimeout(timeout) + except: + pass + retry_on_signal(lambda: sock.connect(addr)) + # Break out of the loop on success + break + except socket.error as e: + # Raise anything that isn't a straight up connection error + # (such as a resolution error) + if e.errno not in (ECONNREFUSED, EHOSTUNREACH): + raise + # Capture anything else so we know how the run looks once + # iteration is complete. Retain info about which attempt + # this was. + errors[addr] = e + + # Make sure we explode usefully if no address family attempts + # succeeded. We've no way of knowing which error is the "right" + # one, so we construct a hybrid exception containing all the real + # ones, of a subclass that client code should still be watching for + # (socket.error) + if len(errors) == len(to_try): + raise NoValidConnectionsError(errors) + + t = self._transport = Transport( + sock, gss_kex=gss_kex, gss_deleg_creds=gss_deleg_creds) t.use_compression(compress=compress) if gss_kex and gss_host is None: t.set_gss_host(hostname) @@ -262,7 +342,10 @@ class SSHClient (ClosingContextManager): t.set_log_channel(self._log_channel) if banner_timeout is not None: t.banner_timeout = banner_timeout - t.start_client() + if auth_timeout is not None: + t.auth_timeout = auth_timeout + t.start_client(timeout=timeout) + t.set_sshclient(self) ResourceManager.register(self, t) server_key = t.get_remote_server_key() @@ -277,13 +360,14 @@ class SSHClient (ClosingContextManager): # host key, because the host is authenticated via GSS-API / SSPI as # well as our client. 
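The rewritten connection loop above tries every (family, address) pair from getaddrinfo and, when all of them fail with plain connection errors, raises NoValidConnectionsError carrying the per-address failures; since it subclasses socket.error, existing handlers keep working, while resolution errors still propagate directly. A sketch of catching it; the hostname is a placeholder.

    import paramiko
    from paramiko.ssh_exception import NoValidConnectionsError

    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        client.connect('ssh.example.com', username='someuser', timeout=5)
    except NoValidConnectionsError as exc:
        # exc.errors maps each attempted address to the socket.error it raised.
        for addr, err in exc.errors.items():
            print('%s -> %s' % (addr, err))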
if not self._transport.use_gss_kex: - our_server_key = self._system_host_keys.get(server_hostkey_name, - {}).get(keytype, None) + our_server_key = self._system_host_keys.get( + server_hostkey_name, {}).get(keytype) if our_server_key is None: our_server_key = self._host_keys.get(server_hostkey_name, {}).get(keytype, None) if our_server_key is None: - # will raise exception if the key is rejected; let that fall out + # will raise exception if the key is rejected; + # let that fall out self._policy.missing_host_key(self, server_hostkey_name, server_key) # if the callback returns, assume the key is ok @@ -309,6 +393,12 @@ class SSHClient (ClosingContextManager): def close(self): """ Close this SSHClient and its underlying `.Transport`. + + .. warning:: + Failure to do this may, in some situations, cause your Python + interpreter to hang at shutdown (often due to race conditions). + It's good practice to `close` your client objects anytime you're + done using them, instead of relying on garbage collection. """ if self._transport is None: return @@ -319,7 +409,14 @@ class SSHClient (ClosingContextManager): self._agent.close() self._agent = None - def exec_command(self, command, bufsize=-1, timeout=None, get_pty=False): + def exec_command( + self, + command, + bufsize=-1, + timeout=None, + get_pty=False, + environment=None, + ): """ Execute a command on the SSH server. A new `.Channel` is opened and the requested command is executed. The command's input and output @@ -331,17 +428,27 @@ class SSHClient (ClosingContextManager): interpreted the same way as by the built-in ``file()`` function in Python :param int timeout: - set command's channel timeout. See `Channel.settimeout`.settimeout + set command's channel timeout. See `.Channel.settimeout` + :param dict environment: + a dict of shell environment variables, to be merged into the + default environment that the remote command executes within. + + .. warning:: + Servers may silently reject some environment variables; see the + warning in `.Channel.set_environment_variable` for details. + :return: the stdin, stdout, and stderr of the executing command, as a 3-tuple - :raises SSHException: if the server fails to execute the command + :raises: `.SSHException` -- if the server fails to execute the command """ - chan = self._transport.open_session() + chan = self._transport.open_session(timeout=timeout) if get_pty: chan.get_pty() chan.settimeout(timeout) + if environment: + chan.update_environment(environment) chan.exec_command(command) stdin = chan.makefile('wb', bufsize) stdout = chan.makefile('r', bufsize) @@ -349,7 +456,7 @@ class SSHClient (ClosingContextManager): return stdin, stdout, stderr def invoke_shell(self, term='vt100', width=80, height=24, width_pixels=0, - height_pixels=0): + height_pixels=0, environment=None): """ Start an interactive shell session on the SSH server. 
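exec_command now takes an environment dict, applied through Channel.update_environment; as the warning above notes, sshd only honours variables whitelisted by AcceptEnv and rejects the rest silently, so LANG/LC_* are the usual safe bet. Host and credentials are placeholders.

    import paramiko

    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.connect('ssh.example.com', username='someuser')

    stdin, stdout, stderr = client.exec_command(
        'locale', environment={'LANG': 'C', 'LC_ALL': 'C'})
    print(stdout.read())
    client.close()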
A new `.Channel` is opened and connected to a pseudo-terminal using the requested @@ -361,9 +468,10 @@ class SSHClient (ClosingContextManager): :param int height: the height (in characters) of the terminal window :param int width_pixels: the width (in pixels) of the terminal window :param int height_pixels: the height (in pixels) of the terminal window + :param dict environment: the command's environment :return: a new `.Channel` connected to the remote shell - :raises SSHException: if the server fails to invoke a shell + :raises: `.SSHException` -- if the server fails to invoke a shell """ chan = self._transport.open_session() chan.get_pty(term, width, height, width_pixels, height_pixels) @@ -404,7 +512,8 @@ class SSHClient (ClosingContextManager): """ saved_exception = None two_factor = False - allowed_types = [] + allowed_types = set() + two_factor_types = set(['keyboard-interactive', 'password']) # If GSS-API support and GSS-PI Key Exchange was performed, we attempt # authentication with gssapi-keyex. @@ -429,9 +538,12 @@ class SSHClient (ClosingContextManager): if pkey is not None: try: - self._log(DEBUG, 'Trying SSH key %s' % hexlify(pkey.get_fingerprint())) - allowed_types = self._transport.auth_publickey(username, pkey) - two_factor = (allowed_types == ['password']) + self._log( + DEBUG, + 'Trying SSH key %s' % hexlify(pkey.get_fingerprint())) + allowed_types = set( + self._transport.auth_publickey(username, pkey)) + two_factor = (allowed_types & two_factor_types) if not two_factor: return except SSHException as e: @@ -439,12 +551,17 @@ class SSHClient (ClosingContextManager): if not two_factor: for key_filename in key_filenames: - for pkey_class in (RSAKey, DSSKey, ECDSAKey): + for pkey_class in (RSAKey, DSSKey, ECDSAKey, Ed25519Key): try: - key = pkey_class.from_private_key_file(key_filename, password) - self._log(DEBUG, 'Trying key %s from %s' % (hexlify(key.get_fingerprint()), key_filename)) - self._transport.auth_publickey(username, key) - two_factor = (allowed_types == ['password']) + key = pkey_class.from_private_key_file( + key_filename, password) + self._log( + DEBUG, + 'Trying key %s from %s' % ( + hexlify(key.get_fingerprint()), key_filename)) + allowed_types = set( + self._transport.auth_publickey(username, key)) + two_factor = (allowed_types & two_factor_types) if not two_factor: return break @@ -457,10 +574,15 @@ class SSHClient (ClosingContextManager): for key in self._agent.get_keys(): try: - self._log(DEBUG, 'Trying SSH agent key %s' % hexlify(key.get_fingerprint())) - # for 2-factor auth a successfully auth'd key will result in ['password'] - allowed_types = self._transport.auth_publickey(username, key) - two_factor = (allowed_types == ['password']) + self._log( + DEBUG, + 'Trying SSH agent key %s' % hexlify( + key.get_fingerprint())) + # for 2-factor auth a successfully auth'd key password + # will return an allowed 2fac auth method + allowed_types = set( + self._transport.auth_publickey(username, key)) + two_factor = (allowed_types & two_factor_types) if not two_factor: return break @@ -469,25 +591,20 @@ class SSHClient (ClosingContextManager): if not two_factor: keyfiles = [] - rsa_key = os.path.expanduser('~/.ssh/id_rsa') - dsa_key = os.path.expanduser('~/.ssh/id_dsa') - ecdsa_key = os.path.expanduser('~/.ssh/id_ecdsa') - if os.path.isfile(rsa_key): - keyfiles.append((RSAKey, rsa_key)) - if os.path.isfile(dsa_key): - keyfiles.append((DSSKey, dsa_key)) - if os.path.isfile(ecdsa_key): - keyfiles.append((ECDSAKey, ecdsa_key)) - # look in ~/ssh/ for windows users: 
- rsa_key = os.path.expanduser('~/ssh/id_rsa') - dsa_key = os.path.expanduser('~/ssh/id_dsa') - ecdsa_key = os.path.expanduser('~/ssh/id_ecdsa') - if os.path.isfile(rsa_key): - keyfiles.append((RSAKey, rsa_key)) - if os.path.isfile(dsa_key): - keyfiles.append((DSSKey, dsa_key)) - if os.path.isfile(ecdsa_key): - keyfiles.append((ECDSAKey, ecdsa_key)) + + for keytype, name in [ + (RSAKey, "rsa"), + (DSSKey, "dsa"), + (ECDSAKey, "ecdsa"), + (Ed25519Key, "ed25519"), + ]: + # ~/ssh/ is for windows + for directory in [".ssh", "ssh"]: + full_path = os.path.expanduser( + "~/%s/id_%s" % (directory, name) + ) + if os.path.isfile(full_path): + keyfiles.append((keytype, full_path)) if not look_for_keys: keyfiles = [] @@ -495,10 +612,16 @@ class SSHClient (ClosingContextManager): for pkey_class, filename in keyfiles: try: key = pkey_class.from_private_key_file(filename, password) - self._log(DEBUG, 'Trying discovered key %s in %s' % (hexlify(key.get_fingerprint()), filename)) - # for 2-factor auth a successfully auth'd key will result in ['password'] - allowed_types = self._transport.auth_publickey(username, key) - two_factor = (allowed_types == ['password']) + self._log( + DEBUG, + 'Trying discovered key %s in %s' % ( + hexlify(key.get_fingerprint()), filename)) + + # for 2-factor auth a successfully auth'd key will result + # in ['password'] + allowed_types = set( + self._transport.auth_publickey(username, key)) + two_factor = (allowed_types & two_factor_types) if not two_factor: return break @@ -512,7 +635,11 @@ class SSHClient (ClosingContextManager): except SSHException as e: saved_exception = e elif two_factor: - raise SSHException('Two-factor authentication requires a password') + try: + self._transport.auth_interactive_dumb(username) + return + except SSHException as e: + saved_exception = e # if we got an auth-failed exception earlier, re-raise it if saved_exception is not None: @@ -577,4 +704,5 @@ class WarningPolicy (MissingHostKeyPolicy): """ def missing_host_key(self, client, hostname, key): warnings.warn('Unknown %s host key for %s: %s' % - (key.get_name(), hostname, hexlify(key.get_fingerprint()))) + (key.get_name(), hostname, hexlify( + key.get_fingerprint()))) diff --git a/paramiko/common.py b/paramiko/common.py index 0b0cc2a7..556f046a 100644 --- a/paramiko/common.py +++ b/paramiko/common.py @@ -20,10 +20,12 @@ Common constants and global variables. 
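The discovery loop above now also picks up ~/.ssh/id_ecdsa and ~/.ssh/id_ed25519 (plus the ~/ssh/ spellings used on Windows). Keys can still be loaded explicitly and handed to connect(); the key path and host below are placeholders.

    import os

    import paramiko

    key_path = os.path.expanduser('~/.ssh/id_rsa')
    pkey = paramiko.RSAKey.from_private_key_file(key_path, password=None)
    # Ed25519 keys load the same way via paramiko.Ed25519Key.from_private_key_file().

    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.connect('ssh.example.com', username='someuser', pkey=pkey)
    client.close()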
""" import logging -from paramiko.py3compat import byte_chr, PY2, bytes_types, string_types, b, long +from paramiko.py3compat import ( + byte_chr, PY2, bytes_types, string_types, b, long, +) -MSG_DISCONNECT, MSG_IGNORE, MSG_UNIMPLEMENTED, MSG_DEBUG, MSG_SERVICE_REQUEST, \ - MSG_SERVICE_ACCEPT = range(1, 7) +MSG_DISCONNECT, MSG_IGNORE, MSG_UNIMPLEMENTED, MSG_DEBUG, \ + MSG_SERVICE_REQUEST, MSG_SERVICE_ACCEPT = range(1, 7) MSG_KEXINIT, MSG_NEWKEYS = range(20, 22) MSG_USERAUTH_REQUEST, MSG_USERAUTH_FAILURE, MSG_USERAUTH_SUCCESS, \ MSG_USERAUTH_BANNER = range(50, 54) @@ -31,7 +33,7 @@ MSG_USERAUTH_PK_OK = 60 MSG_USERAUTH_INFO_REQUEST, MSG_USERAUTH_INFO_RESPONSE = range(60, 62) MSG_USERAUTH_GSSAPI_RESPONSE, MSG_USERAUTH_GSSAPI_TOKEN = range(60, 62) MSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE, MSG_USERAUTH_GSSAPI_ERROR,\ -MSG_USERAUTH_GSSAPI_ERRTOK, MSG_USERAUTH_GSSAPI_MIC = range(63, 67) + MSG_USERAUTH_GSSAPI_ERRTOK, MSG_USERAUTH_GSSAPI_MIC = range(63, 67) MSG_GLOBAL_REQUEST, MSG_REQUEST_SUCCESS, MSG_REQUEST_FAILURE = range(80, 83) MSG_CHANNEL_OPEN, MSG_CHANNEL_OPEN_SUCCESS, MSG_CHANNEL_OPEN_FAILURE, \ MSG_CHANNEL_WINDOW_ADJUST, MSG_CHANNEL_DATA, MSG_CHANNEL_EXTENDED_DATA, \ @@ -55,7 +57,8 @@ cMSG_USERAUTH_INFO_REQUEST = byte_chr(MSG_USERAUTH_INFO_REQUEST) cMSG_USERAUTH_INFO_RESPONSE = byte_chr(MSG_USERAUTH_INFO_RESPONSE) cMSG_USERAUTH_GSSAPI_RESPONSE = byte_chr(MSG_USERAUTH_GSSAPI_RESPONSE) cMSG_USERAUTH_GSSAPI_TOKEN = byte_chr(MSG_USERAUTH_GSSAPI_TOKEN) -cMSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE = byte_chr(MSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE) +cMSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE = \ + byte_chr(MSG_USERAUTH_GSSAPI_EXCHANGE_COMPLETE) cMSG_USERAUTH_GSSAPI_ERROR = byte_chr(MSG_USERAUTH_GSSAPI_ERROR) cMSG_USERAUTH_GSSAPI_ERRTOK = byte_chr(MSG_USERAUTH_GSSAPI_ERRTOK) cMSG_USERAUTH_GSSAPI_MIC = byte_chr(MSG_USERAUTH_GSSAPI_MIC) @@ -170,6 +173,7 @@ def asbytes(s): raise Exception('Unknown type') return s + xffffffff = long(0xffffffff) x80000000 = long(0x80000000) o666 = 438 @@ -202,4 +206,4 @@ MIN_WINDOW_SIZE = 2 ** 15 MIN_PACKET_SIZE = 2 ** 12 # Max windows size according to http://www.ietf.org/rfc/rfc4254.txt -MAX_WINDOW_SIZE = 2**32 -1 +MAX_WINDOW_SIZE = 2 ** 32 - 1 diff --git a/paramiko/config.py b/paramiko/config.py index 233a87d9..073abb36 100644 --- a/paramiko/config.py +++ b/paramiko/config.py @@ -53,11 +53,13 @@ class SSHConfig (object): """ Read an OpenSSH config from the given file object. - :param file file_obj: a file-like object to read the config file from + :param file_obj: a file-like object to read the config file from """ host = {"host": ['*'], "config": {}} for line in file_obj: - line = line.rstrip('\r\n').lstrip() + # Strip any leading or trailing whitespace from the line. + # Refer to https://github.com/paramiko/paramiko/issues/499 + line = line.strip() if not line or line.startswith('#'): continue @@ -66,7 +68,7 @@ class SSHConfig (object): raise Exception("Unparsable line %s" % line) key = match.group(1).lower() value = match.group(2) - + if key == 'host': self._config.append(host) host = { @@ -74,15 +76,17 @@ class SSHConfig (object): 'config': {} } elif key == 'proxycommand' and value.lower() == 'none': - # Proxycommands of none should not be added as an actual value. (Issue #415) - continue + # Store 'none' as None; prior to 3.x, it will get stripped out + # at the end (for compatibility with issue #415). After 3.x, it + # will simply not get stripped, leaving a nice explicit marker. 
+ host['config'][key] = None else: if value.startswith('"') and value.endswith('"'): value = value[1:-1] - #identityfile, localforward, remoteforward keys are special cases, since they are allowed to be - # specified multiple times and they should be tried in order - # of specification. + # identityfile, localforward, remoteforward keys are special + # cases, since they are allowed to be specified multiple times + # and they should be tried in order of specification. if key in ['identityfile', 'localforward', 'remoteforward']: if key in host['config']: host['config'][key].append(value) @@ -98,7 +102,7 @@ class SSHConfig (object): The host-matching rules of OpenSSH's ``ssh_config`` man page are used: For each parameter, the first obtained value will be used. The - configuration files contain sections separated by ``Host'' + configuration files contain sections separated by ``Host`` specifications, and that section is only applied for hosts that match one of the patterns given in the specification. @@ -125,10 +129,13 @@ class SSHConfig (object): # else it will reference the original list # in self._config and update that value too # when the extend() is being called. - ret[key] = value[:] + ret[key] = value[:] if value is not None else value elif key == 'identityfile': ret[key].extend(value) ret = self._expand_variables(ret, hostname) + # TODO: remove in 3.x re #670 + if 'proxycommand' in ret and ret['proxycommand'] is None: + del ret['proxycommand'] return ret def get_hostnames(self): @@ -202,6 +209,7 @@ class SSHConfig (object): ], 'proxycommand': [ + ('~', homedir), ('%h', config['hostname']), ('%p', port), ('%r', remoteuser) @@ -209,13 +217,16 @@ class SSHConfig (object): } for k in config: + if config[k] is None: + continue if k in replacements: for find, replace in replacements[k]: if isinstance(config[k], list): for item in range(len(config[k])): if find in config[k][item]: - config[k][item] = config[k][item].\ - replace(find, str(replace)) + config[k][item] = config[k][item].replace( + find, str(replace) + ) else: if find in config[k]: config[k] = config[k].replace(find, str(replace)) @@ -257,8 +268,9 @@ class LazyFqdn(object): address_family = self.config.get('addressfamily', 'any').lower() if address_family != 'any': try: - family = socket.AF_INET if address_family == 'inet' \ - else socket.AF_INET6 + family = socket.AF_INET6 + if address_family == 'inet': + socket.AF_INET results = socket.getaddrinfo( self.host, None, diff --git a/paramiko/dsskey.py b/paramiko/dsskey.py index d7dd6275..9af5d0c1 100644 --- a/paramiko/dsskey.py +++ b/paramiko/dsskey.py @@ -20,27 +20,30 @@ DSS keys. """ -import os -from hashlib import sha1 - -from Crypto.PublicKey import DSA +from cryptography.exceptions import InvalidSignature +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives import hashes, serialization +from cryptography.hazmat.primitives.asymmetric import dsa +from cryptography.hazmat.primitives.asymmetric.utils import ( + decode_dss_signature, encode_dss_signature +) from paramiko import util from paramiko.common import zero_byte -from paramiko.py3compat import long from paramiko.ssh_exception import SSHException from paramiko.message import Message from paramiko.ber import BER, BERException from paramiko.pkey import PKey -class DSSKey (PKey): +class DSSKey(PKey): """ Representation of a DSS key which can be used to sign an verify SSH2 data. 
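The config parser now strips leading as well as trailing whitespace from each line and keeps an explicit `ProxyCommand none` as None until lookup() strips it out. A self-contained illustration; the config text is made up.

    import io

    import paramiko

    config_text = u"""
    Host example
        HostName ssh.example.com
        User someuser
        ProxyCommand none
    """

    config = paramiko.SSHConfig()
    config.parse(io.StringIO(config_text))

    settings = config.lookup('example')
    print('%s %s' % (settings['hostname'], settings['user']))
    # 'proxycommand' is absent: the explicit "none" was dropped during lookup.
    print('proxycommand' in settings)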
""" - def __init__(self, msg=None, data=None, filename=None, password=None, vals=None, file_obj=None): + def __init__(self, msg=None, data=None, filename=None, password=None, + vals=None, file_obj=None): self.p = None self.q = None self.g = None @@ -80,13 +83,7 @@ class DSSKey (PKey): return self.asbytes() def __hash__(self): - h = hash(self.get_name()) - h = h * 37 + hash(self.p) - h = h * 37 + hash(self.q) - h = h * 37 + hash(self.g) - h = h * 37 + hash(self.y) - # h might be a long by now... - return hash(h) + return hash((self.get_name(), self.p, self.q, self.g, self.y)) def get_name(self): return 'ssh-dss' @@ -98,15 +95,21 @@ class DSSKey (PKey): return self.x is not None def sign_ssh_data(self, data): - digest = sha1(data).digest() - dss = DSA.construct((long(self.y), long(self.g), long(self.p), long(self.q), long(self.x))) - # generate a suitable k - qsize = len(util.deflate_long(self.q, 0)) - while True: - k = util.inflate_long(os.urandom(qsize), 1) - if (k > 2) and (k < self.q): - break - r, s = dss.sign(util.inflate_long(digest, 1), k) + key = dsa.DSAPrivateNumbers( + x=self.x, + public_numbers=dsa.DSAPublicNumbers( + y=self.y, + parameter_numbers=dsa.DSAParameterNumbers( + p=self.p, + q=self.q, + g=self.g + ) + ) + ).private_key(backend=default_backend()) + signer = key.signer(hashes.SHA1()) + signer.update(data) + r, s = decode_dss_signature(signer.finalize()) + m = Message() m.add_string('ssh-dss') # apparently, in rare cases, r or s may be shorter than 20 bytes! @@ -132,27 +135,65 @@ class DSSKey (PKey): # pull out (r, s) which are NOT encoded as mpints sigR = util.inflate_long(sig[:20], 1) sigS = util.inflate_long(sig[20:], 1) - sigM = util.inflate_long(sha1(data).digest(), 1) - dss = DSA.construct((long(self.y), long(self.g), long(self.p), long(self.q))) - return dss.verify(sigM, (sigR, sigS)) - - def _encode_key(self): - if self.x is None: - raise SSHException('Not enough key information') - keylist = [0, self.p, self.q, self.g, self.y, self.x] + signature = encode_dss_signature(sigR, sigS) + + key = dsa.DSAPublicNumbers( + y=self.y, + parameter_numbers=dsa.DSAParameterNumbers( + p=self.p, + q=self.q, + g=self.g + ) + ).public_key(backend=default_backend()) + verifier = key.verifier(signature, hashes.SHA1()) + verifier.update(data) try: - b = BER() - b.encode(keylist) - except BERException: - raise SSHException('Unable to create ber encoding of key') - return b.asbytes() + verifier.verify() + except InvalidSignature: + return False + else: + return True def write_private_key_file(self, filename, password=None): - self._write_private_key_file('DSA', filename, self._encode_key(), password) + key = dsa.DSAPrivateNumbers( + x=self.x, + public_numbers=dsa.DSAPublicNumbers( + y=self.y, + parameter_numbers=dsa.DSAParameterNumbers( + p=self.p, + q=self.q, + g=self.g + ) + ) + ).private_key(backend=default_backend()) + + self._write_private_key_file( + filename, + key, + serialization.PrivateFormat.TraditionalOpenSSL, + password=password + ) def write_private_key(self, file_obj, password=None): - self._write_private_key('DSA', file_obj, self._encode_key(), password) + key = dsa.DSAPrivateNumbers( + x=self.x, + public_numbers=dsa.DSAPublicNumbers( + y=self.y, + parameter_numbers=dsa.DSAParameterNumbers( + p=self.p, + q=self.q, + g=self.g + ) + ) + ).private_key(backend=default_backend()) + + self._write_private_key( + file_obj, + key, + serialization.PrivateFormat.TraditionalOpenSSL, + password=password + ) @staticmethod def generate(bits=1024, progress_func=None): @@ -161,17 
+202,22 @@ class DSSKey (PKey): generate a new host key or authentication key. :param int bits: number of bits the generated key should be. - :param function progress_func: - an optional function to call at key points in key generation (used - by ``pyCrypto.PublicKey``). + :param progress_func: Unused :return: new `.DSSKey` private key """ - dsa = DSA.generate(bits, os.urandom, progress_func) - key = DSSKey(vals=(dsa.p, dsa.q, dsa.g, dsa.y)) - key.x = dsa.x + numbers = dsa.generate_private_key( + bits, backend=default_backend() + ).private_numbers() + key = DSSKey(vals=( + numbers.public_numbers.parameter_numbers.p, + numbers.public_numbers.parameter_numbers.q, + numbers.public_numbers.parameter_numbers.g, + numbers.public_numbers.y + )) + key.x = numbers.x return key - ### internals... + # ...internals... def _from_private_key_file(self, filename, password): data = self._read_private_key_file('DSA', filename, password) @@ -188,8 +234,13 @@ class DSSKey (PKey): keylist = BER(data).decode() except BERException as e: raise SSHException('Unable to parse key file: ' + str(e)) - if (type(keylist) is not list) or (len(keylist) < 6) or (keylist[0] != 0): - raise SSHException('not a valid DSA private key file (bad ber encoding)') + if ( + type(keylist) is not list or + len(keylist) < 6 or + keylist[0] != 0 + ): + raise SSHException( + 'not a valid DSA private key file (bad ber encoding)') self.p = keylist[1] self.q = keylist[2] self.g = keylist[3] diff --git a/paramiko/ecdsakey.py b/paramiko/ecdsakey.py index 6b047959..fa850c2e 100644 --- a/paramiko/ecdsakey.py +++ b/paramiko/ecdsakey.py @@ -20,24 +20,87 @@ ECDSA keys """ -import binascii -from hashlib import sha256 +from cryptography.exceptions import InvalidSignature +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives import hashes, serialization +from cryptography.hazmat.primitives.asymmetric import ec +from cryptography.hazmat.primitives.asymmetric.utils import ( + decode_dss_signature, encode_dss_signature +) -from ecdsa import SigningKey, VerifyingKey, der, curves - -from paramiko.common import four_byte, one_byte +from paramiko.common import four_byte from paramiko.message import Message from paramiko.pkey import PKey -from paramiko.py3compat import byte_chr, u from paramiko.ssh_exception import SSHException +from paramiko.util import deflate_long + +class _ECDSACurve(object): + """ + Represents a specific ECDSA Curve (nistp256, nistp384, etc). -class ECDSAKey (PKey): + Handles the generation of the key format identifier and the selection of + the proper hash function. Also grabs the proper curve from the 'ecdsa' + package. + """ + def __init__(self, curve_class, nist_name): + self.nist_name = nist_name + self.key_length = curve_class.key_size + + # Defined in RFC 5656 6.2 + self.key_format_identifier = "ecdsa-sha2-" + self.nist_name + + # Defined in RFC 5656 6.2.1 + if self.key_length <= 256: + self.hash_object = hashes.SHA256 + elif self.key_length <= 384: + self.hash_object = hashes.SHA384 + else: + self.hash_object = hashes.SHA512 + + self.curve_class = curve_class + + +class _ECDSACurveSet(object): + """ + A collection to hold the ECDSA curves. Allows querying by oid and by key + format identifier. The two ways in which ECDSAKey needs to be able to look + up curves. 
+ """ + def __init__(self, ecdsa_curves): + self.ecdsa_curves = ecdsa_curves + + def get_key_format_identifier_list(self): + return [curve.key_format_identifier for curve in self.ecdsa_curves] + + def get_by_curve_class(self, curve_class): + for curve in self.ecdsa_curves: + if curve.curve_class == curve_class: + return curve + + def get_by_key_format_identifier(self, key_format_identifier): + for curve in self.ecdsa_curves: + if curve.key_format_identifier == key_format_identifier: + return curve + + def get_by_key_length(self, key_length): + for curve in self.ecdsa_curves: + if curve.key_length == key_length: + return curve + + +class ECDSAKey(PKey): """ Representation of an ECDSA key which can be used to sign and verify SSH2 data. """ + _ECDSA_CURVES = _ECDSACurveSet([ + _ECDSACurve(ec.SECP256R1, 'nistp256'), + _ECDSACurve(ec.SECP384R1, 'nistp384'), + _ECDSACurve(ec.SECP521R1, 'nistp521'), + ]) + def __init__(self, msg=None, data=None, filename=None, password=None, vals=None, file_obj=None, validate_point=True): self.verifying_key = None @@ -52,32 +115,49 @@ class ECDSAKey (PKey): msg = Message(data) if vals is not None: self.signing_key, self.verifying_key = vals + c_class = self.signing_key.curve.__class__ + self.ecdsa_curve = self._ECDSA_CURVES.get_by_curve_class(c_class) else: if msg is None: raise SSHException('Key object may not be empty') - if msg.get_text() != 'ecdsa-sha2-nistp256': + self.ecdsa_curve = self._ECDSA_CURVES.get_by_key_format_identifier( + msg.get_text()) + if self.ecdsa_curve is None: raise SSHException('Invalid key') curvename = msg.get_text() - if curvename != 'nistp256': + if curvename != self.ecdsa_curve.nist_name: raise SSHException("Can't handle curve of type %s" % curvename) pointinfo = msg.get_binary() - if pointinfo[0:1] != four_byte: - raise SSHException('Point compression is being used: %s' % - binascii.hexlify(pointinfo)) - self.verifying_key = VerifyingKey.from_string(pointinfo[1:], - curve=curves.NIST256p, - validate_point=validate_point) - self.size = 256 + try: + numbers = ec.EllipticCurvePublicNumbers.from_encoded_point( + self.ecdsa_curve.curve_class(), pointinfo + ) + except ValueError: + raise SSHException("Invalid public key") + self.verifying_key = numbers.public_key(backend=default_backend()) + + @classmethod + def supported_key_format_identifiers(cls): + return cls._ECDSA_CURVES.get_key_format_identifier_list() def asbytes(self): key = self.verifying_key m = Message() - m.add_string('ecdsa-sha2-nistp256') - m.add_string('nistp256') + m.add_string(self.ecdsa_curve.key_format_identifier) + m.add_string(self.ecdsa_curve.nist_name) + + numbers = key.public_numbers() + + key_size_bytes = (key.curve.key_size + 7) // 8 - point_str = four_byte + key.to_string() + x_bytes = deflate_long(numbers.x, add_sign_padding=False) + x_bytes = b'\x00' * (key_size_bytes - len(x_bytes)) + x_bytes + y_bytes = deflate_long(numbers.y, add_sign_padding=False) + y_bytes = b'\x00' * (key_size_bytes - len(y_bytes)) + y_bytes + + point_str = four_byte + x_bytes + y_bytes m.add_string(point_str) return m.asbytes() @@ -85,63 +165,83 @@ class ECDSAKey (PKey): return self.asbytes() def __hash__(self): - h = hash(self.get_name()) - h = h * 37 + hash(self.verifying_key.pubkey.point.x()) - h = h * 37 + hash(self.verifying_key.pubkey.point.y()) - return hash(h) + return hash((self.get_name(), self.verifying_key.public_numbers().x, + self.verifying_key.public_numbers().y)) def get_name(self): - return 'ecdsa-sha2-nistp256' + return self.ecdsa_curve.key_format_identifier def 
get_bits(self): - return self.size + return self.ecdsa_curve.key_length def can_sign(self): return self.signing_key is not None def sign_ssh_data(self, data): - sig = self.signing_key.sign_deterministic( - data, sigencode=self._sigencode, hashfunc=sha256) + ecdsa = ec.ECDSA(self.ecdsa_curve.hash_object()) + signer = self.signing_key.signer(ecdsa) + signer.update(data) + sig = signer.finalize() + r, s = decode_dss_signature(sig) + m = Message() - m.add_string('ecdsa-sha2-nistp256') - m.add_string(sig) + m.add_string(self.ecdsa_curve.key_format_identifier) + m.add_string(self._sigencode(r, s)) return m def verify_ssh_sig(self, data, msg): - if msg.get_text() != 'ecdsa-sha2-nistp256': + if msg.get_text() != self.ecdsa_curve.key_format_identifier: return False sig = msg.get_binary() + sigR, sigS = self._sigdecode(sig) + signature = encode_dss_signature(sigR, sigS) - # verify the signature by SHA'ing the data and encrypting it - # using the public key. - hash_obj = sha256(data).digest() - return self.verifying_key.verify_digest(sig, hash_obj, - sigdecode=self._sigdecode) + verifier = self.verifying_key.verifier( + signature, ec.ECDSA(self.ecdsa_curve.hash_object()) + ) + verifier.update(data) + try: + verifier.verify() + except InvalidSignature: + return False + else: + return True def write_private_key_file(self, filename, password=None): - key = self.signing_key or self.verifying_key - self._write_private_key_file('EC', filename, key.to_der(), password) + self._write_private_key_file( + filename, + self.signing_key, + serialization.PrivateFormat.TraditionalOpenSSL, + password=password + ) def write_private_key(self, file_obj, password=None): - key = self.signing_key or self.verifying_key - self._write_private_key('EC', file_obj, key.to_der(), password) + self._write_private_key( + file_obj, + self.signing_key, + serialization.PrivateFormat.TraditionalOpenSSL, + password=password + ) - @staticmethod - def generate(curve=curves.NIST256p, progress_func=None): + @classmethod + def generate(cls, curve=ec.SECP256R1(), progress_func=None, bits=None): """ - Generate a new private RSA key. This factory function can be used to + Generate a new private ECDSA key. This factory function can be used to generate a new host key or authentication key. - :param function progress_func: - an optional function to call at key points in key generation (used - by ``pyCrypto.PublicKey``). - :returns: A new private key (`.RSAKey`) object + :param progress_func: Not used for this type of key. + :returns: A new private key (`.ECDSAKey`) object """ - signing_key = SigningKey.generate(curve) - key = ECDSAKey(vals=(signing_key, signing_key.get_verifying_key())) - return key + if bits is not None: + curve = cls._ECDSA_CURVES.get_by_key_length(bits) + if curve is None: + raise ValueError("Unsupported key length: %d" % bits) + curve = curve.curve_class() - ### internals... + private_key = ec.generate_private_key(curve, backend=default_backend()) + return ECDSAKey(vals=(private_key, private_key.public_key())) + + # ...internals... 
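# A sketch (not part of this patch): a sign/verify round trip with the
# cryptography-backed ECDSAKey shown above. It relies only on APIs visible in
# this diff (generate(bits=...), get_name, sign_ssh_data, verify_ssh_sig) plus
# paramiko's existing Message.rewind().
from paramiko.ecdsakey import ECDSAKey

key = ECDSAKey.generate(bits=384)            # resolved to SECP384R1 via _ECDSA_CURVES
assert key.get_name() == 'ecdsa-sha2-nistp384'
sig = key.sign_ssh_data(b'payload')          # returns a paramiko Message
sig.rewind()                                 # rewind before re-parsing it
assert key.verify_ssh_sig(b'payload', sig)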
def _from_private_key_file(self, filename, password): data = self._read_private_key_file('EC', filename, password) @@ -151,27 +251,26 @@ class ECDSAKey (PKey): data = self._read_private_key('EC', file_obj, password) self._decode_key(data) - ALLOWED_PADDINGS = [one_byte, byte_chr(2) * 2, byte_chr(3) * 3, byte_chr(4) * 4, - byte_chr(5) * 5, byte_chr(6) * 6, byte_chr(7) * 7] - def _decode_key(self, data): - s, padding = der.remove_sequence(data) - if padding: - if padding not in self.ALLOWED_PADDINGS: - raise ValueError("weird padding: %s" % u(binascii.hexlify(data))) - data = data[:-len(padding)] - key = SigningKey.from_der(data) + try: + key = serialization.load_der_private_key( + data, password=None, backend=default_backend() + ) + except (ValueError, AssertionError) as e: + raise SSHException(str(e)) + self.signing_key = key - self.verifying_key = key.get_verifying_key() - self.size = 256 + self.verifying_key = key.public_key() + curve_class = key.curve.__class__ + self.ecdsa_curve = self._ECDSA_CURVES.get_by_curve_class(curve_class) - def _sigencode(self, r, s, order): + def _sigencode(self, r, s): msg = Message() msg.add_mpint(r) msg.add_mpint(s) return msg.asbytes() - def _sigdecode(self, sig, order): + def _sigdecode(self, sig): msg = Message(sig) r = msg.get_mpint() s = msg.get_mpint() diff --git a/paramiko/ed25519key.py b/paramiko/ed25519key.py new file mode 100644 index 00000000..e1a8a732 --- /dev/null +++ b/paramiko/ed25519key.py @@ -0,0 +1,194 @@ +# This file is part of paramiko. +# +# Paramiko is free software; you can redistribute it and/or modify it under the +# terms of the GNU Lesser General Public License as published by the Free +# Software Foundation; either version 2.1 of the License, or (at your option) +# any later version. +# +# Paramiko is distrubuted in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR +# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with Paramiko; if not, write to the Free Software Foundation, Inc., +# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + +import bcrypt + +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives.ciphers import Cipher + +import nacl.signing + +import six + +from paramiko.message import Message +from paramiko.pkey import PKey +from paramiko.ssh_exception import SSHException, PasswordRequiredException + + +OPENSSH_AUTH_MAGIC = b"openssh-key-v1\x00" + + +def unpad(data): + # At the moment, this is only used for unpadding private keys on disk. This + # really ought to be made constant time (possibly by upstreaming this logic + # into pyca/cryptography). 
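# Concretely, per OpenSSH's PROTOCOL.key: the private-key section is padded
# with the bytes 1, 2, 3, ... until its length is a multiple of the cipher
# block size, so a payload padded out to a 16-byte block might end in
# b"\x01\x02\x03"; unpad() strips that suffix after checking that every pad
# byte carries its expected value.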
+ padding_length = six.indexbytes(data, -1) + if padding_length > 16: + raise SSHException("Invalid key") + for i in range(1, padding_length + 1): + if six.indexbytes(data, -i) != (padding_length - i + 1): + raise SSHException("Invalid key") + return data[:-padding_length] + + +class Ed25519Key(PKey): + def __init__(self, msg=None, data=None, filename=None, password=None): + verifying_key = signing_key = None + if msg is None and data is not None: + msg = Message(data) + if msg is not None: + if msg.get_text() != "ssh-ed25519": + raise SSHException("Invalid key") + verifying_key = nacl.signing.VerifyKey(msg.get_binary()) + elif filename is not None: + with open(filename, "r") as f: + data = self._read_private_key("OPENSSH", f) + signing_key = self._parse_signing_key_data(data, password) + + if signing_key is None and verifying_key is None: + raise ValueError("need a key") + + self._signing_key = signing_key + self._verifying_key = verifying_key + + def _parse_signing_key_data(self, data, password): + from paramiko.transport import Transport + # We may eventually want this to be usable for other key types, as + # OpenSSH moves to it, but for now this is just for Ed25519 keys. + # This format is described here: + # https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key + # The description isn't totally complete, and I had to refer to the + # source for a full implementation. + message = Message(data) + if message.get_bytes(len(OPENSSH_AUTH_MAGIC)) != OPENSSH_AUTH_MAGIC: + raise SSHException("Invalid key") + + ciphername = message.get_text() + kdfname = message.get_text() + kdfoptions = message.get_binary() + num_keys = message.get_int() + + if kdfname == "none": + # kdfname of "none" must have an empty kdfoptions, the ciphername + # must be "none" + if kdfoptions or ciphername != "none": + raise SSHException("Invalid key") + elif kdfname == "bcrypt": + if not password: + raise PasswordRequiredException( + "Private key file is encrypted" + ) + kdf = Message(kdfoptions) + bcrypt_salt = kdf.get_binary() + bcrypt_rounds = kdf.get_int() + else: + raise SSHException("Invalid key") + + if ciphername != "none" and ciphername not in Transport._cipher_info: + raise SSHException("Invalid key") + + public_keys = [] + for _ in range(num_keys): + pubkey = Message(message.get_binary()) + if pubkey.get_text() != "ssh-ed25519": + raise SSHException("Invalid key") + public_keys.append(pubkey.get_binary()) + + private_ciphertext = message.get_binary() + if ciphername == "none": + private_data = private_ciphertext + else: + cipher = Transport._cipher_info[ciphername] + key = bcrypt.kdf( + password=password, + salt=bcrypt_salt, + desired_key_bytes=cipher["key-size"] + cipher["block-size"], + rounds=bcrypt_rounds, + # We can't control how many rounds are on disk, so no sense + # warning about it. + ignore_few_rounds=True, + ) + decryptor = Cipher( + cipher["class"](key[:cipher["key-size"]]), + cipher["mode"](key[cipher["key-size"]:]), + backend=default_backend() + ).decryptor() + private_data = ( + decryptor.update(private_ciphertext) + decryptor.finalize() + ) + + message = Message(unpad(private_data)) + if message.get_int() != message.get_int(): + raise SSHException("Invalid key") + + signing_keys = [] + for i in range(num_keys): + if message.get_text() != "ssh-ed25519": + raise SSHException("Invalid key") + # A copy of the public key, again, ignore. + public = message.get_binary() + key_data = message.get_binary() + # The second half of the key data is yet another copy of the public + # key... 
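# (key_data is 64 bytes: the 32-byte seed followed by that same 32-byte
# public key; the SigningKey below is rebuilt from the seed alone, and the
# redundant copies are only cross-checked.)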
+ signing_key = nacl.signing.SigningKey(key_data[:32]) + # Verify that all the public keys are the same... + assert ( + signing_key.verify_key.encode() == public == public_keys[i] == + key_data[32:] + ) + signing_keys.append(signing_key) + # Comment, ignore. + message.get_binary() + + if len(signing_keys) != 1: + raise SSHException("Invalid key") + return signing_keys[0] + + def asbytes(self): + if self.can_sign(): + v = self._signing_key.verify_key + else: + v = self._verifying_key + m = Message() + m.add_string("ssh-ed25519") + m.add_string(v.encode()) + return m.asbytes() + + def get_name(self): + return "ssh-ed25519" + + def get_bits(self): + return 256 + + def can_sign(self): + return self._signing_key is not None + + def sign_ssh_data(self, data): + m = Message() + m.add_string("ssh-ed25519") + m.add_string(self._signing_key.sign(data).signature) + return m + + def verify_ssh_sig(self, data, msg): + if msg.get_text() != "ssh-ed25519": + return False + + try: + self._verifying_key.verify(data, msg.get_binary()) + except nacl.exceptions.BadSignatureError: + return False + else: + return True diff --git a/paramiko/file.py b/paramiko/file.py index e3b0a16a..5212091a 100644 --- a/paramiko/file.py +++ b/paramiko/file.py @@ -15,8 +15,9 @@ # You should have received a copy of the GNU Lesser General Public License # along with Paramiko; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -from paramiko.common import linefeed_byte_value, crlf, cr_byte, linefeed_byte, \ - cr_byte_value +from paramiko.common import ( + linefeed_byte_value, crlf, cr_byte, linefeed_byte, cr_byte_value, +) from paramiko.py3compat import BytesIO, PY2, u, b, bytes_types from paramiko.util import ClosingContextManager @@ -59,14 +60,14 @@ class BufferedFile (ClosingContextManager): def __del__(self): self.close() - + def __iter__(self): """ Returns an iterator that can be used to iterate over the lines in this file. This iterator happens to return the file itself, since a file is its own iterator. - :raises ValueError: if the file is closed. + :raises: ``ValueError`` -- if the file is closed. """ if self._closed: raise ValueError('I/O operation on closed file') @@ -92,12 +93,12 @@ class BufferedFile (ClosingContextManager): def next(self): """ Returns the next line from the input, or raises - `~exceptions.StopIteration` when EOF is hit. Unlike Python file + ``StopIteration`` when EOF is hit. Unlike Python file objects, it's okay to mix calls to `next` and `readline`. - :raises StopIteration: when the end of the file is reached. + :raises: ``StopIteration`` -- when the end of the file is reached. - :return: a line (`str`) read from the file. + :returns: a line (`str`) read from the file. """ line = self.readline() if not line: @@ -106,11 +107,11 @@ class BufferedFile (ClosingContextManager): else: def __next__(self): """ - Returns the next line from the input, or raises `.StopIteration` when - EOF is hit. Unlike python file objects, it's okay to mix calls to - `.next` and `.readline`. + Returns the next line from the input, or raises ``StopIteration`` + when EOF is hit. Unlike python file objects, it's okay to mix + calls to `.next` and `.readline`. - :raises StopIteration: when the end of the file is reached. + :raises: ``StopIteration`` -- when the end of the file is reached. :returns: a line (`str`) read from the file. 
""" @@ -119,11 +120,53 @@ class BufferedFile (ClosingContextManager): raise StopIteration return line + def readable(self): + """ + Check if the file can be read from. + + :returns: + `True` if the file can be read from. If `False`, `read` will raise + an exception. + """ + return (self._flags & self.FLAG_READ) == self.FLAG_READ + + def writable(self): + """ + Check if the file can be written to. + + :returns: + `True` if the file can be written to. If `False`, `write` will + raise an exception. + """ + return (self._flags & self.FLAG_WRITE) == self.FLAG_WRITE + + def seekable(self): + """ + Check if the file supports random access. + + :returns: + `True` if the file supports random access. If `False`, `seek` will + raise an exception. + """ + return False + + def readinto(self, buff): + """ + Read up to ``len(buff)`` bytes into ``bytearray`` *buff* and return the + number of bytes read. + + :returns: + The number of bytes read. + """ + data = self.read(len(buff)) + buff[:len(data)] = data + return len(data) + def read(self, size=None): """ - Read at most ``size`` bytes from the file (less if we hit the end of the - file first). If the ``size`` argument is negative or omitted, read all - the remaining data in the file. + Read at most ``size`` bytes from the file (less if we hit the end of + the file first). If the ``size`` argument is negative or omitted, + read all the remaining data in the file. .. note:: ``'b'`` mode flag is ignored (``self.FLAG_BINARY`` in @@ -132,7 +175,7 @@ class BufferedFile (ClosingContextManager): text data. :param int size: maximum number of bytes to read - :return: + :returns: data read from the file (as bytes), or an empty string if EOF was encountered immediately """ @@ -155,12 +198,12 @@ class BufferedFile (ClosingContextManager): result += new_data self._realpos += len(new_data) self._pos += len(new_data) - return result + return result if size <= len(self._rbuffer): result = self._rbuffer[:size] self._rbuffer = self._rbuffer[size:] self._pos += len(result) - return result + return result while len(self._rbuffer) < size: read_size = size - len(self._rbuffer) if self._flags & self.FLAG_BUFFERED: @@ -176,7 +219,7 @@ class BufferedFile (ClosingContextManager): result = self._rbuffer[:size] self._rbuffer = self._rbuffer[size:] self._pos += len(result) - return result + return result def readline(self, size=None): """ @@ -192,7 +235,7 @@ class BufferedFile (ClosingContextManager): characters (``'\\0'``) if they occurred in the input. :param int size: maximum length of returned string. - :return: + :returns: next line of the file, or an empty string if the end of the file has been reached. @@ -208,7 +251,11 @@ class BufferedFile (ClosingContextManager): line = self._rbuffer truncated = False while True: - if self._at_trailing_cr and (self._flags & self.FLAG_UNIVERSAL_NEWLINE) and (len(line) > 0): + if ( + self._at_trailing_cr and + self._flags & self.FLAG_UNIVERSAL_NEWLINE and + len(line) > 0 + ): # edge case: the newline may be '\r\n' and we may have read # only the first '\r' last time. 
if line[0] == linefeed_byte_value: @@ -229,7 +276,13 @@ class BufferedFile (ClosingContextManager): n = size - len(line) else: n = self._bufsize - if (linefeed_byte in line) or ((self._flags & self.FLAG_UNIVERSAL_NEWLINE) and (cr_byte in line)): + if ( + linefeed_byte in line or + ( + self._flags & self.FLAG_UNIVERSAL_NEWLINE and + cr_byte in line + ) + ): break try: new_data = self._read(n) @@ -252,12 +305,20 @@ class BufferedFile (ClosingContextManager): self._pos += len(line) return line if self._flags & self.FLAG_BINARY else u(line) xpos = pos + 1 - if (line[pos] == cr_byte_value) and (xpos < len(line)) and (line[xpos] == linefeed_byte_value): + if ( + line[pos] == cr_byte_value and + xpos < len(line) and + line[xpos] == linefeed_byte_value + ): xpos += 1 - # if the string was truncated, _rbuffer needs to have the string after + # if the string was truncated, _rbuffer needs to have the string after # the newline character plus the truncated part of the line we stored # earlier in _rbuffer - self._rbuffer = line[xpos:] + self._rbuffer if truncated else line[xpos:] + if truncated: + self._rbuffer = line[xpos:] + self._rbuffer + else: + self._rbuffer = line[xpos:] + lf = line[pos:xpos] line = line[:pos] + linefeed_byte if (len(self._rbuffer) == 0) and (lf == cr_byte): @@ -277,7 +338,7 @@ class BufferedFile (ClosingContextManager): after rounding up to an internal buffer size) are read. :param int sizehint: desired maximum number of bytes to read. - :return: `list` of lines read from the file. + :returns: `list` of lines read from the file. """ lines = [] byte_count = 0 @@ -300,14 +361,14 @@ class BufferedFile (ClosingContextManager): If a file is opened in append mode (``'a'`` or ``'a+'``), any seek operations will be undone at the next write (as the file position will move back to the end of the file). - + :param int offset: position to move to within the file, relative to ``whence``. :param int whence: type of movement: 0 = absolute; 1 = relative to the current position; 2 = relative to the end of the file. - :raises IOError: if the file doesn't support random access. + :raises: ``IOError`` -- if the file doesn't support random access. """ raise IOError('File does not support seeking.') @@ -317,7 +378,7 @@ class BufferedFile (ClosingContextManager): useful if the underlying file doesn't support random access, or was opened in append mode. - :return: file position (`number <int>` of bytes). + :returns: file position (`number <int>` of bytes). """ return self._pos @@ -328,7 +389,7 @@ class BufferedFile (ClosingContextManager): written yet. (Use `flush` or `close` to force buffered data to be written out.) - :param str data: data to write + :param data: ``str``/``bytes`` data to write """ data = b(data) if self._closed: @@ -362,7 +423,7 @@ class BufferedFile (ClosingContextManager): name is intended to match `readlines`; `writelines` does not add line separators.) - :param iterable sequence: an iterable sequence of strings. + :param sequence: an iterable sequence of strings. """ for line in sequence: self.write(line) @@ -379,7 +440,7 @@ class BufferedFile (ClosingContextManager): def closed(self): return self._closed - ### overrides... + # ...overrides... def _read(self, size): """ @@ -407,7 +468,7 @@ class BufferedFile (ClosingContextManager): """ return 0 - ### internals... + # ...internals... 
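# A sketch (not part of this patch) of the io-style helpers added above:
# any BufferedFile subclass can now fill a caller-supplied buffer. Here
# 'sftp' is an assumed, already-connected paramiko.SFTPClient; its open()
# returns an SFTPFile, which subclasses BufferedFile.
with sftp.open('remote.log', 'rb') as f:
    if f.readable():
        buf = bytearray(32768)
        n = f.readinto(buf)              # number of bytes actually copied
        first_chunk = bytes(buf[:n])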
def _set_mode(self, mode='r', bufsize=-1): """ @@ -471,7 +532,10 @@ class BufferedFile (ClosingContextManager): return if self.newlines is None: self.newlines = newline - elif self.newlines != newline and isinstance(self.newlines, bytes_types): + elif ( + self.newlines != newline and + isinstance(self.newlines, bytes_types) + ): self.newlines = (self.newlines, newline) elif newline not in self.newlines: self.newlines += (newline,) diff --git a/paramiko/hostkeys.py b/paramiko/hostkeys.py index 84868875..3e27fd52 100644 --- a/paramiko/hostkeys.py +++ b/paramiko/hostkeys.py @@ -35,6 +35,8 @@ from paramiko.dsskey import DSSKey from paramiko.rsakey import RSAKey from paramiko.util import get_logger, constant_time_bytes_eq from paramiko.ecdsakey import ECDSAKey +from paramiko.ed25519key import Ed25519Key +from paramiko.ssh_exception import SSHException class HostKeys (MutableMapping): @@ -89,14 +91,17 @@ class HostKeys (MutableMapping): :param str filename: name of the file to read host keys from - :raises IOError: if there was an error reading the file + :raises: ``IOError`` -- if there was an error reading the file """ with open(filename, 'r') as f: - for lineno, line in enumerate(f): + for lineno, line in enumerate(f, 1): line = line.strip() if (len(line) == 0) or (line[0] == '#'): continue - e = HostKeyEntry.from_line(line, lineno) + try: + e = HostKeyEntry.from_line(line, lineno) + except SSHException: + continue if e is not None: _hostnames = e.hostnames for h in _hostnames: @@ -107,14 +112,14 @@ class HostKeys (MutableMapping): def save(self, filename): """ - Save host keys into a file, in the format used by OpenSSH. The order of - keys in the file will be preserved when possible (if these keys were + Save host keys into a file, in the format used by OpenSSH. The order + of keys in the file will be preserved when possible (if these keys were loaded from a file originally). The single exception is that combined lines will be split into individual key lines, which is arguably a bug. :param str filename: name of the file to write - :raises IOError: if there was an error writing the file + :raises: ``IOError`` -- if there was an error writing the file .. versionadded:: 1.6.1 """ @@ -131,7 +136,8 @@ class HostKeys (MutableMapping): returned. The keytype will be either ``"ssh-rsa"`` or ``"ssh-dss"``. :param str hostname: the hostname (or IP) to lookup - :return: dict of `str` -> `.PKey` keys associated with this host (or ``None``) + :return: dict of `str` -> `.PKey` keys associated with this host + (or ``None``) """ class SubDict (MutableMapping): def __init__(self, hostname, entries, hostkeys): @@ -174,17 +180,35 @@ class HostKeys (MutableMapping): self._hostkeys._entries.append(e) def keys(self): - return [e.key.get_name() for e in self._entries if e.key is not None] + return [ + e.key.get_name() for e in self._entries + if e.key is not None + ] entries = [] for e in self._entries: - for h in e.hostnames: - if h.startswith('|1|') and not hostname.startswith('|1|') and constant_time_bytes_eq(self.hash_host(hostname, h), h) or h == hostname: - entries.append(e) + if self._hostname_matches(hostname, e): + entries.append(e) if len(entries) == 0: return None return SubDict(hostname, entries, self) + def _hostname_matches(self, hostname, entry): + """ + Tests whether ``hostname`` string matches given SubDict ``entry``. 
+ + :returns bool: + """ + for h in entry.hostnames: + if ( + h == hostname or + h.startswith('|1|') and + not hostname.startswith('|1|') and + constant_time_bytes_eq(self.hash_host(hostname, h), h) + ): + return True + return False + def check(self, hostname, key): """ Return True if the given key is associated with the given hostname @@ -216,15 +240,22 @@ class HostKeys (MutableMapping): def __len__(self): return len(self.keys()) - def __delitem__(self, key): - k = self[key] - def __getitem__(self, key): ret = self.lookup(key) if ret is None: raise KeyError(key) return ret + def __delitem__(self, key): + index = None + for i, entry in enumerate(self._entries): + if self._hostname_matches(key, entry): + index = i + break + if index is None: + raise KeyError(key) + self._entries.pop(index) + def __setitem__(self, hostname, entry): # don't use this please. if len(entry) == 0: @@ -233,7 +264,7 @@ class HostKeys (MutableMapping): for key_type in entry.keys(): found = False for e in self._entries: - if (hostname in e.hostnames) and (e.key.get_name() == key_type): + if (hostname in e.hostnames) and e.key.get_name() == key_type: # replace e.key = entry[key_type] found = True @@ -262,7 +293,8 @@ class HostKeys (MutableMapping): hashed hostnames in the known_hosts file. :param str hostname: the hostname to hash - :param str salt: optional salt to use when hashing (must be 20 bytes long) + :param str salt: optional salt to use when hashing + (must be 20 bytes long) :return: the hashed hostname as a `str` """ if salt is None: @@ -327,8 +359,10 @@ class HostKeyEntry: key = RSAKey(data=decodebytes(key)) elif keytype == 'ssh-dss': key = DSSKey(data=decodebytes(key)) - elif keytype == 'ecdsa-sha2-nistp256': + elif keytype in ECDSAKey.supported_key_format_identifiers(): key = ECDSAKey(data=decodebytes(key), validate_point=False) + elif keytype == 'ssh-ed25519': + key = Ed25519Key(data=decodebytes(key)) else: log.info("Unable to handle key of type %s" % (keytype,)) return None @@ -345,8 +379,10 @@ class HostKeyEntry: included. 
""" if self.valid: - return '%s %s %s\n' % (','.join(self.hostnames), self.key.get_name(), - self.key.get_base64()) + return '%s %s %s\n' % ( + ','.join(self.hostnames), + self.key.get_name(), + self.key.get_base64()) return None def __repr__(self): diff --git a/paramiko/kex_ecdh_nist.py b/paramiko/kex_ecdh_nist.py new file mode 100644 index 00000000..702a872d --- /dev/null +++ b/paramiko/kex_ecdh_nist.py @@ -0,0 +1,118 @@ +""" +Ephemeral Elliptic Curve Diffie-Hellman (ECDH) key exchange +RFC 5656, Section 4 +""" + +from hashlib import sha256, sha384, sha512 +from paramiko.message import Message +from paramiko.py3compat import byte_chr, long +from paramiko.ssh_exception import SSHException +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives.asymmetric import ec +from binascii import hexlify + +_MSG_KEXECDH_INIT, _MSG_KEXECDH_REPLY = range(30, 32) +c_MSG_KEXECDH_INIT, c_MSG_KEXECDH_REPLY = [byte_chr(c) for c in range(30, 32)] + + +class KexNistp256(): + + name = "ecdh-sha2-nistp256" + hash_algo = sha256 + curve = ec.SECP256R1() + + def __init__(self, transport): + self.transport = transport + # private key, client public and server public keys + self.P = long(0) + self.Q_C = None + self.Q_S = None + + def start_kex(self): + self._generate_key_pair() + if self.transport.server_mode: + self.transport._expect_packet(_MSG_KEXECDH_INIT) + return + m = Message() + m.add_byte(c_MSG_KEXECDH_INIT) + # SEC1: V2.0 2.3.3 Elliptic-Curve-Point-to-Octet-String Conversion + m.add_string(self.Q_C.public_numbers().encode_point()) + self.transport._send_message(m) + self.transport._expect_packet(_MSG_KEXECDH_REPLY) + + def parse_next(self, ptype, m): + if self.transport.server_mode and (ptype == _MSG_KEXECDH_INIT): + return self._parse_kexecdh_init(m) + elif not self.transport.server_mode and (ptype == _MSG_KEXECDH_REPLY): + return self._parse_kexecdh_reply(m) + raise SSHException('KexECDH asked to handle packet type %d' % ptype) + + def _generate_key_pair(self): + self.P = ec.generate_private_key(self.curve, default_backend()) + if self.transport.server_mode: + self.Q_S = self.P.public_key() + return + self.Q_C = self.P.public_key() + + def _parse_kexecdh_init(self, m): + Q_C_bytes = m.get_string() + self.Q_C = ec.EllipticCurvePublicNumbers.from_encoded_point( + self.curve, Q_C_bytes + ) + K_S = self.transport.get_server_key().asbytes() + K = self.P.exchange(ec.ECDH(), self.Q_C.public_key(default_backend())) + K = long(hexlify(K), 16) + # compute exchange hash + hm = Message() + hm.add(self.transport.remote_version, self.transport.local_version, + self.transport.remote_kex_init, self.transport.local_kex_init) + hm.add_string(K_S) + hm.add_string(Q_C_bytes) + # SEC1: V2.0 2.3.3 Elliptic-Curve-Point-to-Octet-String Conversion + hm.add_string(self.Q_S.public_numbers().encode_point()) + hm.add_mpint(long(K)) + H = self.hash_algo(hm.asbytes()).digest() + self.transport._set_K_H(K, H) + sig = self.transport.get_server_key().sign_ssh_data(H) + # construct reply + m = Message() + m.add_byte(c_MSG_KEXECDH_REPLY) + m.add_string(K_S) + m.add_string(self.Q_S.public_numbers().encode_point()) + m.add_string(sig) + self.transport._send_message(m) + self.transport._activate_outbound() + + def _parse_kexecdh_reply(self, m): + K_S = m.get_string() + Q_S_bytes = m.get_string() + self.Q_S = ec.EllipticCurvePublicNumbers.from_encoded_point( + self.curve, Q_S_bytes + ) + sig = m.get_binary() + K = self.P.exchange(ec.ECDH(), self.Q_S.public_key(default_backend())) + K = long(hexlify(K), 
16) + # compute exchange hash and verify signature + hm = Message() + hm.add(self.transport.local_version, self.transport.remote_version, + self.transport.local_kex_init, self.transport.remote_kex_init) + hm.add_string(K_S) + # SEC1: V2.0 2.3.3 Elliptic-Curve-Point-to-Octet-String Conversion + hm.add_string(self.Q_C.public_numbers().encode_point()) + hm.add_string(Q_S_bytes) + hm.add_mpint(K) + self.transport._set_K_H(K, self.hash_algo(hm.asbytes()).digest()) + self.transport._verify_key(K_S, sig) + self.transport._activate_outbound() + + +class KexNistp384(KexNistp256): + name = "ecdh-sha2-nistp384" + hash_algo = sha384 + curve = ec.SECP384R1() + + +class KexNistp521(KexNistp256): + name = "ecdh-sha2-nistp521" + hash_algo = sha512 + curve = ec.SECP521R1() diff --git a/paramiko/kex_gex.py b/paramiko/kex_gex.py index cb548f33..ba45da18 100644 --- a/paramiko/kex_gex.py +++ b/paramiko/kex_gex.py @@ -23,7 +23,7 @@ client side, and a **lot** more on the server side. """ import os -from hashlib import sha1 +from hashlib import sha1, sha256 from paramiko import util from paramiko.common import DEBUG @@ -34,8 +34,10 @@ from paramiko.ssh_exception import SSHException _MSG_KEXDH_GEX_REQUEST_OLD, _MSG_KEXDH_GEX_GROUP, _MSG_KEXDH_GEX_INIT, \ _MSG_KEXDH_GEX_REPLY, _MSG_KEXDH_GEX_REQUEST = range(30, 35) + c_MSG_KEXDH_GEX_REQUEST_OLD, c_MSG_KEXDH_GEX_GROUP, c_MSG_KEXDH_GEX_INIT, \ - c_MSG_KEXDH_GEX_REPLY, c_MSG_KEXDH_GEX_REQUEST = [byte_chr(c) for c in range(30, 35)] + c_MSG_KEXDH_GEX_REPLY, c_MSG_KEXDH_GEX_REQUEST = \ + [byte_chr(c) for c in range(30, 35)] class KexGex (object): @@ -44,6 +46,7 @@ class KexGex (object): min_bits = 1024 max_bits = 8192 preferred_bits = 2048 + hash_algo = sha1 def __init__(self, transport): self.transport = transport @@ -57,7 +60,8 @@ class KexGex (object): def start_kex(self, _test_old_style=False): if self.transport.server_mode: - self.transport._expect_packet(_MSG_KEXDH_GEX_REQUEST, _MSG_KEXDH_GEX_REQUEST_OLD) + self.transport._expect_packet( + _MSG_KEXDH_GEX_REQUEST, _MSG_KEXDH_GEX_REQUEST_OLD) return # request a bit range: we accept (min_bits) to (max_bits), but prefer # (preferred_bits). according to the spec, we shouldn't pull the @@ -87,9 +91,10 @@ class KexGex (object): return self._parse_kexdh_gex_reply(m) elif ptype == _MSG_KEXDH_GEX_REQUEST_OLD: return self._parse_kexdh_gex_request_old(m) - raise SSHException('KexGex asked to handle packet type %d' % ptype) + raise SSHException( + 'KexGex %s asked to handle packet type %d' % self.name, ptype) - ### internals... + # ...internals... def _generate_x(self): # generate an "x" (1 < x < (p-1)/2). 
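# A sketch (not part of this patch): assuming the new ECDH kex classes above
# are registered in Transport's preferred-kex table (registration is not shown
# in this hunk), a client can pin negotiation to them through the existing
# SecurityOptions API. 'sock' is an assumed, already-connected socket.
import paramiko

t = paramiko.Transport(sock)
opts = t.get_security_options()
opts.kex = ('ecdh-sha2-nistp256', 'diffie-hellman-group-exchange-sha1')
t.start_client()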
@@ -132,8 +137,12 @@ class KexGex (object): # generate prime pack = self.transport._get_modulus_pack() if pack is None: - raise SSHException('Can\'t do server-side gex with no modulus pack') - self.transport._log(DEBUG, 'Picking p (%d <= %d <= %d bits)' % (minbits, preferredbits, maxbits)) + raise SSHException( + 'Can\'t do server-side gex with no modulus pack') + self.transport._log( + DEBUG, + 'Picking p (%d <= %d <= %d bits)' % ( + minbits, preferredbits, maxbits)) self.g, self.p = pack.get_modulus(minbits, preferredbits, maxbits) m = Message() m.add_byte(c_MSG_KEXDH_GEX_GROUP) @@ -143,7 +152,8 @@ class KexGex (object): self.transport._expect_packet(_MSG_KEXDH_GEX_INIT) def _parse_kexdh_gex_request_old(self, m): - # same as above, but without min_bits or max_bits (used by older clients like putty) + # same as above, but without min_bits or max_bits (used by older + # clients like putty) self.preferred_bits = m.get_int() # smoosh the user's preferred size into our own limits if self.preferred_bits > self.max_bits: @@ -153,9 +163,12 @@ class KexGex (object): # generate prime pack = self.transport._get_modulus_pack() if pack is None: - raise SSHException('Can\'t do server-side gex with no modulus pack') - self.transport._log(DEBUG, 'Picking p (~ %d bits)' % (self.preferred_bits,)) - self.g, self.p = pack.get_modulus(self.min_bits, self.preferred_bits, self.max_bits) + raise SSHException( + 'Can\'t do server-side gex with no modulus pack') + self.transport._log( + DEBUG, 'Picking p (~ %d bits)' % (self.preferred_bits,)) + self.g, self.p = pack.get_modulus( + self.min_bits, self.preferred_bits, self.max_bits) m = Message() m.add_byte(c_MSG_KEXDH_GEX_GROUP) m.add_mpint(self.p) @@ -170,7 +183,9 @@ class KexGex (object): # reject if p's bit length < 1024 or > 8192 bitlen = util.bit_length(self.p) if (bitlen < 1024) or (bitlen > 8192): - raise SSHException('Server-generated gex p (don\'t ask) is out of range (%d bits)' % bitlen) + raise SSHException( + 'Server-generated gex p (don\'t ask) is out of range ' + '(%d bits)' % bitlen) self.transport._log(DEBUG, 'Got server p (%d bits)' % bitlen) self._generate_x() # now compute e = g^x mod p @@ -189,7 +204,8 @@ class KexGex (object): self.f = pow(self.g, self.x, self.p) K = pow(self.e, self.x, self.p) key = self.transport.get_server_key().asbytes() - # okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K) + # okay, build up the hash H of + # (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K) # noqa hm = Message() hm.add(self.transport.remote_version, self.transport.local_version, self.transport.remote_kex_init, self.transport.local_kex_init, @@ -204,7 +220,7 @@ class KexGex (object): hm.add_mpint(self.e) hm.add_mpint(self.f) hm.add_mpint(K) - H = sha1(hm.asbytes()).digest() + H = self.hash_algo(hm.asbytes()).digest() self.transport._set_K_H(K, H) # sign it sig = self.transport.get_server_key().sign_ssh_data(H) @@ -224,7 +240,8 @@ class KexGex (object): if (self.f < 1) or (self.f > self.p - 1): raise SSHException('Server kex "f" is out of range') K = pow(self.f, self.x, self.p) - # okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K) + # okay, build up the hash H of + # (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K) # noqa hm = Message() hm.add(self.transport.local_version, self.transport.remote_version, self.transport.local_kex_init, self.transport.remote_kex_init, @@ -239,6 +256,11 @@ 
class KexGex (object): hm.add_mpint(self.e) hm.add_mpint(self.f) hm.add_mpint(K) - self.transport._set_K_H(K, sha1(hm.asbytes()).digest()) + self.transport._set_K_H(K, self.hash_algo(hm.asbytes()).digest()) self.transport._verify_key(host_key, sig) self.transport._activate_outbound() + + +class KexGexSHA256(KexGex): + name = 'diffie-hellman-group-exchange-sha256' + hash_algo = sha256 diff --git a/paramiko/kex_group1.py b/paramiko/kex_group1.py index a88f00d2..e8f042b1 100644 --- a/paramiko/kex_group1.py +++ b/paramiko/kex_group1.py @@ -41,10 +41,11 @@ b0000000000000000 = zero_byte * 8 class KexGroup1(object): # draft-ietf-secsh-transport-09.txt, page 17 - P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF + P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF # noqa G = 2 name = 'diffie-hellman-group1-sha1' + hash_algo = sha1 def __init__(self, transport): self.transport = transport @@ -74,14 +75,15 @@ class KexGroup1(object): return self._parse_kexdh_reply(m) raise SSHException('KexGroup1 asked to handle packet type %d' % ptype) - ### internals... + # ...internals... def _generate_x(self): # generate an "x" (1 < x < q), where q is (p-1)/2. - # p is a 128-byte (1024-bit) number, where the first 64 bits are 1. + # p is a 128-byte (1024-bit) number, where the first 64 bits are 1. # therefore q can be approximated as a 2^1023. we drop the subset of - # potential x where the first 63 bits are 1, because some of those will be - # larger than q (but this is a tiny tiny subset of potential x). + # potential x where the first 63 bits are 1, because some of those + # will be larger than q (but this is a tiny tiny subset of + # potential x). while 1: x_bytes = os.urandom(128) x_bytes = byte_mask(x_bytes[0], 0x7f) + x_bytes[1:] @@ -98,7 +100,8 @@ class KexGroup1(object): raise SSHException('Server kex "f" is out of range') sig = m.get_binary() K = pow(self.f, self.x, self.P) - # okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || e || f || K) + # okay, build up the hash H of + # (V_C || V_S || I_C || I_S || K_S || e || f || K) hm = Message() hm.add(self.transport.local_version, self.transport.remote_version, self.transport.local_kex_init, self.transport.remote_kex_init) @@ -117,7 +120,8 @@ class KexGroup1(object): raise SSHException('Client kex "e" is out of range') K = pow(self.e, self.x, self.P) key = self.transport.get_server_key().asbytes() - # okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || e || f || K) + # okay, build up the hash H of + # (V_C || V_S || I_C || I_S || K_S || e || f || K) hm = Message() hm.add(self.transport.remote_version, self.transport.local_version, self.transport.remote_kex_init, self.transport.local_kex_init) diff --git a/paramiko/kex_group14.py b/paramiko/kex_group14.py index a914aeaf..22955e34 100644 --- a/paramiko/kex_group14.py +++ b/paramiko/kex_group14.py @@ -22,12 +22,14 @@ Standard SSH key exchange ("kex" if you wanna sound cool). 
Diffie-Hellman of """ from paramiko.kex_group1 import KexGroup1 +from hashlib import sha1 class KexGroup14(KexGroup1): # http://tools.ietf.org/html/rfc3526#section-3 - P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF + P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF # noqa G = 2 name = 'diffie-hellman-group14-sha1' + hash_algo = sha1 diff --git a/paramiko/kex_gss.py b/paramiko/kex_gss.py index 4e8380ef..ba24c0a0 100644 --- a/paramiko/kex_gss.py +++ b/paramiko/kex_gss.py @@ -21,14 +21,15 @@ """ -This module provides GSS-API / SSPI Key Exchange as defined in RFC 4462. +This module provides GSS-API / SSPI Key Exchange as defined in :rfc:`4462`. .. note:: Credential delegation is not supported in server mode. .. note:: - `RFC 4462 Section 2.2 <http://www.ietf.org/rfc/rfc4462.txt>`_ says we are - not required to implement GSS-API error messages. Thus, in many methods - within this module, if an error occurs an exception will be thrown and the + `RFC 4462 Section 2.2 + <https://tools.ietf.org/html/rfc4462.html#section-2.2>`_ says we are not + required to implement GSS-API error messages. Thus, in many methods within + this module, if an error occurs an exception will be thrown and the connection will be terminated. .. seealso:: :doc:`/api/ssh_gss` @@ -36,33 +37,38 @@ This module provides GSS-API / SSPI Key Exchange as defined in RFC 4462. .. 
versionadded:: 1.15 """ +import os from hashlib import sha1 -from paramiko.common import * +from paramiko.common import * # noqa from paramiko import util from paramiko.message import Message -from paramiko.py3compat import byte_chr, long, byte_mask, byte_ord +from paramiko.py3compat import byte_chr, byte_mask, byte_ord from paramiko.ssh_exception import SSHException MSG_KEXGSS_INIT, MSG_KEXGSS_CONTINUE, MSG_KEXGSS_COMPLETE, MSG_KEXGSS_HOSTKEY,\ -MSG_KEXGSS_ERROR = range(30, 35) + MSG_KEXGSS_ERROR = range(30, 35) MSG_KEXGSS_GROUPREQ, MSG_KEXGSS_GROUP = range(40, 42) c_MSG_KEXGSS_INIT, c_MSG_KEXGSS_CONTINUE, c_MSG_KEXGSS_COMPLETE,\ -c_MSG_KEXGSS_HOSTKEY, c_MSG_KEXGSS_ERROR = [byte_chr(c) for c in range(30, 35)] -c_MSG_KEXGSS_GROUPREQ, c_MSG_KEXGSS_GROUP = [byte_chr(c) for c in range(40, 42)] + c_MSG_KEXGSS_HOSTKEY, c_MSG_KEXGSS_ERROR = [ + byte_chr(c) for c in range(30, 35) + ] +c_MSG_KEXGSS_GROUPREQ, c_MSG_KEXGSS_GROUP = [ + byte_chr(c) for c in range(40, 42) +] class KexGSSGroup1(object): """ - GSS-API / SSPI Authenticated Diffie-Hellman Key Exchange - as defined in `RFC 4462 Section 2 <http://www.ietf.org/rfc/rfc4462.txt>`_ + GSS-API / SSPI Authenticated Diffie-Hellman Key Exchange as defined in `RFC + 4462 Section 2 <https://tools.ietf.org/html/rfc4462.html#section-2>`_ """ # draft-ietf-secsh-transport-09.txt, page 17 - P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF + P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF # noqa G = 2 - b7fffffffffffffff = byte_chr(0x7f) + max_byte * 7 - b0000000000000000 = zero_byte * 8 + b7fffffffffffffff = byte_chr(0x7f) + max_byte * 7 # noqa + b0000000000000000 = zero_byte * 8 # noqa NAME = "gss-group1-sha1-toWM5Slw5Ew8Mqkay+al2g==" def __init__(self, transport): @@ -102,7 +108,7 @@ class KexGSSGroup1(object): """ Parse the next packet. - :param char ptype: The type of the incomming packet + :param ptype: The (string) type of the incoming packet :param `.Message` m: The paket content """ if self.transport.server_mode and (ptype == MSG_KEXGSS_INIT): @@ -125,14 +131,14 @@ class KexGSSGroup1(object): generate an "x" (1 < x < q), where q is (p-1)/2. p is a 128-byte (1024-bit) number, where the first 64 bits are 1. therefore q can be approximated as a 2^1023. we drop the subset of - potential x where the first 63 bits are 1, because some of those will be - larger than q (but this is a tiny tiny subset of potential x). + potential x where the first 63 bits are 1, because some of those will + be larger than q (but this is a tiny tiny subset of potential x). """ while 1: - x_bytes = self.transport.rng.read(128) + x_bytes = os.urandom(128) x_bytes = byte_mask(x_bytes[0], 0x7f) + x_bytes[1:] - if (x_bytes[:8] != self.b7fffffffffffffff) and \ - (x_bytes[:8] != self.b0000000000000000): + first = x_bytes[:8] + if first not in (self.b7fffffffffffffff, self.b0000000000000000): break self.x = util.inflate_long(x_bytes) @@ -154,18 +160,21 @@ class KexGSSGroup1(object): """ Parse the SSH2_MSG_KEXGSS_CONTINUE message. 
- :param `.Message` m: The content of the SSH2_MSG_KEXGSS_CONTINUE message + :param `.Message` m: The content of the SSH2_MSG_KEXGSS_CONTINUE + message """ if not self.transport.server_mode: srv_token = m.get_string() m = Message() m.add_byte(c_MSG_KEXGSS_CONTINUE) - m.add_string(self.kexgss.ssh_init_sec_context(target=self.gss_host, - recv_token=srv_token)) + m.add_string(self.kexgss.ssh_init_sec_context( + target=self.gss_host, recv_token=srv_token)) self.transport.send_message(m) - self.transport._expect_packet(MSG_KEXGSS_CONTINUE, - MSG_KEXGSS_COMPLETE, - MSG_KEXGSS_ERROR) + self.transport._expect_packet( + MSG_KEXGSS_CONTINUE, + MSG_KEXGSS_COMPLETE, + MSG_KEXGSS_ERROR + ) else: pass @@ -173,7 +182,8 @@ class KexGSSGroup1(object): """ Parse the SSH2_MSG_KEXGSS_COMPLETE message (client mode). - :param `.Message` m: The content of the SSH2_MSG_KEXGSS_COMPLETE message + :param `.Message` m: The content of the + SSH2_MSG_KEXGSS_COMPLETE message """ # client mode if self.transport.host_key is None: @@ -188,7 +198,8 @@ class KexGSSGroup1(object): if bool: srv_token = m.get_string() K = pow(self.f, self.x, self.P) - # okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || e || f || K) + # okay, build up the hash H of + # (V_C || V_S || I_C || I_S || K_S || e || f || K) hm = Message() hm.add(self.transport.local_version, self.transport.remote_version, self.transport.local_kex_init, self.transport.remote_kex_init) @@ -221,7 +232,8 @@ class KexGSSGroup1(object): K = pow(self.e, self.x, self.P) self.transport.host_key = NullHostKey() key = self.transport.host_key.__str__() - # okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || e || f || K) + # okay, build up the hash H of + # (V_C || V_S || I_C || I_S || K_S || e || f || K) hm = Message() hm.add(self.transport.remote_version, self.transport.local_version, self.transport.remote_kex_init, self.transport.local_kex_init) @@ -269,7 +281,7 @@ class KexGSSGroup1(object): maj_status = m.get_int() min_status = m.get_int() err_msg = m.get_string() - lang_tag = m.get_string() # we don't care about the language! + m.get_string() # we don't care about the language! 
raise SSHException("GSS-API Error:\nMajor Status: %s\nMinor Status: %s\ \nError Message: %s\n") % (str(maj_status), str(min_status), @@ -278,18 +290,19 @@ class KexGSSGroup1(object): class KexGSSGroup14(KexGSSGroup1): """ - GSS-API / SSPI Authenticated Diffie-Hellman Group14 Key Exchange - as defined in `RFC 4462 Section 2 <http://www.ietf.org/rfc/rfc4462.txt>`_ + GSS-API / SSPI Authenticated Diffie-Hellman Group14 Key Exchange as defined + in `RFC 4462 Section 2 + <https://tools.ietf.org/html/rfc4462.html#section-2>`_ """ - P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF + P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF # noqa G = 2 NAME = "gss-group14-sha1-toWM5Slw5Ew8Mqkay+al2g==" class KexGSSGex(object): """ - GSS-API / SSPI Authenticated Diffie-Hellman Group Exchange - as defined in `RFC 4462 Section 2 <http://www.ietf.org/rfc/rfc4462.txt>`_ + GSS-API / SSPI Authenticated Diffie-Hellman Group Exchange as defined in + `RFC 4462 Section 2 <https://tools.ietf.org/html/rfc4462.html#section-2>`_ """ NAME = "gss-gex-sha1-toWM5Slw5Ew8Mqkay+al2g==" min_bits = 1024 @@ -332,7 +345,7 @@ class KexGSSGex(object): """ Parse the next packet. - :param char ptype: The type of the incomming packet + :param ptype: The (string) type of the incoming packet :param `.Message` m: The paket content """ if ptype == MSG_KEXGSS_GROUPREQ: @@ -364,7 +377,7 @@ class KexGSSGex(object): qhbyte <<= 1 qmask >>= 1 while True: - x_bytes = self.transport.rng.read(byte_count) + x_bytes = os.urandom(byte_count) x_bytes = byte_mask(x_bytes[0], qmask) + x_bytes[1:] x = util.inflate_long(x_bytes, 1) if (x > 1) and (x < q): @@ -375,7 +388,8 @@ class KexGSSGex(object): """ Parse the SSH2_MSG_KEXGSS_GROUPREQ message (server mode). 
- :param `.Message` m: The content of the SSH2_MSG_KEXGSS_GROUPREQ message + :param `.Message` m: The content of the + SSH2_MSG_KEXGSS_GROUPREQ message """ minbits = m.get_int() preferredbits = m.get_int() @@ -399,8 +413,12 @@ class KexGSSGex(object): # generate prime pack = self.transport._get_modulus_pack() if pack is None: - raise SSHException('Can\'t do server-side gex with no modulus pack') - self.transport._log(DEBUG, 'Picking p (%d <= %d <= %d bits)' % (minbits, preferredbits, maxbits)) + raise SSHException( + 'Can\'t do server-side gex with no modulus pack') + self.transport._log( + DEBUG, # noqa + 'Picking p (%d <= %d <= %d bits)' % ( + minbits, preferredbits, maxbits)) self.g, self.p = pack.get_modulus(minbits, preferredbits, maxbits) m = Message() m.add_byte(c_MSG_KEXGSS_GROUP) @@ -420,8 +438,10 @@ class KexGSSGex(object): # reject if p's bit length < 1024 or > 8192 bitlen = util.bit_length(self.p) if (bitlen < 1024) or (bitlen > 8192): - raise SSHException('Server-generated gex p (don\'t ask) is out of range (%d bits)' % bitlen) - self.transport._log(DEBUG, 'Got server p (%d bits)' % bitlen) + raise SSHException( + 'Server-generated gex p (don\'t ask) is out of range ' + '(%d bits)' % bitlen) + self.transport._log(DEBUG, 'Got server p (%d bits)' % bitlen) # noqa self._generate_x() # now compute e = g^x mod p self.e = pow(self.g, self.x, self.p) @@ -450,7 +470,8 @@ class KexGSSGex(object): K = pow(self.e, self.x, self.p) self.transport.host_key = NullHostKey() key = self.transport.host_key.__str__() - # okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K) + # okay, build up the hash H of + # (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K) # noqa hm = Message() hm.add(self.transport.remote_version, self.transport.local_version, self.transport.remote_kex_init, self.transport.local_kex_init, @@ -540,7 +561,8 @@ class KexGSSGex(object): if (self.f < 1) or (self.f > self.p - 1): raise SSHException('Server kex "f" is out of range') K = pow(self.f, self.x, self.p) - # okay, build up the hash H of (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K) + # okay, build up the hash H of + # (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K) # noqa hm = Message() hm.add(self.transport.local_version, self.transport.remote_version, self.transport.local_kex_init, self.transport.remote_kex_init, @@ -581,7 +603,7 @@ class KexGSSGex(object): maj_status = m.get_int() min_status = m.get_int() err_msg = m.get_string() - lang_tag = m.get_string() # we don't care about the language! + m.get_string() # we don't care about the language (lang_tag)! 
raise SSHException("GSS-API Error:\nMajor Status: %s\nMinor Status: %s\ \nError Message: %s\n") % (str(maj_status), str(min_status), @@ -590,8 +612,9 @@ class KexGSSGex(object): class NullHostKey(object): """ - This class represents the Null Host Key for GSS-API Key Exchange - as defined in `RFC 4462 Section 5 <http://www.ietf.org/rfc/rfc4462.txt>`_ + This class represents the Null Host Key for GSS-API Key Exchange as defined + in `RFC 4462 Section 5 + <https://tools.ietf.org/html/rfc4462.html#section-5>`_ """ def __init__(self): self.key = "" diff --git a/paramiko/message.py b/paramiko/message.py index b893e76d..f8ed6170 100644 --- a/paramiko/message.py +++ b/paramiko/message.py @@ -32,7 +32,7 @@ class Message (object): An SSH2 message is a stream of bytes that encodes some combination of strings, integers, bools, and infinite-precision integers (known in Python as longs). This class builds or breaks down such a byte stream. - + Normally you don't need to deal with anything this low-level, but it's exposed for people implementing custom extensions, or features that paramiko doesn't support yet. @@ -129,7 +129,7 @@ class Message (object): b = self.get_bytes(1) return b != zero_byte - def get_int(self): + def get_adaptive_int(self): """ Fetch an int from the stream. @@ -141,25 +141,9 @@ class Message (object): byte += self.get_bytes(3) return struct.unpack('>I', byte)[0] - def get_size(self): - """ - Fetch an int from the stream. - - @return: a 32-bit unsigned integer. - @rtype: int - """ - byte = self.get_bytes(1) - if byte == max_byte: - return util.inflate_long(self.get_binary()) - byte += self.get_bytes(3) - return struct.unpack('>I', byte)[0] - - def get_size(self): + def get_int(self): """ Fetch an int from the stream. - - @return: a 32-bit unsigned integer. - @rtype: int """ return struct.unpack('>I', self.get_bytes(4))[0] @@ -185,35 +169,26 @@ class Message (object): contain unprintable characters. (It's not unheard of for a string to contain another byte-stream message.) """ - return self.get_bytes(self.get_size()) + return self.get_bytes(self.get_int()) def get_text(self): """ - Fetch a string from the stream. This could be a byte string and may - contain unprintable characters. (It's not unheard of for a string to - contain another byte-stream Message.) - - @return: a string. - @rtype: string + Fetch a Unicode string from the stream. """ - return u(self.get_bytes(self.get_size())) - #return self.get_bytes(self.get_size()) + return u(self.get_string()) def get_binary(self): """ Fetch a string from the stream. This could be a byte string and may contain unprintable characters. (It's not unheard of for a string to contain another byte-stream Message.) - - @return: a string. - @rtype: string """ - return self.get_bytes(self.get_size()) + return self.get_bytes(self.get_int()) def get_list(self): """ Fetch a `list` of `strings <str>` from the stream. - + These are trivially encoded as comma-separated values in a string. """ return self.get_text().split(',') @@ -221,7 +196,7 @@ class Message (object): def add_bytes(self, b): """ Write bytes to the stream, without any formatting. - + :param str b: bytes to add """ self.packet.write(b) @@ -230,7 +205,7 @@ class Message (object): def add_byte(self, b): """ Write a single byte to the stream, without any formatting. - + :param str b: byte to add """ self.packet.write(b) @@ -239,7 +214,7 @@ class Message (object): def add_boolean(self, b): """ Add a boolean value to the stream. 
- + :param bool b: boolean value to add """ if b: @@ -247,35 +222,21 @@ class Message (object): else: self.packet.write(zero_byte) return self - - def add_size(self, n): - """ - Add an integer to the stream. - - :param int n: integer to add - """ - self.packet.write(struct.pack('>I', n)) - return self - + def add_int(self, n): """ Add an integer to the stream. - + :param int n: integer to add """ - if n >= Message.big_int: - self.packet.write(max_byte) - self.add_string(util.deflate_long(n)) - else: - self.packet.write(struct.pack('>I', n)) + self.packet.write(struct.pack('>I', n)) return self - def add_int(self, n): + def add_adaptive_int(self, n): """ Add an integer to the stream. - @param n: integer to add - @type n: int + :param int n: integer to add """ if n >= Message.big_int: self.packet.write(max_byte) @@ -297,7 +258,7 @@ class Message (object): """ Add a long int to the stream, encoded as an infinite-precision integer. This method only works on positive numbers. - + :param long z: long int to add """ self.add_string(util.deflate_long(z)) @@ -306,11 +267,11 @@ class Message (object): def add_string(self, s): """ Add a string to the stream. - + :param str s: string to add """ s = asbytes(s) - self.add_size(len(s)) + self.add_int(len(s)) self.packet.write(s) return self @@ -319,17 +280,17 @@ class Message (object): Add a list of strings to the stream. They are encoded identically to a single string of values separated by commas. (Yes, really, that's how SSH2 does it.) - + :param list l: list of strings to add """ self.add_string(','.join(l)) return self - + def _add(self, i): if type(i) is bool: return self.add_boolean(i) elif isinstance(i, integer_types): - return self.add_int(i) + return self.add_adaptive_int(i) elif type(i) is list: return self.add_list(i) else: @@ -342,7 +303,7 @@ class Message (object): .. warning:: Longs are encoded non-deterministically. Don't use this method. - + :param seq: the sequence of items """ for item in seq: diff --git a/paramiko/packet.py b/paramiko/packet.py index f516ff9b..95a26c6e 100644 --- a/paramiko/packet.py +++ b/paramiko/packet.py @@ -29,8 +29,10 @@ import time from hmac import HMAC from paramiko import util -from paramiko.common import linefeed_byte, cr_byte_value, asbytes, MSG_NAMES, \ - DEBUG, xffffffff, zero_byte +from paramiko.common import ( + linefeed_byte, cr_byte_value, asbytes, MSG_NAMES, DEBUG, xffffffff, + zero_byte, +) from paramiko.py3compat import u, byte_ord from paramiko.ssh_exception import SSHException, ProxyCommandFailure from paramiko.message import Message @@ -41,9 +43,19 @@ def compute_hmac(key, message, digest_class): class NeedRekeyException (Exception): + """ + Exception indicating a rekey is needed. + """ pass +def first_arg(e): + arg = None + if type(e.args) is tuple and len(e.args) > 0: + arg = e.args[0] + return arg + + class Packetizer (object): """ Implementation of the base SSH packet protocol. 
@@ -54,8 +66,11 @@ class Packetizer (object): REKEY_PACKETS = pow(2, 29) REKEY_BYTES = pow(2, 29) - REKEY_PACKETS_OVERFLOW_MAX = pow(2, 29) # Allow receiving this many packets after a re-key request before terminating - REKEY_BYTES_OVERFLOW_MAX = pow(2, 29) # Allow receiving this many bytes after a re-key request before terminating + # Allow receiving this many packets after a re-key request before + # terminating + REKEY_PACKETS_OVERFLOW_MAX = pow(2, 29) + # Allow receiving this many bytes after a re-key request before terminating + REKEY_BYTES_OVERFLOW_MAX = pow(2, 29) def __init__(self, socket): self.__socket = socket @@ -99,13 +114,22 @@ class Packetizer (object): self.__keepalive_last = time.time() self.__keepalive_callback = None + self.__timer = None + self.__handshake_complete = False + self.__timer_expired = False + + @property + def closed(self): + return self.__closed + def set_log(self, log): """ Set the Python log object to use for logging. """ self.__logger = log - def set_outbound_cipher(self, block_engine, block_size, mac_engine, mac_size, mac_key, sdctr=False): + def set_outbound_cipher(self, block_engine, block_size, mac_engine, + mac_size, mac_key, sdctr=False): """ Switch outbound data cipher. """ @@ -117,13 +141,15 @@ class Packetizer (object): self.__mac_key_out = mac_key self.__sent_bytes = 0 self.__sent_packets = 0 - # wait until the reset happens in both directions before clearing rekey flag + # wait until the reset happens in both directions before clearing + # rekey flag self.__init_count |= 1 if self.__init_count == 3: self.__init_count = 0 self.__need_rekey = False - def set_inbound_cipher(self, block_engine, block_size, mac_engine, mac_size, mac_key): + def set_inbound_cipher( + self, block_engine, block_size, mac_engine, mac_size, mac_key): """ Switch inbound data cipher. """ @@ -136,7 +162,8 @@ class Packetizer (object): self.__received_packets = 0 self.__received_bytes_overflow = 0 self.__received_packets_overflow = 0 - # wait until the reset happens in both directions before clearing rekey flag + # wait until the reset happens in both directions before clearing + # rekey flag self.__init_count |= 2 if self.__init_count == 3: self.__init_count = 0 @@ -182,6 +209,46 @@ class Packetizer (object): self.__keepalive_callback = callback self.__keepalive_last = time.time() + def read_timer(self): + self.__timer_expired = True + + def start_handshake(self, timeout): + """ + Tells `Packetizer` that the handshake process started. + Starts a book keeping timer that can signal a timeout in the + handshake process. + + :param float timeout: amount of seconds to wait before timing out + """ + if not self.__timer: + self.__timer = threading.Timer(float(timeout), self.read_timer) + self.__timer.start() + + def handshake_timed_out(self): + """ + Checks if the handshake has timed out. + + If `start_handshake` wasn't called before the call to this function, + the return value will always be `False`. If the handshake completed + before a timeout was reached, the return value will be `False` + + :return: handshake time out status, as a `bool` + """ + if not self.__timer: + return False + if self.__handshake_complete: + return False + return self.__timer_expired + + def complete_handshake(self): + """ + Tells `Packetizer` that the handshake has completed. 
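The handshake bookkeeping added in this hunk (start_handshake, handshake_timed_out, complete_handshake) boils down to a cancellable threading.Timer. A stripped-down, standalone mirror of the same pattern, for illustration only:

import threading

class HandshakeWatchdog(object):
    """Minimal mirror of the start/timed_out/complete trio above."""
    def __init__(self):
        self._timer = None
        self._expired = False
        self._complete = False

    def start(self, timeout):
        if not self._timer:
            self._timer = threading.Timer(float(timeout), self._expire)
            self._timer.start()

    def _expire(self):
        self._expired = True

    def timed_out(self):
        # False if never started or already completed, matching the
        # Packetizer semantics documented above
        if not self._timer or self._complete:
            return False
        return self._expired

    def complete(self):
        if self._timer:
            self._timer.cancel()
        self._expired = False
        self._complete = True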
+ """ + if self.__timer: + self.__timer.cancel() + self.__timer_expired = False + self.__handshake_complete = True + def read_all(self, n, check_rekey=False): """ Read as close to N bytes as possible, blocking as long as necessary. @@ -189,8 +256,9 @@ class Packetizer (object): :param int n: number of bytes to read :return: the data read, as a `str` - :raises EOFError: - if the socket was closed before all the bytes could be read + :raises: + ``EOFError`` -- if the socket was closed before all the bytes could + be read """ out = bytes() # handle over-reading from reading the banner line @@ -200,6 +268,8 @@ class Packetizer (object): n -= len(out) while n > 0: got_timeout = False + if self.handshake_timed_out(): + raise EOFError() try: x = self.__socket.recv(n) if len(x) == 0: @@ -212,9 +282,10 @@ class Packetizer (object): # on Linux, sometimes instead of socket.timeout, we get # EAGAIN. this is a bug in recent (> 2.6.9) kernels but # we need to work around it. - if (type(e.args) is tuple) and (len(e.args) > 0) and (e.args[0] == errno.EAGAIN): + arg = first_arg(e) + if arg == errno.EAGAIN: got_timeout = True - elif (type(e.args) is tuple) and (len(e.args) > 0) and (e.args[0] == errno.EINTR): + elif arg == errno.EINTR: # syscall interrupted; try again pass elif self.__closed: @@ -239,9 +310,10 @@ class Packetizer (object): except socket.timeout: retry_write = True except socket.error as e: - if (type(e.args) is tuple) and (len(e.args) > 0) and (e.args[0] == errno.EAGAIN): + arg = first_arg(e) + if arg == errno.EAGAIN: retry_write = True - elif (type(e.args) is tuple) and (len(e.args) > 0) and (e.args[0] == errno.EINTR): + elif arg == errno.EINTR: # syscall interrupted; try again retry_write = True else: @@ -257,11 +329,11 @@ class Packetizer (object): n = -1 else: if n == 0 and iteration_with_zero_as_return_value > 10: - # We shouldn't retry the write, but we didn't - # manage to send anything over the socket. This might be an - # indication that we have lost contact with the remote side, - # but are yet to receive an EOFError or other socket errors. - # Let's give it some iteration to try and catch up. + # We shouldn't retry the write, but we didn't + # manage to send anything over the socket. This might be an + # indication that we have lost contact with the remote + # side, but are yet to receive an EOFError or other socket + # errors. Let's give it some iteration to try and catch up. n = -1 iteration_with_zero_as_return_value += 1 if n < 0: @@ -277,7 +349,7 @@ class Packetizer (object): line, so it's okay to attempt large reads. 
""" buf = self.__remainder - while not linefeed_byte in buf: + while linefeed_byte not in buf: buf += self._read_timeout(timeout) n = buf.index(linefeed_byte) self.__remainder = buf[n + 1:] @@ -304,23 +376,33 @@ class Packetizer (object): data = self.__compress_engine_out(data) packet = self._build_packet(data) if self.__dump_packets: - self._log(DEBUG, 'Write packet <%s>, length %d' % (cmd_name, orig_len)) + self._log( + DEBUG, + 'Write packet <%s>, length %d' % (cmd_name, orig_len)) self._log(DEBUG, util.format_binary(packet, 'OUT: ')) if self.__block_engine_out is not None: - out = self.__block_engine_out.encrypt(packet) + out = self.__block_engine_out.update(packet) else: out = packet # + mac if self.__block_engine_out is not None: - payload = struct.pack('>I', self.__sequence_number_out) + packet - out += compute_hmac(self.__mac_key_out, payload, self.__mac_engine_out)[:self.__mac_size_out] - self.__sequence_number_out = (self.__sequence_number_out + 1) & xffffffff + payload = struct.pack( + '>I', self.__sequence_number_out) + packet + out += compute_hmac( + self.__mac_key_out, + payload, + self.__mac_engine_out)[:self.__mac_size_out] + self.__sequence_number_out = \ + (self.__sequence_number_out + 1) & xffffffff self.write_all(out) self.__sent_bytes += len(out) self.__sent_packets += 1 - if (self.__sent_packets >= self.REKEY_PACKETS or self.__sent_bytes >= self.REKEY_BYTES)\ - and not self.__need_rekey: + sent_too_much = ( + self.__sent_packets >= self.REKEY_PACKETS or + self.__sent_bytes >= self.REKEY_BYTES + ) + if sent_too_much and not self.__need_rekey: # only ask once for rekeying self._log(DEBUG, 'Rekeying (hit %d packets, %d bytes sent)' % (self.__sent_packets, self.__sent_bytes)) @@ -335,16 +417,17 @@ class Packetizer (object): Only one thread should ever be in this function (no other locking is done). 
- :raises SSHException: if the packet is mangled - :raises NeedRekeyException: if the transport should rekey + :raises: `.SSHException` -- if the packet is mangled + :raises: `.NeedRekeyException` -- if the transport should rekey """ header = self.read_all(self.__block_size_in, check_rekey=True) if self.__block_engine_in is not None: - header = self.__block_engine_in.decrypt(header) + header = self.__block_engine_in.update(header) if self.__dump_packets: self._log(DEBUG, util.format_binary(header, 'IN: ')) packet_size = struct.unpack('>I', header[:4])[0] - # leftover contains decrypted bytes from the first block (after the length field) + # leftover contains decrypted bytes from the first block (after the + # length field) leftover = header[4:] if (packet_size - len(leftover)) % self.__block_size_in != 0: raise SSHException('Invalid packet blocking') @@ -352,22 +435,28 @@ class Packetizer (object): packet = buf[:packet_size - len(leftover)] post_packet = buf[packet_size - len(leftover):] if self.__block_engine_in is not None: - packet = self.__block_engine_in.decrypt(packet) + packet = self.__block_engine_in.update(packet) if self.__dump_packets: self._log(DEBUG, util.format_binary(packet, 'IN: ')) packet = leftover + packet if self.__mac_size_in > 0: mac = post_packet[:self.__mac_size_in] - mac_payload = struct.pack('>II', self.__sequence_number_in, packet_size) + packet - my_mac = compute_hmac(self.__mac_key_in, mac_payload, self.__mac_engine_in)[:self.__mac_size_in] + mac_payload = struct.pack( + '>II', self.__sequence_number_in, packet_size) + packet + my_mac = compute_hmac( + self.__mac_key_in, + mac_payload, + self.__mac_engine_in)[:self.__mac_size_in] if not util.constant_time_bytes_eq(my_mac, mac): raise SSHException('Mismatched MAC') padding = byte_ord(packet[0]) payload = packet[1:packet_size - padding] if self.__dump_packets: - self._log(DEBUG, 'Got payload (%d bytes, %d padding)' % (packet_size, padding)) + self._log( + DEBUG, + 'Got payload (%d bytes, %d padding)' % (packet_size, padding)) if self.__compress_engine_in is not None: payload = self.__compress_engine_in(payload) @@ -385,9 +474,12 @@ class Packetizer (object): # dropping the connection self.__received_bytes_overflow += raw_packet_size self.__received_packets_overflow += 1 - if (self.__received_packets_overflow >= self.REKEY_PACKETS_OVERFLOW_MAX) or \ - (self.__received_bytes_overflow >= self.REKEY_BYTES_OVERFLOW_MAX): - raise SSHException('Remote transport is ignoring rekey requests') + if (self.__received_packets_overflow >= + self.REKEY_PACKETS_OVERFLOW_MAX) or \ + (self.__received_bytes_overflow >= + self.REKEY_BYTES_OVERFLOW_MAX): + raise SSHException( + 'Remote transport is ignoring rekey requests') elif (self.__received_packets >= self.REKEY_PACKETS) or \ (self.__received_bytes >= self.REKEY_BYTES): # only ask once for rekeying @@ -403,10 +495,12 @@ class Packetizer (object): else: cmd_name = '$%x' % cmd if self.__dump_packets: - self._log(DEBUG, 'Read packet <%s>, length %d' % (cmd_name, len(payload))) + self._log( + DEBUG, + 'Read packet <%s>, length %d' % (cmd_name, len(payload))) return cmd, msg - ########## protected + # ...protected... 
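The MAC handling in send_message/read_message above is plain HMAC over the sequence number plus the raw (unencrypted) packet, truncated to the negotiated MAC size. A self-contained sketch of that check, with placeholder key and packet values, and assuming compute_hmac matches the helper defined near the top of paramiko/packet.py:

import struct
from hashlib import sha1
from hmac import HMAC, compare_digest

def compute_hmac(key, message, digest_class):
    # assumed equivalent of the helper in paramiko/packet.py
    return HMAC(key, message, digest_class).digest()

mac_key = b'\x00' * 20                      # placeholder session MAC key
seq_in = 7                                  # inbound packet sequence number
packet = b'\x05hello\x00\x00\x00\x00\x00'   # padding-length byte, payload, padding

mac_payload = struct.pack('>II', seq_in, len(packet)) + packet
my_mac = compute_hmac(mac_key, mac_payload, sha1)[:20]

received_mac = my_mac   # normally read off the wire right after the packet
if not compare_digest(my_mac, received_mac):
    raise Exception('Mismatched MAC')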
def _log(self, level, msg): if self.__logger is None: @@ -418,8 +512,11 @@ class Packetizer (object): self.__logger.log(level, msg) def _check_keepalive(self): - if (not self.__keepalive_interval) or (not self.__block_engine_out) or \ - self.__need_rekey: + if ( + not self.__keepalive_interval or + not self.__block_engine_out or + self.__need_rekey + ): # wait till we're encrypting, and not in the middle of rekeying return now = time.time() @@ -438,8 +535,7 @@ class Packetizer (object): except socket.timeout: pass except EnvironmentError as e: - if (type(e.args) is tuple and len(e.args) > 0 and - e.args[0] == errno.EINTR): + if first_arg(e) == errno.EINTR: pass else: raise @@ -457,7 +553,8 @@ class Packetizer (object): packet = struct.pack('>IB', len(payload) + padding + 1, padding) packet += payload if self.__sdctr_out or self.__block_engine_out is None: - # cute trick i caught openssh doing: if we're not encrypting or SDCTR mode (RFC4344), + # cute trick i caught openssh doing: if we're not encrypting or + # SDCTR mode (RFC4344), # don't waste random bytes for the padding packet += (zero_byte * padding) else: diff --git a/paramiko/pipe.py b/paramiko/pipe.py index 4f62d7c5..6ca37703 100644 --- a/paramiko/pipe.py +++ b/paramiko/pipe.py @@ -28,7 +28,6 @@ will trigger as readable in `select <select.select>`. import sys import os import socket -from paramiko.py3compat import b def make_pipe(): @@ -45,13 +44,13 @@ class PosixPipe (object): self._set = False self._forever = False self._closed = False - + def close(self): os.close(self._rfd) os.close(self._wfd) # used for unit tests: self._closed = True - + def fileno(self): return self._rfd @@ -60,13 +59,13 @@ class PosixPipe (object): return os.read(self._rfd, 1) self._set = False - + def set(self): if self._set or self._closed: return self._set = True os.write(self._wfd, b'*') - + def set_forever(self): self._forever = True self.set() @@ -81,39 +80,39 @@ class WindowsPipe (object): serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM) serv.bind(('127.0.0.1', 0)) serv.listen(1) - + # need to save sockets in _rsock/_wsock so they don't get closed self._rsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._rsock.connect(('127.0.0.1', serv.getsockname()[1])) - + self._wsock, addr = serv.accept() serv.close() self._set = False self._forever = False self._closed = False - + def close(self): self._rsock.close() self._wsock.close() # used for unit tests: self._closed = True - + def fileno(self): return self._rsock.fileno() - def clear (self): + def clear(self): if not self._set or self._forever: return self._rsock.recv(1) self._set = False - - def set (self): + + def set(self): if self._set or self._closed: return self._set = True self._wsock.send(b'*') - def set_forever (self): + def set_forever(self): self._forever = True self.set() @@ -123,12 +122,12 @@ class OrPipe (object): self._set = False self._partner = None self._pipe = pipe - + def set(self): self._set = True if not self._partner._set: self._pipe.set() - + def clear(self): self._set = False if not self._partner._set: @@ -146,4 +145,3 @@ def make_or_pipe(pipe): p1._partner = p2 p2._partner = p1 return p1, p2 - diff --git a/paramiko/pkey.py b/paramiko/pkey.py index 1b4af010..f5b0cd18 100644 --- a/paramiko/pkey.py +++ b/paramiko/pkey.py @@ -21,27 +21,39 @@ Common API for all public keys. 
""" import base64 -from binascii import hexlify, unhexlify +from binascii import unhexlify import os from hashlib import md5 -from Crypto.Cipher import DES3, AES +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.primitives.ciphers import algorithms, modes, Cipher from paramiko import util -from paramiko.common import o600, zero_byte +from paramiko.common import o600 from paramiko.py3compat import u, encodebytes, decodebytes, b from paramiko.ssh_exception import SSHException, PasswordRequiredException -class PKey (object): +class PKey(object): """ Base class for public keys. """ # known encryption types for private key files: _CIPHER_TABLE = { - 'AES-128-CBC': {'cipher': AES, 'keysize': 16, 'blocksize': 16, 'mode': AES.MODE_CBC}, - 'DES-EDE3-CBC': {'cipher': DES3, 'keysize': 24, 'blocksize': 8, 'mode': DES3.MODE_CBC}, + 'AES-128-CBC': { + 'cipher': algorithms.AES, + 'keysize': 16, + 'blocksize': 16, + 'mode': modes.CBC + }, + 'DES-EDE3-CBC': { + 'cipher': algorithms.TripleDES, + 'keysize': 24, + 'blocksize': 8, + 'mode': modes.CBC + }, } def __init__(self, msg=None, data=None): @@ -53,9 +65,10 @@ class PKey (object): :param .Message msg: an optional SSH `.Message` containing a public key of this type. - :param str data: an optional string containing a public key of this type + :param str data: an optional string containing a public key + of this type - :raises SSHException: + :raises: `.SSHException` -- if a key cannot be created from the ``data`` or ``msg`` given, or no key was passed in. """ @@ -73,6 +86,8 @@ class PKey (object): return self.asbytes() # noinspection PyUnresolvedReferences + # TODO: The comparison functions should be removed as per: + # https://docs.python.org/3.0/whatsnew/3.0.html#ordering-comparisons def __cmp__(self, other): """ Compare this key to another. Returns 0 if this key is equivalent to @@ -80,13 +95,13 @@ class PKey (object): of the key are compared, so a public key will compare equal to its corresponding private key. - :param .Pkey other: key to compare to. + :param .PKey other: key to compare to. """ hs = hash(self) ho = hash(other) if hs != ho: - return cmp(hs, ho) - return cmp(self.asbytes(), other.asbytes()) + return cmp(hs, ho) # noqa + return cmp(self.asbytes(), other.asbytes()) # noqa def __eq__(self, other): return hash(self) == hash(other) @@ -171,14 +186,15 @@ class PKey (object): is useless on the abstract PKey class. :param str filename: name of the file to read - :param str password: an optional password to use to decrypt the key file, - if it's encrypted + :param str password: + an optional password to use to decrypt the key file, if it's + encrypted :return: a new `.PKey` based on the given private key - :raises IOError: if there was an error reading the file - :raises PasswordRequiredException: if the private key file is + :raises: ``IOError`` -- if there was an error reading the file + :raises: `.PasswordRequiredException` -- if the private key file is encrypted, and ``password`` is ``None`` - :raises SSHException: if the key file is invalid + :raises: `.SSHException` -- if the key file is invalid """ key = cls(filename=filename, password=password) return key @@ -187,19 +203,19 @@ class PKey (object): def from_private_key(cls, file_obj, password=None): """ Create a key object by reading a private key from a file (or file-like) - object. 
If the private key is encrypted and ``password`` is not ``None``, - the given password will be used to decrypt the key (otherwise + object. If the private key is encrypted and ``password`` is not + ``None``, the given password will be used to decrypt the key (otherwise `.PasswordRequiredException` is thrown). - :param file file_obj: the file to read from + :param file_obj: the file-like object to read from :param str password: an optional password to use to decrypt the key, if it's encrypted :return: a new `.PKey` based on the given private key - :raises IOError: if there was an error reading the key - :raises PasswordRequiredException: if the private key file is encrypted, - and ``password`` is ``None`` - :raises SSHException: if the key file is invalid + :raises: ``IOError`` -- if there was an error reading the key + :raises: `.PasswordRequiredException` -- + if the private key file is encrypted, and ``password`` is ``None`` + :raises: `.SSHException` -- if the key file is invalid """ key = cls(file_obj=file_obj, password=password) return key @@ -213,8 +229,8 @@ class PKey (object): :param str password: an optional password to use to encrypt the key file - :raises IOError: if there was an error writing the file - :raises SSHException: if the key is invalid + :raises: ``IOError`` -- if there was an error writing the file + :raises: `.SSHException` -- if the key is invalid """ raise Exception('Not implemented in PKey') @@ -223,11 +239,11 @@ class PKey (object): Write private key contents into a file (or file-like) object. If the password is not ``None``, the key is encrypted before writing. - :param file file_obj: the file object to write into + :param file_obj: the file-like object to write into :param str password: an optional password to use to encrypt the key - :raises IOError: if there was an error writing to the file - :raises SSHException: if the key is invalid + :raises: ``IOError`` -- if there was an error writing to the file + :raises: `.SSHException` -- if the key is invalid """ raise Exception('Not implemented in PKey') @@ -236,20 +252,21 @@ class PKey (object): Read an SSH2-format private key file, looking for a string of the type ``"BEGIN xxx PRIVATE KEY"`` for some ``xxx``, base64-decode the text we find, and return it as a string. If the private key is encrypted and - ``password`` is not ``None``, the given password will be used to decrypt - the key (otherwise `.PasswordRequiredException` is thrown). + ``password`` is not ``None``, the given password will be used to + decrypt the key (otherwise `.PasswordRequiredException` is thrown). - :param str tag: ``"RSA"`` or ``"DSA"``, the tag used to mark the data block. + :param str tag: ``"RSA"`` or ``"DSA"``, the tag used to mark the + data block. :param str filename: name of the file to read. :param str password: an optional password to use to decrypt the key file, if it's encrypted. :return: data blob (`str`) that makes up the private key. - :raises IOError: if there was an error reading the file. - :raises PasswordRequiredException: if the private key file is + :raises: ``IOError`` -- if there was an error reading the file. + :raises: `.PasswordRequiredException` -- if the private key file is encrypted, and ``password`` is ``None``. - :raises SSHException: if the key file is invalid. + :raises: `.SSHException` -- if the key file is invalid. 
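Typical usage of the loaders documented above, for illustration only (the key path and passphrase are placeholders):

from paramiko import RSAKey
from paramiko.ssh_exception import PasswordRequiredException, SSHException

try:
    key = RSAKey.from_private_key_file('/home/user/.ssh/id_rsa')
except PasswordRequiredException:
    # encrypted keyfile: retry with the passphrase
    key = RSAKey.from_private_key_file('/home/user/.ssh/id_rsa',
                                       password='correct horse battery staple')
except SSHException as e:
    print('not a valid RSA private key file: %s' % e)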
""" with open(filename, 'r') as f: data = self._read_private_key(tag, f, password) @@ -258,7 +275,8 @@ class PKey (object): def _read_private_key(self, tag, f, password=None): lines = f.readlines() start = 0 - while (start < len(lines)) and (lines[start].strip() != '-----BEGIN ' + tag + ' PRIVATE KEY-----'): + beginning_of_key = '-----BEGIN ' + tag + ' PRIVATE KEY-----' + while start < len(lines) and lines[start].strip() != beginning_of_key: start += 1 if start >= len(lines): raise SSHException('not a valid ' + tag + ' private key file') @@ -273,7 +291,8 @@ class PKey (object): start += 1 # find end end = start - while (lines[end].strip() != '-----END ' + tag + ' PRIVATE KEY-----') and (end < len(lines)): + ending_of_key = '-----END ' + tag + ' PRIVATE KEY-----' + while end < len(lines) and lines[end].strip() != ending_of_key: end += 1 # if we trudged to the end of the file, just try to cope. try: @@ -285,14 +304,17 @@ class PKey (object): return data # encrypted keyfile: will need a password if headers['proc-type'] != '4,ENCRYPTED': - raise SSHException('Unknown private key structure "%s"' % headers['proc-type']) + raise SSHException( + 'Unknown private key structure "%s"' % headers['proc-type']) try: encryption_type, saltstr = headers['dek-info'].split(',') except: raise SSHException("Can't parse DEK-info in private key file") if encryption_type not in self._CIPHER_TABLE: - raise SSHException('Unknown private key cipher "%s"' % encryption_type) - # if no password was passed in, raise an exception pointing out that we need one + raise SSHException( + 'Unknown private key cipher "%s"' % encryption_type) + # if no password was passed in, + # raise an exception pointing out that we need one if password is None: raise PasswordRequiredException('Private key file is encrypted') cipher = self._CIPHER_TABLE[encryption_type]['cipher'] @@ -300,50 +322,38 @@ class PKey (object): mode = self._CIPHER_TABLE[encryption_type]['mode'] salt = unhexlify(b(saltstr)) key = util.generate_key_bytes(md5, salt, password, keysize) - return cipher.new(key, mode, salt).decrypt(data) + decryptor = Cipher( + cipher(key), mode(salt), backend=default_backend() + ).decryptor() + return decryptor.update(data) + decryptor.finalize() - def _write_private_key_file(self, tag, filename, data, password=None): + def _write_private_key_file(self, filename, key, format, password=None): """ Write an SSH2-format private key file in a form that can be read by paramiko or openssh. If no password is given, the key is written in a trivially-encoded format (base64) which is completely insecure. If a password is given, DES-EDE3-CBC is used. - :param str tag: ``"RSA"`` or ``"DSA"``, the tag used to mark the data block. - :param file filename: name of the file to write. + :param str tag: + ``"RSA"`` or ``"DSA"``, the tag used to mark the data block. + :param filename: name of the file to write. :param str data: data blob that makes up the private key. :param str password: an optional password to use to encrypt the file. - :raises IOError: if there was an error writing the file. + :raises: ``IOError`` -- if there was an error writing the file. """ - with open(filename, 'w', o600) as f: - # grrr... 
the mode doesn't always take hold + with open(filename, 'w') as f: os.chmod(filename, o600) - self._write_private_key(tag, f, data, password) - - def _write_private_key(self, tag, f, data, password=None): - f.write('-----BEGIN %s PRIVATE KEY-----\n' % tag) - if password is not None: - cipher_name = list(self._CIPHER_TABLE.keys())[0] - cipher = self._CIPHER_TABLE[cipher_name]['cipher'] - keysize = self._CIPHER_TABLE[cipher_name]['keysize'] - blocksize = self._CIPHER_TABLE[cipher_name]['blocksize'] - mode = self._CIPHER_TABLE[cipher_name]['mode'] - salt = os.urandom(blocksize) - key = util.generate_key_bytes(md5, salt, password, keysize) - if len(data) % blocksize != 0: - n = blocksize - len(data) % blocksize - #data += os.urandom(n) - # that would make more sense ^, but it confuses openssh. - data += zero_byte * n - data = cipher.new(key, mode, salt).encrypt(data) - f.write('Proc-Type: 4,ENCRYPTED\n') - f.write('DEK-Info: %s,%s\n' % (cipher_name, u(hexlify(salt)).upper())) - f.write('\n') - s = u(encodebytes(data)) - # re-wrap to 64-char lines - s = ''.join(s.split('\n')) - s = '\n'.join([s[i: i + 64] for i in range(0, len(s), 64)]) - f.write(s) - f.write('\n') - f.write('-----END %s PRIVATE KEY-----\n' % tag) + self._write_private_key(f, key, format) + + def _write_private_key(self, f, key, format, password=None): + if password is None: + encryption = serialization.NoEncryption() + else: + encryption = serialization.BestEncryption(password) + + f.write(key.private_bytes( + serialization.Encoding.PEM, + format, + encryption + ).decode()) diff --git a/paramiko/primes.py b/paramiko/primes.py index 7415c182..48a34e53 100644 --- a/paramiko/primes.py +++ b/paramiko/primes.py @@ -25,7 +25,7 @@ import os from paramiko import util from paramiko.py3compat import byte_mask, long from paramiko.ssh_exception import SSHException -from paramiko.common import * +from paramiko.common import * # noqa def _roll_random(n): @@ -62,7 +62,8 @@ class ModulusPack (object): self.discarded = [] def _parse_modulus(self, line): - timestamp, mod_type, tests, tries, size, generator, modulus = line.split() + timestamp, mod_type, tests, tries, size, generator, modulus = \ + line.split() mod_type = int(mod_type) tests = int(tests) tries = int(tries) @@ -74,8 +75,13 @@ class ModulusPack (object): # type 2 (meets basic structural requirements) # test 4 (more than just a small-prime sieve) # tries < 100 if test & 4 (at least 100 tries of miller-rabin) - if (mod_type < 2) or (tests < 4) or ((tests & 4) and (tests < 8) and (tries < 100)): - self.discarded.append((modulus, 'does not meet basic requirements')) + if ( + mod_type < 2 or + tests < 4 or + (tests & 4 and tests < 8 and tries < 100) + ): + self.discarded.append( + (modulus, 'does not meet basic requirements')) return if generator == 0: generator = 2 @@ -85,7 +91,8 @@ class ModulusPack (object): # this is okay. 
bl = util.bit_length(modulus) if (bl != size) and (bl != size + 1): - self.discarded.append((modulus, 'incorrectly reported bit length %d' % size)) + self.discarded.append( + (modulus, 'incorrectly reported bit length %d' % size)) return if bl not in self.pack: self.pack[bl] = [] @@ -113,12 +120,12 @@ class ModulusPack (object): good = -1 # find nearest bitsize >= preferred for b in bitsizes: - if (b >= prefer) and (b < max) and (b < good or good == -1): + if (b >= prefer) and (b <= max) and (b < good or good == -1): good = b # if that failed, find greatest bitsize >= min if good == -1: for b in bitsizes: - if (b >= min) and (b < max) and (b > good): + if (b >= min) and (b <= max) and (b > good): good = b if good == -1: # their entire (min, max) range has no intersection with our range. diff --git a/paramiko/proxy.py b/paramiko/proxy.py index ca602c4c..c4ec627c 100644 --- a/paramiko/proxy.py +++ b/paramiko/proxy.py @@ -17,11 +17,9 @@ # 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -from datetime import datetime import os from shlex import split as shlsplit import signal -from subprocess import Popen, PIPE from select import select import socket import time @@ -38,7 +36,7 @@ class ProxyCommand(ClosingContextManager): `.Transport` and `.Packetizer` classes. Using this class instead of a regular socket makes it possible to talk with a Popen'd command that will proxy traffic between the client and a server hosted in another machine. - + Instances of this class may be used as context managers. """ def __init__(self, command_line): @@ -49,10 +47,13 @@ class ProxyCommand(ClosingContextManager): :param str command_line: the command that should be executed and used as the proxy. """ + # NOTE: subprocess import done lazily so platforms without it (e.g. + # GAE) can still import us during overall Paramiko load. + from subprocess import Popen, PIPE self.cmd = shlsplit(command_line) - self.process = Popen(self.cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE) + self.process = Popen(self.cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, + bufsize=0) self.timeout = None - self.buffer = [] def send(self, content): """ @@ -77,11 +78,12 @@ class ProxyCommand(ClosingContextManager): :param int size: how many chars should be read - :return: the length of the read content, as an `int` + :return: the string of bytes read, which may be shorter than requested """ try: + buffer = b'' start = time.time() - while len(self.buffer) < size: + while len(buffer) < size: select_timeout = None if self.timeout is not None: elapsed = (time.time() - start) @@ -92,16 +94,13 @@ class ProxyCommand(ClosingContextManager): r, w, x = select( [self.process.stdout], [], [], select_timeout) if r and r[0] == self.process.stdout: - b = os.read( - self.process.stdout.fileno(), size - len(self.buffer)) - # Store in class-level buffer for persistence across - # timeouts; this makes us act more like a real socket - # (where timeouts don't actually drop data.) 
- self.buffer.extend(b) - result = ''.join(self.buffer) - self.buffer = [] - return result + buffer += os.read( + self.process.stdout.fileno(), size - len(buffer)) + return buffer except socket.timeout: + if buffer: + # Don't raise socket.timeout, return partial result instead + return buffer raise # socket.timeout is a subclass of IOError except IOError as e: raise ProxyCommandFailure(' '.join(self.cmd), e.strerror) @@ -109,5 +108,14 @@ class ProxyCommand(ClosingContextManager): def close(self): os.kill(self.process.pid, signal.SIGTERM) + @property + def closed(self): + return self.process.returncode is not None + + @property + def _closed(self): + # Concession to Python 3 socket-like API + return self.closed + def settimeout(self, timeout): self.timeout = timeout diff --git a/paramiko/py3compat.py b/paramiko/py3compat.py index 57c096b2..095b0d09 100644 --- a/paramiko/py3compat.py +++ b/paramiko/py3compat.py @@ -1,26 +1,30 @@ import sys import base64 -__all__ = ['PY2', 'string_types', 'integer_types', 'text_type', 'bytes_types', 'bytes', 'long', 'input', - 'decodebytes', 'encodebytes', 'bytestring', 'byte_ord', 'byte_chr', 'byte_mask', - 'b', 'u', 'b2s', 'StringIO', 'BytesIO', 'is_callable', 'MAXSIZE', 'next'] +__all__ = ['PY2', 'string_types', 'integer_types', 'text_type', 'bytes_types', + 'bytes', 'long', 'input', 'decodebytes', 'encodebytes', + 'bytestring', 'byte_ord', 'byte_chr', 'byte_mask', 'b', 'u', 'b2s', + 'StringIO', 'BytesIO', 'is_callable', 'MAXSIZE', + 'next', 'builtins'] PY2 = sys.version_info[0] < 3 if PY2: - string_types = basestring - text_type = unicode + string_types = basestring # NOQA + text_type = unicode # NOQA bytes_types = str bytes = str - integer_types = (int, long) - long = long - input = raw_input + integer_types = (int, long) # NOQA + long = long # NOQA + input = raw_input # NOQA decodebytes = base64.decodestring encodebytes = base64.encodestring + import __builtin__ as builtins + def bytestring(s): # NOQA - if isinstance(s, unicode): + if isinstance(s, unicode): # NOQA return s.encode('utf-8') return s @@ -37,9 +41,9 @@ if PY2: """cast unicode or bytes to bytes""" if isinstance(s, str): return s - elif isinstance(s, unicode): + elif isinstance(s, unicode): # NOQA return s.encode(encoding) - elif isinstance(s, buffer): + elif isinstance(s, buffer): # NOQA return s else: raise TypeError("Expected unicode or bytes, got %r" % s) @@ -49,9 +53,9 @@ if PY2: """cast bytes or unicode to unicode""" if isinstance(s, str): return s.decode(encoding) - elif isinstance(s, unicode): + elif isinstance(s, unicode): # NOQA return s - elif isinstance(s, buffer): + elif isinstance(s, buffer): # NOQA return s.decode(encoding) else: raise TypeError("Expected unicode or bytes, got %r" % s) @@ -102,6 +106,7 @@ if PY2: else: import collections import struct + import builtins string_types = str text_type = str bytes = bytes diff --git a/paramiko/resource.py b/paramiko/resource.py index 9809afbe..5fed22ad 100644 --- a/paramiko/resource.py +++ b/paramiko/resource.py @@ -27,30 +27,30 @@ class ResourceManager (object): """ A registry of objects and resources that should be closed when those objects are deleted. - + This is meant to be a safer alternative to Python's ``__del__`` method, which can cause reference cycles to never be collected. Objects registered with the ResourceManager can be collected but still free resources when they die. 
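Stepping back to the paramiko/proxy.py changes above: a ProxyCommand instance is normally handed to a client as its socket-like transport. An illustrative sketch (host names, user name, and the proxy command line are placeholders):

import paramiko

# tunnel through a jump host, the same way OpenSSH's ProxyCommand does
proxy = paramiko.ProxyCommand('ssh -q -W target.example.com:22 jumphost.example.com')

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('target.example.com', username='user', sock=proxy)
stdin, stdout, stderr = client.exec_command('uptime')
print(stdout.read())
client.close()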
- + Resources are registered using `register`, and when an object is garbage collected, each registered resource is closed by having its ``close()`` method called. Multiple resources may be registered per object, but a resource will only be closed once, even if multiple objects register it. (The last object to register it wins.) """ - + def __init__(self): self._table = {} - + def register(self, obj, resource): """ Register a resource to be closed with an object is collected. - + When the given ``obj`` is garbage-collected by the Python interpreter, - the ``resource`` will be closed by having its ``close()`` method called. - Any exceptions are ignored. - + the ``resource`` will be closed by having its ``close()`` method + called. Any exceptions are ignored. + :param object obj: the object to track :param object resource: the resource to close when the object is collected diff --git a/paramiko/rsakey.py b/paramiko/rsakey.py index 4ebd8354..b5107515 100644 --- a/paramiko/rsakey.py +++ b/paramiko/rsakey.py @@ -20,34 +20,26 @@ RSA keys. """ -import os -from hashlib import sha1 +from cryptography.exceptions import InvalidSignature +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives import hashes, serialization +from cryptography.hazmat.primitives.asymmetric import rsa, padding -from Crypto.PublicKey import RSA - -from paramiko import util -from paramiko.common import max_byte, zero_byte, one_byte from paramiko.message import Message -from paramiko.ber import BER, BERException from paramiko.pkey import PKey -from paramiko.py3compat import long +from paramiko.py3compat import PY2 from paramiko.ssh_exception import SSHException -SHA1_DIGESTINFO = b'\x30\x21\x30\x09\x06\x05\x2b\x0e\x03\x02\x1a\x05\x00\x04\x14' - -class RSAKey (PKey): +class RSAKey(PKey): """ Representation of an RSA key which can be used to sign and verify SSH2 data. """ - def __init__(self, msg=None, data=None, filename=None, password=None, vals=None, file_obj=None): - self.n = None - self.e = None - self.d = None - self.p = None - self.q = None + def __init__(self, msg=None, data=None, filename=None, password=None, + key=None, file_obj=None): + self.key = None if file_obj is not None: self._from_private_key(file_obj, password) return @@ -56,32 +48,50 @@ class RSAKey (PKey): return if (msg is None) and (data is not None): msg = Message(data) - if vals is not None: - self.e, self.n = vals + if key is not None: + self.key = key else: if msg is None: raise SSHException('Key object may not be empty') if msg.get_text() != 'ssh-rsa': raise SSHException('Invalid key') - self.e = msg.get_mpint() - self.n = msg.get_mpint() - self.size = util.bit_length(self.n) + self.key = rsa.RSAPublicNumbers( + e=msg.get_mpint(), n=msg.get_mpint() + ).public_key(default_backend()) + + @property + def size(self): + return self.key.key_size + + @property + def public_numbers(self): + if isinstance(self.key, rsa.RSAPrivateKey): + return self.key.private_numbers().public_numbers + else: + return self.key.public_numbers() def asbytes(self): m = Message() m.add_string('ssh-rsa') - m.add_mpint(self.e) - m.add_mpint(self.n) + m.add_mpint(self.public_numbers.e) + m.add_mpint(self.public_numbers.n) return m.asbytes() def __str__(self): - return self.asbytes() + # NOTE: as per inane commentary in #853, this appears to be the least + # crummy way to get a representation that prints identical to Python + # 2's previous behavior, on both interpreters. 
+ # TODO: replace with a nice clean fingerprint display or something + if PY2: + # Can't just return the .decode below for Py2 because stuff still + # tries stuffing it into ASCII for whatever godforsaken reason + return self.asbytes() + else: + return self.asbytes().decode('utf8', errors='ignore') def __hash__(self): - h = hash(self.get_name()) - h = h * 37 + hash(self.e) - h = h * 37 + hash(self.n) - return hash(h) + return hash((self.get_name(), self.public_numbers.e, + self.public_numbers.n)) def get_name(self): return 'ssh-rsa' @@ -90,12 +100,16 @@ class RSAKey (PKey): return self.size def can_sign(self): - return self.d is not None + return isinstance(self.key, rsa.RSAPrivateKey) def sign_ssh_data(self, data): - digest = sha1(data).digest() - rsa = RSA.construct((long(self.n), long(self.e), long(self.d))) - sig = util.deflate_long(rsa.sign(self._pkcs1imify(digest), bytes())[0], 0) + signer = self.key.signer( + padding=padding.PKCS1v15(), + algorithm=hashes.SHA1(), + ) + signer.update(data) + sig = signer.finalize() + m = Message() m.add_string('ssh-rsa') m.add_string(sig) @@ -104,32 +118,38 @@ class RSAKey (PKey): def verify_ssh_sig(self, data, msg): if msg.get_text() != 'ssh-rsa': return False - sig = util.inflate_long(msg.get_binary(), True) - # verify the signature by SHA'ing the data and encrypting it using the - # public key. some wackiness ensues where we "pkcs1imify" the 20-byte - # hash into a string as long as the RSA key. - hash_obj = util.inflate_long(self._pkcs1imify(sha1(data).digest()), True) - rsa = RSA.construct((long(self.n), long(self.e))) - return rsa.verify(hash_obj, (sig,)) - - def _encode_key(self): - if (self.p is None) or (self.q is None): - raise SSHException('Not enough key info to write private key file') - keylist = [0, self.n, self.e, self.d, self.p, self.q, - self.d % (self.p - 1), self.d % (self.q - 1), - util.mod_inverse(self.q, self.p)] + key = self.key + if isinstance(key, rsa.RSAPrivateKey): + key = key.public_key() + + verifier = key.verifier( + signature=msg.get_binary(), + padding=padding.PKCS1v15(), + algorithm=hashes.SHA1(), + ) + verifier.update(data) try: - b = BER() - b.encode(keylist) - except BERException: - raise SSHException('Unable to create ber encoding of key') - return b.asbytes() + verifier.verify() + except InvalidSignature: + return False + else: + return True def write_private_key_file(self, filename, password=None): - self._write_private_key_file('RSA', filename, self._encode_key(), password) + self._write_private_key_file( + filename, + self.key, + serialization.PrivateFormat.TraditionalOpenSSL, + password=password + ) def write_private_key(self, file_obj, password=None): - self._write_private_key('RSA', file_obj, self._encode_key(), password) + self._write_private_key( + file_obj, + self.key, + serialization.PrivateFormat.TraditionalOpenSSL, + password=password + ) @staticmethod def generate(bits, progress_func=None): @@ -138,28 +158,15 @@ class RSAKey (PKey): generate a new host key or authentication key. :param int bits: number of bits the generated key should be. - :param function progress_func: - an optional function to call at key points in key generation (used - by ``pyCrypto.PublicKey``). + :param progress_func: Unused :return: new `.RSAKey` private key """ - rsa = RSA.generate(bits, os.urandom, progress_func) - key = RSAKey(vals=(rsa.e, rsa.n)) - key.d = rsa.d - key.p = rsa.p - key.q = rsa.q - return key - - ### internals... 
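With the cryptography-backed RSAKey above, generating, signing with, and persisting a fresh key might look like the following illustrative sketch (file names and passphrase are placeholders; the rewind-before-verify step reflects how Message read position works):

from paramiko import RSAKey

key = RSAKey.generate(2048)

# sign some data and verify it with the same key's public half
sig = key.sign_ssh_data(b'payload to sign')
sig.rewind()
assert key.verify_ssh_sig(b'payload to sign', sig)

# persist the private key (encrypted) and a public-key line
key.write_private_key_file('test_rsa.key', password='sekrit')
with open('test_rsa.pub', 'w') as f:
    f.write('%s %s' % (key.get_name(), key.get_base64()))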
+ key = rsa.generate_private_key( + public_exponent=65537, key_size=bits, backend=default_backend() + ) + return RSAKey(key=key) - def _pkcs1imify(self, data): - """ - turn a 20-byte SHA1 hash into a blob of data as large as the key's N, - using PKCS1's \"emsa-pkcs1-v1_5\" encoding. totally bizarre. - """ - size = len(util.deflate_long(self.n, 0)) - filler = max_byte * (size - len(SHA1_DIGESTINFO) - len(data) - 3) - return zero_byte + one_byte + filler + zero_byte + SHA1_DIGESTINFO + data + # ...internals... def _from_private_key_file(self, filename, password): data = self._read_private_key_file('RSA', filename, password) @@ -170,18 +177,12 @@ class RSAKey (PKey): self._decode_key(data) def _decode_key(self, data): - # private key file contains: - # RSAPrivateKey = { version = 0, n, e, d, p, q, d mod p-1, d mod q-1, q**-1 mod p } try: - keylist = BER(data).decode() - except BERException: - raise SSHException('Unable to parse key file') - if (type(keylist) is not list) or (len(keylist) < 4) or (keylist[0] != 0): - raise SSHException('Not a valid RSA private key file (bad ber encoding)') - self.n = keylist[1] - self.e = keylist[2] - self.d = keylist[3] - # not really needed - self.p = keylist[4] - self.q = keylist[5] - self.size = util.bit_length(self.n) + key = serialization.load_der_private_key( + data, password=None, backend=default_backend() + ) + except ValueError as e: + raise SSHException(str(e)) + + assert isinstance(key, rsa.RSAPrivateKey) + self.key = key diff --git a/paramiko/server.py b/paramiko/server.py index bf5039a2..adc606bf 100644 --- a/paramiko/server.py +++ b/paramiko/server.py @@ -22,7 +22,10 @@ import threading from paramiko import util -from paramiko.common import DEBUG, ERROR, OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED, AUTH_FAILED +from paramiko.common import ( + DEBUG, ERROR, OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED, AUTH_FAILED, + AUTH_SUCCESSFUL, +) from paramiko.py3compat import string_types @@ -69,7 +72,7 @@ class ServerInterface (object): - ``OPEN_FAILED_CONNECT_FAILED`` - ``OPEN_FAILED_UNKNOWN_CHANNEL_TYPE`` - ``OPEN_FAILED_RESOURCE_SHORTAGE`` - + The default implementation always returns ``OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED``. @@ -103,15 +106,15 @@ class ServerInterface (object): Determine if a client may open channels with no (further) authentication. - Return `.AUTH_FAILED` if the client must authenticate, or - `.AUTH_SUCCESSFUL` if it's okay for the client to not + Return ``AUTH_FAILED`` if the client must authenticate, or + ``AUTH_SUCCESSFUL`` if it's okay for the client to not authenticate. - The default implementation always returns `.AUTH_FAILED`. + The default implementation always returns ``AUTH_FAILED``. :param str username: the username of the client. :return: - `.AUTH_FAILED` if the authentication fails; `.AUTH_SUCCESSFUL` if + ``AUTH_FAILED`` if the authentication fails; ``AUTH_SUCCESSFUL`` if it succeeds. :rtype: int """ @@ -122,21 +125,21 @@ class ServerInterface (object): Determine if a given username and password supplied by the client is acceptable for use in authentication. 
- Return `.AUTH_FAILED` if the password is not accepted, - `.AUTH_SUCCESSFUL` if the password is accepted and completes - the authentication, or `.AUTH_PARTIALLY_SUCCESSFUL` if your + Return ``AUTH_FAILED`` if the password is not accepted, + ``AUTH_SUCCESSFUL`` if the password is accepted and completes + the authentication, or ``AUTH_PARTIALLY_SUCCESSFUL`` if your authentication is stateful, and this key is accepted for authentication, but more authentication is required. (In this latter case, `get_allowed_auths` will be called to report to the client what options it has for continuing the authentication.) - The default implementation always returns `.AUTH_FAILED`. + The default implementation always returns ``AUTH_FAILED``. :param str username: the username of the authenticating client. :param str password: the password given by the client. :return: - `.AUTH_FAILED` if the authentication fails; `.AUTH_SUCCESSFUL` if - it succeeds; `.AUTH_PARTIALLY_SUCCESSFUL` if the password auth is + ``AUTH_FAILED`` if the authentication fails; ``AUTH_SUCCESSFUL`` if + it succeeds; ``AUTH_PARTIALLY_SUCCESSFUL`` if the password auth is successful, but authentication must continue. :rtype: int """ @@ -149,9 +152,9 @@ class ServerInterface (object): check the username and key and decide if you would accept a signature made using this key. - Return `.AUTH_FAILED` if the key is not accepted, - `.AUTH_SUCCESSFUL` if the key is accepted and completes the - authentication, or `.AUTH_PARTIALLY_SUCCESSFUL` if your + Return ``AUTH_FAILED`` if the key is not accepted, + ``AUTH_SUCCESSFUL`` if the key is accepted and completes the + authentication, or ``AUTH_PARTIALLY_SUCCESSFUL`` if your authentication is stateful, and this password is accepted for authentication, but more authentication is required. (In this latter case, `get_allowed_auths` will be called to report to the client what @@ -160,54 +163,54 @@ class ServerInterface (object): Note that you don't have to actually verify any key signtature here. If you're willing to accept the key, Paramiko will do the work of verifying the client's signature. - - The default implementation always returns `.AUTH_FAILED`. + + The default implementation always returns ``AUTH_FAILED``. :param str username: the username of the authenticating client :param .PKey key: the key object provided by the client :return: - `.AUTH_FAILED` if the client can't authenticate with this key; - `.AUTH_SUCCESSFUL` if it can; `.AUTH_PARTIALLY_SUCCESSFUL` if it + ``AUTH_FAILED`` if the client can't authenticate with this key; + ``AUTH_SUCCESSFUL`` if it can; ``AUTH_PARTIALLY_SUCCESSFUL`` if it can authenticate with this key but must continue with authentication :rtype: int """ return AUTH_FAILED - + def check_auth_interactive(self, username, submethods): """ Begin an interactive authentication challenge, if supported. You should override this method in server mode if you want to support the ``"keyboard-interactive"`` auth type, which requires you to send a series of questions for the client to answer. - - Return `.AUTH_FAILED` if this auth method isn't supported. Otherwise, + + Return ``AUTH_FAILED`` if this auth method isn't supported. Otherwise, you should return an `.InteractiveQuery` object containing the prompts and instructions for the user. The response will be sent via a call to `check_auth_interactive_response`. - - The default implementation always returns `.AUTH_FAILED`. - + + The default implementation always returns ``AUTH_FAILED``. 
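A minimal server-side policy implementing the password path documented above, for illustration only (the credential check is obviously a stand-in):

from paramiko import ServerInterface, AUTH_SUCCESSFUL, AUTH_FAILED

class DemoServer(ServerInterface):
    def get_allowed_auths(self, username):
        return 'password'

    def check_auth_password(self, username, password):
        if username == 'demo' and password == 'demo':   # stand-in check
            return AUTH_SUCCESSFUL
        return AUTH_FAILED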
+ :param str username: the username of the authenticating client :param str submethods: a comma-separated list of methods preferred by the client (usually empty) :return: - `.AUTH_FAILED` if this auth method isn't supported; otherwise an + ``AUTH_FAILED`` if this auth method isn't supported; otherwise an object containing queries for the user :rtype: int or `.InteractiveQuery` """ return AUTH_FAILED - + def check_auth_interactive_response(self, responses): """ Continue or finish an interactive authentication challenge, if supported. You should override this method in server mode if you want to support the ``"keyboard-interactive"`` auth type. - - Return `.AUTH_FAILED` if the responses are not accepted, - `.AUTH_SUCCESSFUL` if the responses are accepted and complete - the authentication, or `.AUTH_PARTIALLY_SUCCESSFUL` if your + + Return ``AUTH_FAILED`` if the responses are not accepted, + ``AUTH_SUCCESSFUL`` if the responses are accepted and complete + the authentication, or ``AUTH_PARTIALLY_SUCCESSFUL`` if your authentication is stateful, and this set of responses is accepted for authentication, but more authentication is required. (In this latter case, `get_allowed_auths` will be called to report to the client what @@ -218,12 +221,12 @@ class ServerInterface (object): client to respond with more answers, calling this method again. This cycle can continue indefinitely. - The default implementation always returns `.AUTH_FAILED`. + The default implementation always returns ``AUTH_FAILED``. :param list responses: list of `str` responses from the client :return: - `.AUTH_FAILED` if the authentication fails; `.AUTH_SUCCESSFUL` if - it succeeds; `.AUTH_PARTIALLY_SUCCESSFUL` if the interactive auth + ``AUTH_FAILED`` if the authentication fails; ``AUTH_SUCCESSFUL`` if + it succeeds; ``AUTH_PARTIALLY_SUCCESSFUL`` if the interactive auth is successful, but authentication must continue; otherwise an object containing queries for the user :rtype: int or `.InteractiveQuery` @@ -240,8 +243,8 @@ class ServerInterface (object): :param str username: The username of the authenticating client :param int gss_authenticated: The result of the krb5 authentication :param str cc_filename: The krb5 client credentials cache filename - :return: `.AUTH_FAILED` if the user is not authenticated otherwise - `.AUTH_SUCCESSFUL` + :return: ``AUTH_FAILED`` if the user is not authenticated otherwise + ``AUTH_SUCCESSFUL`` :rtype: int :note: Kerberos credential delegation is not supported. :see: `.ssh_gss` @@ -250,10 +253,11 @@ class ServerInterface (object): We don't check if the krb5 principal is allowed to log in on the server, because there is no way to do that in python. So if you develop your own SSH server with paramiko for a cetain - plattform like Linux, you should call C{krb5_kuserok()} in your - local kerberos library to make sure that the krb5_principal has - an account on the server and is allowed to log in as a user. - :see: `http://www.unix.com/man-page/all/3/krb5_kuserok/` + plattform like Linux, you should call C{krb5_kuserok()} in + your local kerberos library to make sure that the + krb5_principal has an account on the server and is allowed to + log in as a user. 
+ :see: http://www.unix.com/man-page/all/3/krb5_kuserok/ """ if gss_authenticated == AUTH_SUCCESSFUL: return AUTH_SUCCESSFUL @@ -271,20 +275,21 @@ class ServerInterface (object): :param str username: The username of the authenticating client :param int gss_authenticated: The result of the krb5 authentication :param str cc_filename: The krb5 client credentials cache filename - :return: `.AUTH_FAILED` if the user is not authenticated otherwise - `.AUTH_SUCCESSFUL` + :return: ``AUTH_FAILED`` if the user is not authenticated otherwise + ``AUTH_SUCCESSFUL`` :rtype: int :note: Kerberos credential delegation is not supported. - :see: `.ssh_gss` `.kex_gss` + :see: `.ssh_gss` `.kex_gss` :note: : We are just checking in L{AuthHandler} that the given user is a valid krb5 principal! We don't check if the krb5 principal is allowed to log in on the server, because there is no way to do that in python. So if you develop your own SSH server with paramiko for a cetain - plattform like Linux, you should call C{krb5_kuserok()} in your - local kerberos library to make sure that the krb5_principal has - an account on the server and is allowed to log in as a user. - :see: `http://www.unix.com/man-page/all/3/krb5_kuserok/` + plattform like Linux, you should call C{krb5_kuserok()} in + your local kerberos library to make sure that the + krb5_principal has an account on the server and is allowed + to log in as a user. + :see: http://www.unix.com/man-page/all/3/krb5_kuserok/ """ if gss_authenticated == AUTH_SUCCESSFUL: return AUTH_SUCCESSFUL @@ -296,14 +301,12 @@ class ServerInterface (object): authentication. The default implementation always returns false. - :return: True if GSSAPI authentication is enabled otherwise false - :rtype: Boolean - :see: : `.ssh_gss` + :returns bool: Whether GSSAPI authentication is enabled. + :see: `.ssh_gss` """ UseGSSAPI = False - GSSAPICleanupCredentials = False return UseGSSAPI - + def check_port_forward_request(self, address, port): """ Handle a request for port forwarding. The client is asking that @@ -312,11 +315,11 @@ class ServerInterface (object): address (any address associated with this server) and a port of ``0`` indicates that no specific port is requested (usually the OS will pick a port). - + The default implementation always returns ``False``, rejecting the port forwarding request. If the request is accepted, you should return the port opened for listening. - + :param str address: the requested address :param int port: the requested port :return: @@ -324,18 +327,18 @@ class ServerInterface (object): to reject """ return False - + def cancel_port_forward_request(self, address, port): """ The client would like to cancel a previous port-forwarding request. If the given address and port is being forwarded across this ssh connection, the port should be closed. - + :param str address: the forwarded address :param int port: the forwarded port """ pass - + def check_global_request(self, kind, msg): """ Handle a global request of the given ``kind``. This method is called @@ -354,7 +357,7 @@ class ServerInterface (object): The default implementation always returns ``False``, indicating that it does not support any global requests. - + .. note:: Port forwarding requests are handled separately, in `check_port_forward_request`. @@ -366,10 +369,11 @@ class ServerInterface (object): """ return False - ### Channel requests + # ...Channel requests... 
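For the channel-request hooks that follow, a server granting an interactive shell typically overrides them along these lines (illustrative sketch; it assumes the OPEN_SUCCEEDED and OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED constants exported from the paramiko package):

from paramiko import (
    ServerInterface, OPEN_SUCCEEDED, OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED,
)

class ShellServer(ServerInterface):
    def check_channel_request(self, kind, chanid):
        # only allow interactive sessions
        if kind == 'session':
            return OPEN_SUCCEEDED
        return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED

    def check_channel_pty_request(self, channel, term, width, height,
                                  pixelwidth, pixelheight, modes):
        return True

    def check_channel_shell_request(self, channel):
        return True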
- def check_channel_pty_request(self, channel, term, width, height, pixelwidth, pixelheight, - modes): + def check_channel_pty_request( + self, channel, term, width, height, pixelwidth, pixelheight, + modes): """ Determine if a pseudo-terminal of the given dimensions (usually requested for shell access) can be provided on the given channel. @@ -385,7 +389,7 @@ class ServerInterface (object): :param int pixelheight: height of screen in pixels, if known (may be ``0`` if unknown). :return: - ``True`` if the psuedo-terminal has been allocated; ``False`` + ``True`` if the pseudo-terminal has been allocated; ``False`` otherwise. """ return False @@ -411,20 +415,20 @@ class ServerInterface (object): Determine if a shell command will be executed for the client. If this method returns ``True``, the channel should be connected to the stdin, stdout, and stderr of the shell command. - + The default implementation always returns ``False``. - + :param .Channel channel: the `.Channel` the request arrived on. :param str command: the command to execute. :return: ``True`` if this channel is now hooked up to the stdin, stdout, and stderr of the executing command; ``False`` if the command will not be executed. - + .. versionadded:: 1.1 """ return False - + def check_channel_subsystem_request(self, channel, name): """ Determine if a requested subsystem will be provided to the client on @@ -447,14 +451,16 @@ class ServerInterface (object): ``True`` if this channel is now hooked up to the requested subsystem; ``False`` if that subsystem can't or won't be provided. """ - handler_class, larg, kwarg = channel.get_transport()._get_subsystem_handler(name) + transport = channel.get_transport() + handler_class, larg, kwarg = transport._get_subsystem_handler(name) if handler_class is None: return False handler = handler_class(channel, name, self, *larg, **kwarg) handler.start() return True - def check_channel_window_change_request(self, channel, width, height, pixelwidth, pixelheight): + def check_channel_window_change_request( + self, channel, width, height, pixelwidth, pixelheight): """ Determine if the pseudo-terminal on the given channel can be resized. This only makes sense if a pty was previously allocated on it. @@ -471,15 +477,17 @@ class ServerInterface (object): :return: ``True`` if the terminal was resized; ``False`` if not. """ return False - - def check_channel_x11_request(self, channel, single_connection, auth_protocol, auth_cookie, screen_number): + + def check_channel_x11_request( + self, channel, single_connection, auth_protocol, auth_cookie, + screen_number): """ Determine if the client will be provided with an X11 session. If this method returns ``True``, X11 applications should be routed through new SSH channels, using `.Transport.open_x11_channel`. - + The default implementation always returns ``False``. - + :param .Channel channel: the `.Channel` the X11 request arrived on :param bool single_connection: ``True`` if only a single X11 channel should be opened, else @@ -529,7 +537,7 @@ class ServerInterface (object): - ``OPEN_FAILED_CONNECT_FAILED`` - ``OPEN_FAILED_UNKNOWN_CHANNEL_TYPE`` - ``OPEN_FAILED_RESOURCE_SHORTAGE`` - + The default implementation always returns ``OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED``. @@ -567,14 +575,14 @@ class InteractiveQuery (object): """ A query (set of prompts) for a user during interactive authentication. """ - + def __init__(self, name='', instructions='', *prompts): """ Create a new interactive query to send to the client. 
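As a usage note for the channel-request callbacks being re-wrapped here, a minimal (hypothetical) server that accepts session channels, pretends to allocate a PTY, and allow-lists a single exec command might look like:

    from paramiko import ServerInterface
    from paramiko.common import (
        OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED, OPEN_SUCCEEDED,
    )

    class ShellServer(ServerInterface):
        def check_channel_request(self, kind, chanid):
            if kind == 'session':
                return OPEN_SUCCEEDED
            return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED

        def check_channel_pty_request(self, channel, term, width, height,
                                      pixelwidth, pixelheight, modes):
            return True   # claim the pty was allocated

        def check_channel_exec_request(self, channel, command):
            # Hypothetical policy: only one command is permitted.
            return command in (b'uptime', 'uptime')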
The name and instructions are optional, but are generally displayed to the end user. A list of prompts may be included, or they may be added via the `add_prompt` method. - + :param str name: name of this query :param str instructions: user instructions (usually short) about this query @@ -588,12 +596,12 @@ class InteractiveQuery (object): self.add_prompt(x) else: self.add_prompt(x[0], x[1]) - + def add_prompt(self, prompt, echo=True): """ Add a prompt to this query. The prompt should be a (reasonably short) string. Multiple prompts can be added to the same query. - + :param str prompt: the user prompt :param bool echo: ``True`` (default) if the user's response should be echoed; @@ -621,10 +629,11 @@ class SubsystemHandler (threading.Thread): Create a new handler for a channel. This is used by `.ServerInterface` to start up a new handler when a channel requests this subsystem. You don't need to override this method, but if you do, be sure to pass the - ``channel`` and ``name`` parameters through to the original ``__init__`` - method here. + ``channel`` and ``name`` parameters through to the original + ``__init__`` method here. - :param .Channel channel: the channel associated with this subsystem request. + :param .Channel channel: the channel associated with this + subsystem request. :param str name: name of the requested subsystem. :param .ServerInterface server: the server object for the session that started this subsystem @@ -634,7 +643,7 @@ class SubsystemHandler (threading.Thread): self.__transport = channel.get_transport() self.__name = name self.__server = server - + def get_server(self): """ Return the `.ServerInterface` object associated with this channel and @@ -644,11 +653,16 @@ class SubsystemHandler (threading.Thread): def _run(self): try: - self.__transport._log(DEBUG, 'Starting handler for subsystem %s' % self.__name) + self.__transport._log( + DEBUG, 'Starting handler for subsystem %s' % self.__name) self.start_subsystem(self.__name, self.__transport, self.__channel) except Exception as e: - self.__transport._log(ERROR, 'Exception in subsystem handler for "%s": %s' % - (self.__name, str(e))) + self.__transport._log( + ERROR, + 'Exception in subsystem handler for "{0}": {1}'.format( + self.__name, e + ) + ) self.__transport._log(ERROR, util.tb_strings()) try: self.finish_subsystem() @@ -663,8 +677,8 @@ class SubsystemHandler (threading.Thread): subsystem is finished, this method will return. After this method returns, the channel is closed. - The combination of ``transport`` and ``channel`` are unique; this handler - corresponds to exactly one `.Channel` on one `.Transport`. + The combination of ``transport`` and ``channel`` are unique; this + handler corresponds to exactly one `.Channel` on one `.Transport`. .. note:: It is the responsibility of this method to exit if the underlying @@ -676,7 +690,8 @@ class SubsystemHandler (threading.Thread): :param str name: name of the requested subsystem. :param .Transport transport: the server-mode `.Transport`. - :param .Channel channel: the channel associated with this subsystem request. + :param .Channel channel: the channel associated with this subsystem + request. 
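Since ``SubsystemHandler`` construction and ``start_subsystem`` are documented here, a tiny (hypothetical) echo subsystem shows the intended shape; each client request gets its own handler thread and channel:

    from paramiko import SubsystemHandler

    class EchoHandler(SubsystemHandler):
        def start_subsystem(self, name, transport, channel):
            # Runs in the handler's own thread; return when the channel closes.
            while True:
                data = channel.recv(1024)
                if not data:
                    break
                channel.send(data)

    # Registration on a server-mode Transport 't' (the name is arbitrary):
    # t.set_subsystem_handler('echo', EchoHandler)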
""" pass diff --git a/paramiko/sftp.py b/paramiko/sftp.py index f44a804d..e6786d10 100644 --- a/paramiko/sftp.py +++ b/paramiko/sftp.py @@ -26,15 +26,17 @@ from paramiko.message import Message from paramiko.py3compat import byte_chr, byte_ord -CMD_INIT, CMD_VERSION, CMD_OPEN, CMD_CLOSE, CMD_READ, CMD_WRITE, CMD_LSTAT, CMD_FSTAT, \ - CMD_SETSTAT, CMD_FSETSTAT, CMD_OPENDIR, CMD_READDIR, CMD_REMOVE, CMD_MKDIR, \ - CMD_RMDIR, CMD_REALPATH, CMD_STAT, CMD_RENAME, CMD_READLINK, CMD_SYMLINK = range(1, 21) +CMD_INIT, CMD_VERSION, CMD_OPEN, CMD_CLOSE, CMD_READ, CMD_WRITE, CMD_LSTAT, \ + CMD_FSTAT, CMD_SETSTAT, CMD_FSETSTAT, CMD_OPENDIR, CMD_READDIR, \ + CMD_REMOVE, CMD_MKDIR, CMD_RMDIR, CMD_REALPATH, CMD_STAT, CMD_RENAME, \ + CMD_READLINK, CMD_SYMLINK = range(1, 21) CMD_STATUS, CMD_HANDLE, CMD_DATA, CMD_NAME, CMD_ATTRS = range(101, 106) CMD_EXTENDED, CMD_EXTENDED_REPLY = range(200, 202) SFTP_OK = 0 -SFTP_EOF, SFTP_NO_SUCH_FILE, SFTP_PERMISSION_DENIED, SFTP_FAILURE, SFTP_BAD_MESSAGE, \ - SFTP_NO_CONNECTION, SFTP_CONNECTION_LOST, SFTP_OP_UNSUPPORTED = range(1, 9) +SFTP_EOF, SFTP_NO_SUCH_FILE, SFTP_PERMISSION_DENIED, SFTP_FAILURE, \ + SFTP_BAD_MESSAGE, SFTP_NO_CONNECTION, SFTP_CONNECTION_LOST, \ + SFTP_OP_UNSUPPORTED = range(1, 9) SFTP_DESC = ['Success', 'End of file', @@ -98,7 +100,7 @@ class BaseSFTP (object): self.sock = None self.ultra_debug = False - ### internals... + # ...internals... def _send_version(self): self._send_packet(CMD_INIT, struct.pack('>I', _VERSION)) @@ -124,7 +126,7 @@ class BaseSFTP (object): msg.add(*extension_pairs) self._send_packet(CMD_VERSION, msg) return version - + def _log(self, level, msg, *args): self.logger.log(level, msg, *args) @@ -154,7 +156,7 @@ class BaseSFTP (object): break else: x = self.sock.recv(n) - + if len(x) == 0: raise EOFError() out += x @@ -162,7 +164,6 @@ class BaseSFTP (object): return out def _send_packet(self, t, packet): - #self._log(DEBUG2, 'write: %s (len=%d)' % (CMD_NAMES.get(t, '0x%02x' % t), len(packet))) packet = asbytes(packet) out = struct.pack('>I', len(packet) + 1) + byte_chr(t) + packet if self.ultra_debug: @@ -181,6 +182,5 @@ class BaseSFTP (object): self._log(DEBUG, util.format_binary(data, 'IN: ')) if size > 0: t = byte_ord(data[0]) - #self._log(DEBUG2, 'read: %s (len=%d)' % (CMD_NAMES.get(t), '0x%02x' % t, len(data)-1)) return t, data[1:] return 0, bytes() diff --git a/paramiko/sftp_attr.py b/paramiko/sftp_attr.py index cf48f654..5597948a 100644 --- a/paramiko/sftp_attr.py +++ b/paramiko/sftp_attr.py @@ -84,7 +84,7 @@ class SFTPAttributes (object): def __repr__(self): return '<SFTPAttributes: %s>' % self._debug_str() - ### internals... + # ...internals... @classmethod def _from_msg(cls, msg, filename=None, longname=None): attr = cls() @@ -189,9 +189,12 @@ class SFTPAttributes (object): ks = 's' else: ks = '?' 
- ks += self._rwx((self.st_mode & o700) >> 6, self.st_mode & stat.S_ISUID) - ks += self._rwx((self.st_mode & o70) >> 3, self.st_mode & stat.S_ISGID) - ks += self._rwx(self.st_mode & 7, self.st_mode & stat.S_ISVTX, True) + ks += self._rwx( + (self.st_mode & o700) >> 6, self.st_mode & stat.S_ISUID) + ks += self._rwx( + (self.st_mode & o70) >> 3, self.st_mode & stat.S_ISGID) + ks += self._rwx( + self.st_mode & 7, self.st_mode & stat.S_ISVTX, True) else: ks = '?---------' # compute display date @@ -201,20 +204,26 @@ class SFTPAttributes (object): else: if abs(time.time() - self.st_mtime) > 15552000: # (15552000 = 6 months) - datestr = time.strftime('%d %b %Y', time.localtime(self.st_mtime)) + datestr = time.strftime( + '%d %b %Y', time.localtime(self.st_mtime)) else: - datestr = time.strftime('%d %b %H:%M', time.localtime(self.st_mtime)) + datestr = time.strftime( + '%d %b %H:%M', time.localtime(self.st_mtime)) filename = getattr(self, 'filename', '?') # not all servers support uid/gid uid = self.st_uid gid = self.st_gid + size = self.st_size if uid is None: uid = 0 if gid is None: gid = 0 + if size is None: + size = 0 - return '%s 1 %-8d %-8d %8d %-12s %s' % (ks, uid, gid, self.st_size, datestr, filename) + return '%s 1 %-8d %-8d %8d %-12s %s' % ( + ks, uid, gid, size, datestr, filename) def asbytes(self): return b(str(self)) diff --git a/paramiko/sftp_client.py b/paramiko/sftp_client.py index 2979544f..cf7785cf 100644 --- a/paramiko/sftp_client.py +++ b/paramiko/sftp_client.py @@ -28,13 +28,17 @@ from paramiko import util from paramiko.channel import Channel from paramiko.message import Message from paramiko.common import INFO, DEBUG, o777 -from paramiko.py3compat import bytestring, b, u, long, string_types, bytes_types -from paramiko.sftp import BaseSFTP, CMD_OPENDIR, CMD_HANDLE, SFTPError, CMD_READDIR, \ - CMD_NAME, CMD_CLOSE, SFTP_FLAG_READ, SFTP_FLAG_WRITE, SFTP_FLAG_CREATE, \ - SFTP_FLAG_TRUNC, SFTP_FLAG_APPEND, SFTP_FLAG_EXCL, CMD_OPEN, CMD_REMOVE, \ - CMD_RENAME, CMD_MKDIR, CMD_RMDIR, CMD_STAT, CMD_ATTRS, CMD_LSTAT, \ - CMD_SYMLINK, CMD_SETSTAT, CMD_READLINK, CMD_REALPATH, CMD_STATUS, CMD_EXTENDED, SFTP_OK, \ - SFTP_EOF, SFTP_NO_SUCH_FILE, SFTP_PERMISSION_DENIED +from paramiko.py3compat import ( + bytestring, b, u, long, string_types, bytes_types, +) +from paramiko.sftp import ( + BaseSFTP, CMD_OPENDIR, CMD_HANDLE, SFTPError, CMD_READDIR, CMD_NAME, + CMD_CLOSE, SFTP_FLAG_READ, SFTP_FLAG_WRITE, SFTP_FLAG_CREATE, + SFTP_FLAG_TRUNC, SFTP_FLAG_APPEND, SFTP_FLAG_EXCL, CMD_OPEN, CMD_REMOVE, + CMD_RENAME, CMD_MKDIR, CMD_RMDIR, CMD_STAT, CMD_ATTRS, CMD_LSTAT, + CMD_SYMLINK, CMD_SETSTAT, CMD_READLINK, CMD_REALPATH, CMD_STATUS, + CMD_EXTENDED, SFTP_OK, SFTP_EOF, SFTP_NO_SUCH_FILE, SFTP_PERMISSION_DENIED, +) from paramiko.sftp_attr import SFTPAttributes from paramiko.ssh_exception import SSHException @@ -56,6 +60,7 @@ def _to_unicode(s): except UnicodeError: return s + b_slash = b'/' @@ -78,8 +83,8 @@ class SFTPClient(BaseSFTP, ClosingContextManager): :param .Channel sock: an open `.Channel` using the ``"sftp"`` subsystem - :raises SSHException: if there's an exception while negotiating - sftp + :raises: + `.SSHException` -- if there's an exception while negotiating sftp """ BaseSFTP.__init__(self) self.sock = sock @@ -93,13 +98,16 @@ class SFTPClient(BaseSFTP, ClosingContextManager): if type(sock) is Channel: # override default logger transport = self.sock.get_transport() - self.logger = util.get_logger(transport.get_log_channel() + '.sftp') + self.logger = util.get_logger( + 
transport.get_log_channel() + '.sftp') self.ultra_debug = transport.get_hexdump() try: server_version = self._send_version() except EOFError: raise SSHException('EOF during negotiation') - self._log(INFO, 'Opened sftp connection (server version %d)' % server_version) + self._log( + INFO, + 'Opened sftp connection (server version %d)' % server_version) @classmethod def from_transport(cls, t, window_size=None, max_packet_size=None): @@ -111,7 +119,8 @@ class SFTPClient(BaseSFTP, ClosingContextManager): OpenSSH and should work adequately for both files transfers and interactive sessions. - :param .Transport t: an open `.Transport` which is already authenticated + :param .Transport t: an open `.Transport` which is already + authenticated :param int window_size: optional window size for the `.SFTPClient` session. :param int max_packet_size: @@ -136,9 +145,12 @@ class SFTPClient(BaseSFTP, ClosingContextManager): for m in msg: self._log(level, m, *args) else: - # escape '%' in msg (they could come from file or directory names) before logging - msg = msg.replace('%','%%') - super(SFTPClient, self)._log(level, "[chan %s] " + msg, *([self.sock.get_name()] + list(args))) + # escape '%' in msg (they could come from file or directory names) + # before logging + msg = msg.replace('%', '%%') + super(SFTPClient, self)._log( + level, + "[chan %s] " + msg, *([self.sock.get_name()] + list(args))) def close(self): """ @@ -160,7 +172,8 @@ class SFTPClient(BaseSFTP, ClosingContextManager): def listdir(self, path='.'): """ - Return a list containing the names of the entries in the given ``path``. + Return a list containing the names of the entries in the given + ``path``. The list is in arbitrary order. It does not include the special entries ``'.'`` and ``'..'`` even if they are present in the folder. @@ -223,7 +236,7 @@ class SFTPClient(BaseSFTP, ClosingContextManager): ``read_aheads``, an integer controlling how many ``SSH_FXP_READDIR`` requests are made to the server. The default of 50 should suffice for most file listings as each request/response cycle - may contain multiple files (dependant on server implementation.) + may contain multiple files (dependent on server implementation.) .. versionadded:: 1.15 """ @@ -308,7 +321,7 @@ class SFTPClient(BaseSFTP, ClosingContextManager): :param int bufsize: desired buffering (-1 = default buffer size) :return: an `.SFTPFile` object representing the open file - :raises IOError: if the file could not be opened. + :raises: ``IOError`` -- if the file could not be opened. """ filename = self._adjust_cwd(filename) self._log(DEBUG, 'open(%r, %r)' % (filename, mode)) @@ -328,7 +341,9 @@ class SFTPClient(BaseSFTP, ClosingContextManager): if t != CMD_HANDLE: raise SFTPError('Expected handle') handle = msg.get_binary() - self._log(DEBUG, 'open(%r, %r) -> %s' % (filename, mode, hexlify(handle))) + self._log( + DEBUG, + 'open(%r, %r) -> %s' % (filename, mode, hexlify(handle))) return SFTPFile(self, handle, mode, bufsize) # Python continues to vacillate about "open" vs "file"... 
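A short usage sketch for the client-construction paths touched here, assuming ``t`` is an already-authenticated ``Transport``; ``str(attr)`` prints the ``ls -l``-style line built by ``SFTPAttributes.__str__`` in the previous hunk:

    from paramiko import SFTPClient

    sftp = SFTPClient.from_transport(t)
    for attr in sftp.listdir_attr('.'):
        print(attr)
    sftp.close()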
@@ -341,7 +356,7 @@ class SFTPClient(BaseSFTP, ClosingContextManager): :param str path: path (absolute or relative) of the file to remove - :raises IOError: if the path refers to a folder (directory) + :raises: ``IOError`` -- if the path refers to a folder (directory) """ path = self._adjust_cwd(path) self._log(DEBUG, 'remove(%r)' % path) @@ -356,7 +371,8 @@ class SFTPClient(BaseSFTP, ClosingContextManager): :param str oldpath: existing name of the file or folder :param str newpath: new name for the file or folder, must not exist already - :raises IOError: if ``newpath`` is a folder, or something else goes + :raises: + ``IOError`` -- if ``newpath`` is a folder, or something else goes wrong """ oldpath = self._adjust_cwd(oldpath) @@ -497,12 +513,12 @@ class SFTPClient(BaseSFTP, ClosingContextManager): def utime(self, path, times): """ - Set the access and modified times of the file specified by ``path``. If - ``times`` is ``None``, then the file's access and modified times are set - to the current time. Otherwise, ``times`` must be a 2-tuple of numbers, - of the form ``(atime, mtime)``, which is used to set the access and - modified times, respectively. This bizarre API is mimicked from Python - for the sake of consistency -- I apologize. + Set the access and modified times of the file specified by ``path``. + If ``times`` is ``None``, then the file's access and modified times + are set to the current time. Otherwise, ``times`` must be a 2-tuple + of numbers, of the form ``(atime, mtime)``, which is used to set the + access and modified times, respectively. This bizarre API is mimicked + from Python for the sake of consistency -- I apologize. :param str path: path of the file to modify :param tuple times: @@ -524,8 +540,7 @@ class SFTPClient(BaseSFTP, ClosingContextManager): method on Python file objects. :param str path: path of the file to modify - :param size: the new size of the file - :type size: int or long + :param int size: the new size of the file """ path = self._adjust_cwd(path) self._log(DEBUG, 'truncate(%r, %r)' % (path, size)) @@ -564,7 +579,7 @@ class SFTPClient(BaseSFTP, ClosingContextManager): :param str path: path to be normalized :return: normalized form of the given path (as a `str`) - :raises IOError: if the path can't be resolved on the server + :raises: ``IOError`` -- if the path can't be resolved on the server """ path = self._adjust_cwd(path) self._log(DEBUG, 'normalize(%r)' % path) @@ -587,7 +602,8 @@ class SFTPClient(BaseSFTP, ClosingContextManager): :param str path: new current working directory - :raises IOError: if the requested path doesn't exist on the server + :raises: + ``IOError`` -- if the requested path doesn't exist on the server .. versionadded:: 1.4 """ @@ -595,7 +611,8 @@ class SFTPClient(BaseSFTP, ClosingContextManager): self._cwd = None return if not stat.S_ISDIR(self.stat(path).st_mode): - raise SFTPError(errno.ENOTDIR, "%s: %s" % (os.strerror(errno.ENOTDIR), path)) + raise SFTPError( + errno.ENOTDIR, "%s: %s" % (os.strerror(errno.ENOTDIR), path)) self._cwd = b(self.normalize(path)) def getcwd(self): @@ -606,8 +623,21 @@ class SFTPClient(BaseSFTP, ClosingContextManager): .. 
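The ``chdir``/``getcwd`` machinery is an emulation layered on ``normalize``; a hedged sketch of how the relative-path helpers behave (paths and file names hypothetical, ``sftp`` as above):

    sftp.chdir('/tmp')
    print(sftp.getcwd())               # '/tmp'
    print(sftp.normalize('..'))        # server-resolved absolute path
    sftp.rename('old.txt', 'new.txt')  # both names resolved against the cwd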
versionadded:: 1.4 """ + # TODO: make class initialize with self._cwd set to self.normalize('.') return self._cwd and u(self._cwd) + def _transfer_with_callback(self, reader, writer, file_size, callback): + size = 0 + while True: + data = reader.read(32768) + writer.write(data) + size += len(data) + if len(data) == 0: + break + if callback is not None: + callback(size, file_size) + return size + def putfo(self, fl, remotepath, file_size=0, callback=None, confirm=True): """ Copy the contents of an open file object (``fl``) to the SFTP server as @@ -616,7 +646,7 @@ class SFTPClient(BaseSFTP, ClosingContextManager): The SFTP operations use pipelining for speed. - :param file fl: opened file or file-like object to copy + :param fl: opened file or file-like object to copy :param str remotepath: the destination path on the SFTP server :param int file_size: optional size parameter passed to callback. If none is specified, @@ -637,19 +667,14 @@ class SFTPClient(BaseSFTP, ClosingContextManager): """ with self.file(remotepath, 'wb') as fr: fr.set_pipelined(True) - size = 0 - while True: - data = fl.read(32768) - fr.write(data) - size += len(data) - if callback is not None: - callback(size, file_size) - if len(data) == 0: - break + size = self._transfer_with_callback( + reader=fl, writer=fr, file_size=file_size, callback=callback + ) if confirm: s = self.stat(remotepath) if s.st_size != size: - raise IOError('size mismatch in put! %d != %d' % (s.st_size, size)) + raise IOError( + 'size mismatch in put! %d != %d' % (s.st_size, size)) else: s = SFTPAttributes() return s @@ -673,7 +698,8 @@ class SFTPClient(BaseSFTP, ClosingContextManager): whether to do a stat() on the file afterwards to confirm the file size - :return: an `.SFTPAttributes` object containing attributes about the given file + :return: an `.SFTPAttributes` object containing attributes about the + given file .. versionadded:: 1.4 .. versionchanged:: 1.7.4 @@ -702,19 +728,12 @@ class SFTPClient(BaseSFTP, ClosingContextManager): .. versionadded:: 1.10 """ + file_size = self.stat(remotepath).st_size with self.open(remotepath, 'rb') as fr: - file_size = self.stat(remotepath).st_size - fr.prefetch() - size = 0 - while True: - data = fr.read(32768) - fl.write(data) - size += len(data) - if callback is not None: - callback(size, file_size) - if len(data) == 0: - break - return size + fr.prefetch(file_size) + return self._transfer_with_callback( + reader=fr, writer=fl, file_size=file_size, callback=callback + ) def get(self, remotepath, localpath, callback=None): """ @@ -732,14 +751,14 @@ class SFTPClient(BaseSFTP, ClosingContextManager): .. versionchanged:: 1.7.4 Added the ``callback`` param """ - file_size = self.stat(remotepath).st_size with open(localpath, 'wb') as fl: size = self.getfo(remotepath, fl, callback) s = os.stat(localpath) if s.st_size != size: - raise IOError('size mismatch in get! %d != %d' % (s.st_size, size)) + raise IOError( + 'size mismatch in get! %d != %d' % (s.st_size, size)) - ### internals... + # ...internals... 
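The new ``_transfer_with_callback`` helper backs both ``putfo`` and ``getfo``; a sketch of an upload with a progress callback (file names hypothetical, ``sftp`` as above):

    def progress(transferred, total):
        print('%d of %d bytes sent' % (transferred, total))

    attrs = sftp.put('report.csv', '/tmp/report.csv',
                     callback=progress, confirm=True)
    print(attrs.st_size)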
def _request(self, t, *arg): num = self._async_request(type(None), t, *arg) @@ -761,13 +780,14 @@ class SFTPClient(BaseSFTP, ClosingContextManager): elif isinstance(item, SFTPAttributes): item._pack(msg) else: - raise Exception('unknown type for %r type %r' % (item, type(item))) + raise Exception( + 'unknown type for %r type %r' % (item, type(item))) num = self.request_number self._expecting[num] = fileobj - self._send_packet(t, msg) self.request_number += 1 finally: self._lock.release() + self._send_packet(t, msg) return num def _read_response(self, waitfor=None): @@ -778,21 +798,29 @@ class SFTPClient(BaseSFTP, ClosingContextManager): raise SSHException('Server connection dropped: %s' % str(e)) msg = Message(data) num = msg.get_int() - if num not in self._expecting: - # might be response for a file that was closed before responses came back - self._log(DEBUG, 'Unexpected response #%d' % (num,)) - if waitfor is None: - # just doing a single check - break - continue - fileobj = self._expecting[num] - del self._expecting[num] + self._lock.acquire() + try: + if num not in self._expecting: + # might be response for a file that was closed before + # responses came back + self._log(DEBUG, 'Unexpected response #%d' % (num,)) + if waitfor is None: + # just doing a single check + break + continue + fileobj = self._expecting[num] + del self._expecting[num] + finally: + self._lock.release() if num == waitfor: # synchronous if t == CMD_STATUS: self._convert_status(msg) return t, msg - if fileobj is not type(None): + + # can not rewrite this to deal with E721, either as a None check + # nor as not an instance of None or NoneType + if fileobj is not type(None): # noqa fileobj._async_response(t, msg, num) if waitfor is None: # just doing a single check @@ -840,6 +868,6 @@ class SFTPClient(BaseSFTP, ClosingContextManager): class SFTP(SFTPClient): """ - An alias for `.SFTPClient` for backwards compatability. + An alias for `.SFTPClient` for backwards compatibility. """ pass diff --git a/paramiko/sftp_file.py b/paramiko/sftp_file.py index d0a37da3..337cdbeb 100644 --- a/paramiko/sftp_file.py +++ b/paramiko/sftp_file.py @@ -31,8 +31,10 @@ from paramiko.common import DEBUG from paramiko.file import BufferedFile from paramiko.py3compat import long -from paramiko.sftp import CMD_CLOSE, CMD_READ, CMD_DATA, SFTPError, CMD_WRITE, \ - CMD_STATUS, CMD_FSTAT, CMD_ATTRS, CMD_FSETSTAT, CMD_EXTENDED +from paramiko.sftp import ( + CMD_CLOSE, CMD_READ, CMD_DATA, SFTPError, CMD_WRITE, CMD_STATUS, CMD_FSTAT, + CMD_ATTRS, CMD_FSETSTAT, CMD_EXTENDED, +) from paramiko.sftp_attr import SFTPAttributes @@ -64,13 +66,13 @@ class SFTPFile (BufferedFile): def __del__(self): self._close(async=True) - + def close(self): """ Close the file. """ self._close(async=False) - + def _close(self, async=False): # We allow double-close without signaling an error, because real # Python file objects do. 
However, we must protect against actually @@ -87,7 +89,8 @@ class SFTPFile (BufferedFile): BufferedFile.close(self) try: if async: - # GC'd file handle could be called from an arbitrary thread -- don't wait for a response + # GC'd file handle could be called from an arbitrary thread + # -- don't wait for a response self.sftp._async_request(type(None), CMD_CLOSE, self.handle) else: self.sftp._request(CMD_CLOSE, self.handle) @@ -99,7 +102,8 @@ class SFTPFile (BufferedFile): pass def _data_in_prefetch_requests(self, offset, size): - k = [x for x in list(self._prefetch_extents.values()) if x[0] <= offset] + k = [x for x in list(self._prefetch_extents.values()) + if x[0] <= offset] if len(k) == 0: return False k.sort(key=lambda x: x[0]) @@ -110,9 +114,12 @@ class SFTPFile (BufferedFile): if buf_offset + buf_size >= offset + size: # inclusive return True - # well, we have part of the request. see if another chunk has the rest. - return self._data_in_prefetch_requests(buf_offset + buf_size, offset + size - buf_offset - buf_size) - + # well, we have part of the request. see if another chunk has + # the rest. + return self._data_in_prefetch_requests( + buf_offset + buf_size, + offset + size - buf_offset - buf_size) + def _data_in_prefetch_buffers(self, offset): """ if a block of data is present in the prefetch buffers, at the given @@ -129,13 +136,14 @@ class SFTPFile (BufferedFile): # it's not here return None return index - + def _read_prefetch(self, size): """ read data out of the prefetch buffer, if possible. if the data isn't in the buffer, return None. otherwise, behaves like a normal read. """ - # while not closed, and haven't fetched past the current position, and haven't reached EOF... + # while not closed, and haven't fetched past the current position, + # and haven't reached EOF... 
while True: offset = self._data_in_prefetch_buffers(self._realpos) if offset is not None: @@ -149,7 +157,7 @@ class SFTPFile (BufferedFile): return None prefetch = self._prefetch_data[offset] del self._prefetch_data[offset] - + buf_offset = self._realpos - offset if buf_offset > 0: self._prefetch_data[offset] = prefetch[:buf_offset] @@ -158,14 +166,19 @@ class SFTPFile (BufferedFile): self._prefetch_data[self._realpos + size] = prefetch[size:] prefetch = prefetch[:size] return prefetch - + def _read(self, size): size = min(size, self.MAX_REQUEST_SIZE) if self._prefetching: data = self._read_prefetch(size) if data is not None: return data - t, msg = self.sftp._request(CMD_READ, self.handle, long(self._realpos), int(size)) + t, msg = self.sftp._request( + CMD_READ, + self.handle, + long(self._realpos), + int(size) + ) if t != CMD_DATA: raise SFTPError('Expected data') return msg.get_string() @@ -173,8 +186,18 @@ class SFTPFile (BufferedFile): def _write(self, data): # may write less than requested if it would exceed max packet size chunk = min(len(data), self.MAX_REQUEST_SIZE) - self._reqs.append(self.sftp._async_request(type(None), CMD_WRITE, self.handle, long(self._realpos), data[:chunk])) - if not self.pipelined or (len(self._reqs) > 100 and self.sftp.sock.recv_ready()): + sftp_async_request = self.sftp._async_request( + type(None), + CMD_WRITE, + self.handle, + long(self._realpos), + data[:chunk] + ) + self._reqs.append(sftp_async_request) + if ( + not self.pipelined or + (len(self._reqs) > 100 and self.sftp.sock.recv_ready()) + ): while len(self._reqs): req = self._reqs.popleft() t, msg = self.sftp._read_response(req) @@ -217,7 +240,22 @@ class SFTPFile (BufferedFile): """ self.sftp.sock.setblocking(blocking) + def seekable(self): + """ + Check if the file supports random access. + + :return: + `True` if the file supports random access. If `False`, + :meth:`seek` will raise an exception + """ + return True + def seek(self, offset, whence=0): + """ + Set the file's current position. + + See `file.seek` for details. + """ self.flush() if whence == self.SEEK_SET: self._realpos = self._pos = offset @@ -234,7 +272,8 @@ class SFTPFile (BufferedFile): exactly like `.SFTPClient.stat`, except that it operates on an already-open file. - :return: an `.SFTPAttributes` object containing attributes about this file. + :returns: + an `.SFTPAttributes` object containing attributes about this file. """ t, msg = self.sftp._request(CMD_FSTAT, self.handle) if t != CMD_ATTRS: @@ -253,7 +292,7 @@ class SFTPFile (BufferedFile): attr = SFTPAttributes() attr.st_mode = mode self.sftp._request(CMD_FSETSTAT, self.handle, attr) - + def chown(self, uid, gid): """ Change the owner (``uid``) and group (``gid``) of this file. As with @@ -264,7 +303,9 @@ class SFTPFile (BufferedFile): :param int uid: new owner's uid :param int gid: new group id """ - self.sftp._log(DEBUG, 'chown(%s, %r, %r)' % (hexlify(self.handle), uid, gid)) + self.sftp._log( + DEBUG, + 'chown(%s, %r, %r)' % (hexlify(self.handle), uid, gid)) attr = SFTPAttributes() attr.st_uid, attr.st_gid = uid, gid self.sftp._request(CMD_FSETSTAT, self.handle, attr) @@ -272,11 +313,11 @@ class SFTPFile (BufferedFile): def utime(self, times): """ Set the access and modified times of this file. If - ``times`` is ``None``, then the file's access and modified times are set - to the current time. Otherwise, ``times`` must be a 2-tuple of numbers, - of the form ``(atime, mtime)``, which is used to set the access and - modified times, respectively. 
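``seekable`` and the newly documented ``seek`` make ``SFTPFile`` random-access; a sketch reading the final bytes of a hypothetical remote file (``sftp`` as above):

    with sftp.open('/tmp/report.csv', 'rb') as f:
        size = f.stat().st_size
        f.seek(max(size - 16, 0))   # jump near the end of the file
        tail = f.read(16)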
This bizarre API is mimicked from Python - for the sake of consistency -- I apologize. + ``times`` is ``None``, then the file's access and modified times are + set to the current time. Otherwise, ``times`` must be a 2-tuple of + numbers, of the form ``(atime, mtime)``, which is used to set the + access and modified times, respectively. This bizarre API is mimicked + from Python for the sake of consistency -- I apologize. :param tuple times: ``None`` or a tuple of (access time, modified time) in standard @@ -294,25 +335,26 @@ class SFTPFile (BufferedFile): Change the size of this file. This usually extends or shrinks the size of the file, just like the ``truncate()`` method on Python file objects. - + :param size: the new size of the file - :type size: int or long """ - self.sftp._log(DEBUG, 'truncate(%s, %r)' % (hexlify(self.handle), size)) + self.sftp._log( + DEBUG, + 'truncate(%s, %r)' % (hexlify(self.handle), size)) attr = SFTPAttributes() attr.st_size = size self.sftp._request(CMD_FSETSTAT, self.handle, attr) - + def check(self, hash_algorithm, offset=0, length=0, block_size=0): """ Ask the server for a hash of a section of this file. This can be used to verify a successful upload or download, or for various rsync-like operations. - - The file is hashed from ``offset``, for ``length`` bytes. If ``length`` - is 0, the remainder of the file is hashed. Thus, if both ``offset`` - and ``length`` are zero, the entire file is hashed. - + + The file is hashed from ``offset``, for ``length`` bytes. + If ``length`` is 0, the remainder of the file is hashed. Thus, if both + ``offset`` and ``length`` are zero, the entire file is hashed. + Normally, ``block_size`` will be 0 (the default), and this method will return a byte string representing the requested hash (for example, a string of length 16 for MD5, or 20 for SHA-1). If a non-zero @@ -320,45 +362,43 @@ class SFTPFile (BufferedFile): ``offset + length``) of ``block_size`` bytes is computed as a separate hash. The hash results are all concatenated and returned as a single string. - + For example, ``check('sha1', 0, 1024, 512)`` will return a string of length 40. The first 20 bytes will be the SHA-1 of the first 512 bytes of the file, and the last 20 bytes will be the SHA-1 of the next 512 bytes. - + :param str hash_algorithm: the name of the hash algorithm to use (normally ``"sha1"`` or ``"md5"``) :param offset: offset into the file to begin hashing (0 means to start from the beginning) - :type offset: int or long :param length: number of bytes to hash (0 means continue to the end of the file) - :type length: int or long :param int block_size: number of bytes to hash per result (must not be less than 256; 0 means to compute only one hash of the entire segment) - :type block_size: int :return: `str` of bytes representing the hash of each block, concatenated together - - :raises IOError: if the server doesn't support the "check-file" - extension, or possibly doesn't support the hash algorithm - requested - + + :raises: + ``IOError`` -- if the server doesn't support the "check-file" + extension, or possibly doesn't support the hash algorithm requested + .. note:: Many (most?) servers don't support this extension yet. - + .. 
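``check`` relies on the optional ``check-file`` extension, so callers should be ready for ``IOError``; a hedged sketch comparing a remote file's SHA-1 against a local copy (paths hypothetical):

    import hashlib

    with sftp.open('/tmp/report.csv', 'rb') as f:
        try:
            remote_digest = f.check('sha1')   # offset=0, length=0: whole file
        except IOError:
            remote_digest = None              # server lacks the extension

    if remote_digest is not None:
        with open('report.csv', 'rb') as local:
            print(remote_digest == hashlib.sha1(local.read()).digest())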
versionadded:: 1.4 """ - t, msg = self.sftp._request(CMD_EXTENDED, 'check-file', self.handle, - hash_algorithm, long(offset), long(length), block_size) - ext = msg.get_text() - alg = msg.get_text() + t, msg = self.sftp._request( + CMD_EXTENDED, 'check-file', self.handle, + hash_algorithm, long(offset), long(length), block_size) + msg.get_text() # ext + msg.get_text() # alg data = msg.get_remainder() return data - + def set_pipelined(self, pipelined=True): """ Turn on/off the pipelining of write operations to this file. When @@ -368,55 +408,69 @@ class SFTPFile (BufferedFile): server responses are collected. This means that if there was an error with one of your later writes, an exception might be thrown from within `.close` instead of `.write`. - + By default, files are not pipelined. - + :param bool pipelined: ``True`` if pipelining should be turned on for this file; ``False`` otherwise - + .. versionadded:: 1.5 """ self.pipelined = pipelined - - def prefetch(self): + + def prefetch(self, file_size=None): """ Pre-fetch the remaining contents of this file in anticipation of future `.read` calls. If reading the entire file, pre-fetching can dramatically improve the download speed by avoiding roundtrip latency. The file's contents are incrementally buffered in a background thread. - + The prefetched data is stored in a buffer until read via the `.read` method. Once data has been read, it's removed from the buffer. The data may be read in a random order (using `.seek`); chunks of the buffer that haven't been read will continue to be buffered. + :param int file_size: + When this is ``None`` (the default), this method calls `stat` to + determine the remote file size. In some situations, doing so can + cause exceptions or hangs (see `#562 + <https://github.com/paramiko/paramiko/pull/562>`_); as a + workaround, one may call `stat` explicitly and pass its value in + via this parameter. + .. versionadded:: 1.5.1 + .. versionchanged:: 1.16.0 + The ``file_size`` parameter was added (with no default value). + .. versionchanged:: 1.16.1 + The ``file_size`` parameter was made optional for backwards + compatibility. """ - size = self.stat().st_size + if file_size is None: + file_size = self.stat().st_size + # queue up async reads for the rest of the file chunks = [] n = self._realpos - while n < size: - chunk = min(self.MAX_REQUEST_SIZE, size - n) + while n < file_size: + chunk = min(self.MAX_REQUEST_SIZE, file_size - n) chunks.append((n, chunk)) n += chunk if len(chunks) > 0: self._start_prefetch(chunks) - + def readv(self, chunks): """ Read a set of blocks from the file by (offset, length). This is more efficient than doing a series of `.seek` and `.read` calls, since the prefetch machinery is used to retrieve all the requested blocks at once. - + :param chunks: - a list of (offset, length) tuples indicating which sections of the - file to read - :type chunks: list(tuple(long, int)) + a list of ``(offset, length)`` tuples indicating which sections of + the file to read :return: a list of blocks read, in the same order as in ``chunks`` - + .. 
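With the ``file_size`` parameter added above, callers can perform the ``stat`` themselves (the workaround described for the linked pull request) and still get prefetching; a short sketch:

    remote = '/tmp/report.csv'          # hypothetical path
    size = sftp.stat(remote).st_size
    with sftp.open(remote, 'rb') as f:
        f.prefetch(size)                # no implicit stat() here
        data = f.read()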
versionadded:: 1.5.4 """ self.sftp._log(DEBUG, 'readv(%s, %r)' % (hexlify(self.handle), chunks)) @@ -424,7 +478,10 @@ class SFTPFile (BufferedFile): read_chunks = [] for offset, size in chunks: # don't fetch data that's already in the prefetch buffer - if self._data_in_prefetch_buffers(offset) or self._data_in_prefetch_requests(offset, size): + if ( + self._data_in_prefetch_buffers(offset) or + self._data_in_prefetch_requests(offset, size) + ): continue # break up anything larger than the max read size @@ -440,7 +497,7 @@ class SFTPFile (BufferedFile): self.seek(x[0]) yield self.read(x[1]) - ### internals... + # ...internals... def _get_size(self): try: @@ -455,13 +512,18 @@ class SFTPFile (BufferedFile): t = threading.Thread(target=self._prefetch_thread, args=(chunks,)) t.setDaemon(True) t.start() - + def _prefetch_thread(self, chunks): # do these read requests in a temporary thread because there may be # a lot of them, so it may block. for offset, length in chunks: + num = self.sftp._async_request( + self, + CMD_READ, + self.handle, + long(offset), + int(length)) with self._prefetch_lock: - num = self.sftp._async_request(self, CMD_READ, self.handle, long(offset), int(length)) self._prefetch_extents[num] = (offset, length) def _async_response(self, t, msg, num): @@ -475,13 +537,17 @@ class SFTPFile (BufferedFile): if t != CMD_DATA: raise SFTPError('Expected data') data = msg.get_string() - with self._prefetch_lock: - offset, length = self._prefetch_extents[num] - self._prefetch_data[offset] = data - del self._prefetch_extents[num] - if len(self._prefetch_extents) == 0: - self._prefetch_done = True - + while True: + with self._prefetch_lock: + # spin if in race with _prefetch_thread + if num in self._prefetch_extents: + offset, length = self._prefetch_extents[num] + self._prefetch_data[offset] = data + del self._prefetch_extents[num] + if len(self._prefetch_extents) == 0: + self._prefetch_done = True + break + def _check_exception(self): """if there's a saved exception, raise & clear it""" if self._saved_exception is not None: diff --git a/paramiko/sftp_handle.py b/paramiko/sftp_handle.py index edceb5ad..ca473900 100644 --- a/paramiko/sftp_handle.py +++ b/paramiko/sftp_handle.py @@ -30,10 +30,10 @@ class SFTPHandle (ClosingContextManager): Abstract object representing a handle to an open file (or folder) in an SFTP server implementation. Each handle has a string representation used by the client to refer to the underlying file. - + Server implementations can (and should) subclass SFTPHandle to implement features of a file handle, like `stat` or `chattr`. - + Instances of this class may be used as context managers. """ def __init__(self, flags=0): @@ -41,8 +41,9 @@ class SFTPHandle (ClosingContextManager): Create a new file handle representing a local file being served over SFTP. If ``flags`` is passed in, it's used to determine if the file is open in append mode. - - :param int flags: optional flags as passed to `.SFTPServerInterface.open` + + :param int flags: optional flags as passed to + `.SFTPServerInterface.open` """ self.__flags = flags self.__name = None @@ -55,7 +56,7 @@ class SFTPHandle (ClosingContextManager): When a client closes a file, this method is called on the handle. Normally you would use this method to close the underlying OS level file object(s). - + The default implementation checks for attributes on ``self`` named ``readfile`` and/or ``writefile``, and if either or both are present, their ``close()`` methods are called. 
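``readv`` batches several ``(offset, length)`` ranges through the same prefetch machinery; a sketch fetching a hypothetical header and footer in one pass (assumes the file is at least 64 bytes long):

    with sftp.open('/tmp/archive.bin', 'rb') as f:
        end = f.stat().st_size - 64
        header, footer = list(f.readv([(0, 64), (end, 64)]))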
This means that if you are @@ -76,7 +77,7 @@ class SFTPHandle (ClosingContextManager): to be 64 bits. If the end of the file has been reached, this method may return an - empty string to signify EOF, or it may also return `.SFTP_EOF`. + empty string to signify EOF, or it may also return ``SFTP_EOF``. The default implementation checks for an attribute on ``self`` named ``readfile``, and if present, performs the read operation on the Python @@ -84,7 +85,6 @@ class SFTPHandle (ClosingContextManager): common case where you are wrapping a Python file object.) :param offset: position in the file to start reading from. - :type offset: int or long :param int length: number of bytes to attempt to read. :return: data read from the file, or an SFTP error code, as a `str`. """ @@ -117,11 +117,10 @@ class SFTPHandle (ClosingContextManager): differently from ``readfile`` to make it easy to implement read-only (or write-only) files, but if both attributes are present, they should refer to the same file. - + :param offset: position in the file to start reading from. - :type offset: int or long :param str data: data to write into the file. - :return: an SFTP error code like `.SFTP_OK`. + :return: an SFTP error code like ``SFTP_OK``. """ writefile = getattr(self, 'writefile', None) if writefile is None: @@ -151,7 +150,7 @@ class SFTPHandle (ClosingContextManager): :return: an attributes object for the given file, or an SFTP error code - (like `.SFTP_PERMISSION_DENIED`). + (like ``SFTP_PERMISSION_DENIED``). :rtype: `.SFTPAttributes` or error code """ return SFTP_OP_UNSUPPORTED @@ -163,11 +162,11 @@ class SFTPHandle (ClosingContextManager): check for the presence of fields before using them. :param .SFTPAttributes attr: the attributes to change on this file. - :return: an `int` error code like `.SFTP_OK`. + :return: an `int` error code like ``SFTP_OK``. """ return SFTP_OP_UNSUPPORTED - ### internals... + # ...internals... def _set_files(self, files): """ @@ -179,7 +178,7 @@ class SFTPHandle (ClosingContextManager): def _get_next_files(self): """ - Used by the SFTP server code to retreive a cached directory + Used by the SFTP server code to retrieve a cached directory listing. 
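As the ``readfile``/``writefile`` convention documented here suggests, a server-side handle wrapping a local file can stay very small; a minimal sketch (not part of this patch):

    import os
    from paramiko import SFTPAttributes, SFTPHandle
    from paramiko.sftp import SFTP_OK

    class LocalFileHandle(SFTPHandle):
        def __init__(self, path, flags=0):
            super(LocalFileHandle, self).__init__(flags)
            # The default read()/write() operate on these attributes directly.
            self.readfile = self.writefile = open(path, 'r+b')

        def stat(self):
            return SFTPAttributes.from_stat(os.fstat(self.readfile.fileno()))

        def chattr(self, attr):
            return SFTP_OK   # pretend the attribute change was applied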
""" fnlist = self.__files[:16] diff --git a/paramiko/sftp_server.py b/paramiko/sftp_server.py index 5d161b74..49e5491e 100644 --- a/paramiko/sftp_server.py +++ b/paramiko/sftp_server.py @@ -26,8 +26,9 @@ import sys from hashlib import md5, sha1 from paramiko import util -from paramiko.sftp import BaseSFTP, Message, SFTP_FAILURE, \ - SFTP_PERMISSION_DENIED, SFTP_NO_SUCH_FILE +from paramiko.sftp import ( + BaseSFTP, Message, SFTP_FAILURE, SFTP_PERMISSION_DENIED, SFTP_NO_SUCH_FILE, +) from paramiko.sftp_si import SFTPServerInterface from paramiko.sftp_attr import SFTPAttributes from paramiko.common import DEBUG @@ -36,13 +37,15 @@ from paramiko.server import SubsystemHandler # known hash algorithms for the "check-file" extension -from paramiko.sftp import CMD_HANDLE, SFTP_DESC, CMD_STATUS, SFTP_EOF, CMD_NAME, \ - SFTP_BAD_MESSAGE, CMD_EXTENDED_REPLY, SFTP_FLAG_READ, SFTP_FLAG_WRITE, \ - SFTP_FLAG_APPEND, SFTP_FLAG_CREATE, SFTP_FLAG_TRUNC, SFTP_FLAG_EXCL, \ - CMD_NAMES, CMD_OPEN, CMD_CLOSE, SFTP_OK, CMD_READ, CMD_DATA, CMD_WRITE, \ - CMD_REMOVE, CMD_RENAME, CMD_MKDIR, CMD_RMDIR, CMD_OPENDIR, CMD_READDIR, \ - CMD_STAT, CMD_ATTRS, CMD_LSTAT, CMD_FSTAT, CMD_SETSTAT, CMD_FSETSTAT, \ - CMD_READLINK, CMD_SYMLINK, CMD_REALPATH, CMD_EXTENDED, SFTP_OP_UNSUPPORTED +from paramiko.sftp import ( + CMD_HANDLE, SFTP_DESC, CMD_STATUS, SFTP_EOF, CMD_NAME, SFTP_BAD_MESSAGE, + CMD_EXTENDED_REPLY, SFTP_FLAG_READ, SFTP_FLAG_WRITE, SFTP_FLAG_APPEND, + SFTP_FLAG_CREATE, SFTP_FLAG_TRUNC, SFTP_FLAG_EXCL, CMD_NAMES, CMD_OPEN, + CMD_CLOSE, SFTP_OK, CMD_READ, CMD_DATA, CMD_WRITE, CMD_REMOVE, CMD_RENAME, + CMD_MKDIR, CMD_RMDIR, CMD_OPENDIR, CMD_READDIR, CMD_STAT, CMD_ATTRS, + CMD_LSTAT, CMD_FSTAT, CMD_SETSTAT, CMD_FSETSTAT, CMD_READLINK, CMD_SYMLINK, + CMD_REALPATH, CMD_EXTENDED, SFTP_OP_UNSUPPORTED, +) _hash_class = { 'sha1': sha1, @@ -57,7 +60,8 @@ class SFTPServer (BaseSFTP, SubsystemHandler): Use `.Transport.set_subsystem_handler` to activate this class. """ - def __init__(self, channel, name, server, sftp_si=SFTPServerInterface, *largs, **kwargs): + def __init__(self, channel, name, server, sftp_si=SFTPServerInterface, + *largs, **kwargs): """ The constructor for SFTPServer is meant to be called from within the `.Transport` as a subsystem handler. ``server`` and any additional @@ -68,7 +72,7 @@ class SFTPServer (BaseSFTP, SubsystemHandler): :param str name: name of the requested subsystem. :param .ServerInterface server: the server object associated with this channel and subsystem - :param class sftp_si: + :param sftp_si: a subclass of `.SFTPServerInterface` to use for handling individual requests. 
""" @@ -86,9 +90,13 @@ class SFTPServer (BaseSFTP, SubsystemHandler): def _log(self, level, msg): if issubclass(type(msg), list): for m in msg: - super(SFTPServer, self)._log(level, "[chan " + self.sock.get_name() + "] " + m) + super(SFTPServer, self)._log( + level, + "[chan " + self.sock.get_name() + "] " + m) else: - super(SFTPServer, self)._log(level, "[chan " + self.sock.get_name() + "] " + msg) + super(SFTPServer, self)._log( + level, + "[chan " + self.sock.get_name() + "] " + msg) def start_subsystem(self, name, transport, channel): self.sock = channel @@ -121,7 +129,8 @@ class SFTPServer (BaseSFTP, SubsystemHandler): def finish_subsystem(self): self.server.session_ended() super(SFTPServer, self).finish_subsystem() - # close any file handles that were left open (so we can return them to the OS quickly) + # close any file handles that were left open + # (so we can return them to the OS quickly) for f in self.file_table.values(): f.close() for f in self.folder_table.values(): @@ -175,7 +184,7 @@ class SFTPServer (BaseSFTP, SubsystemHandler): with open(filename, 'w+') as f: f.truncate(attr.st_size) - ### internals... + # ...internals... def _response(self, request_number, t, *arg): msg = Message() @@ -190,7 +199,9 @@ class SFTPServer (BaseSFTP, SubsystemHandler): elif type(item) is SFTPAttributes: item._pack(msg) else: - raise Exception('unknown type for ' + repr(item) + ' type ' + repr(type(item))) + raise Exception( + 'unknown type for {0!r} type {1!r}'.format( + item, type(item))) self._send_packet(t, msg) def _send_handle_response(self, request_number, handle, folder=False): @@ -212,7 +223,8 @@ class SFTPServer (BaseSFTP, SubsystemHandler): desc = SFTP_DESC[code] except IndexError: desc = 'Unknown' - # some clients expect a "langauge" tag at the end (but don't mind it being blank) + # some clients expect a "langauge" tag at the end + # (but don't mind it being blank) self._response(request_number, CMD_STATUS, code, desc, '') def _open_folder(self, request_number, path): @@ -251,7 +263,8 @@ class SFTPServer (BaseSFTP, SubsystemHandler): length = msg.get_int64() block_size = msg.get_int() if handle not in self.file_table: - self._send_status(request_number, SFTP_BAD_MESSAGE, 'Invalid handle') + self._send_status( + request_number, SFTP_BAD_MESSAGE, 'Invalid handle') return f = self.file_table[handle] for x in alg_list: @@ -260,7 +273,8 @@ class SFTPServer (BaseSFTP, SubsystemHandler): alg = _hash_class[x] break else: - self._send_status(request_number, SFTP_FAILURE, 'No supported hash types found') + self._send_status( + request_number, SFTP_FAILURE, 'No supported hash types found') return if length == 0: st = f.stat() @@ -271,7 +285,8 @@ class SFTPServer (BaseSFTP, SubsystemHandler): if block_size == 0: block_size = length if block_size < 256: - self._send_status(request_number, SFTP_FAILURE, 'Block size too small') + self._send_status( + request_number, SFTP_FAILURE, 'Block size too small') return sum_out = bytes() @@ -285,7 +300,8 @@ class SFTPServer (BaseSFTP, SubsystemHandler): while count < blocklen: data = f.read(offset, chunklen) if not isinstance(data, bytes_types): - self._send_status(request_number, data, 'Unable to hash file') + self._send_status( + request_number, data, 'Unable to hash file') return hash_obj.update(data) count += len(data) @@ -323,7 +339,8 @@ class SFTPServer (BaseSFTP, SubsystemHandler): path = msg.get_text() flags = self._convert_pflags(msg.get_int()) attr = SFTPAttributes._from_msg(msg) - self._send_handle_response(request_number, 
self.server.open(path, flags, attr)) + self._send_handle_response( + request_number, self.server.open(path, flags, attr)) elif t == CMD_CLOSE: handle = msg.get_binary() if handle in self.folder_table: @@ -335,13 +352,15 @@ class SFTPServer (BaseSFTP, SubsystemHandler): del self.file_table[handle] self._send_status(request_number, SFTP_OK) return - self._send_status(request_number, SFTP_BAD_MESSAGE, 'Invalid handle') + self._send_status( + request_number, SFTP_BAD_MESSAGE, 'Invalid handle') elif t == CMD_READ: handle = msg.get_binary() offset = msg.get_int64() length = msg.get_int() if handle not in self.file_table: - self._send_status(request_number, SFTP_BAD_MESSAGE, 'Invalid handle') + self._send_status( + request_number, SFTP_BAD_MESSAGE, 'Invalid handle') return data = self.file_table[handle].read(offset, length) if isinstance(data, (bytes_types, string_types)): @@ -356,16 +375,19 @@ class SFTPServer (BaseSFTP, SubsystemHandler): offset = msg.get_int64() data = msg.get_binary() if handle not in self.file_table: - self._send_status(request_number, SFTP_BAD_MESSAGE, 'Invalid handle') + self._send_status( + request_number, SFTP_BAD_MESSAGE, 'Invalid handle') return - self._send_status(request_number, self.file_table[handle].write(offset, data)) + self._send_status( + request_number, self.file_table[handle].write(offset, data)) elif t == CMD_REMOVE: path = msg.get_text() self._send_status(request_number, self.server.remove(path)) elif t == CMD_RENAME: oldpath = msg.get_text() newpath = msg.get_text() - self._send_status(request_number, self.server.rename(oldpath, newpath)) + self._send_status( + request_number, self.server.rename(oldpath, newpath)) elif t == CMD_MKDIR: path = msg.get_text() attr = SFTPAttributes._from_msg(msg) @@ -380,7 +402,8 @@ class SFTPServer (BaseSFTP, SubsystemHandler): elif t == CMD_READDIR: handle = msg.get_binary() if handle not in self.folder_table: - self._send_status(request_number, SFTP_BAD_MESSAGE, 'Invalid handle') + self._send_status( + request_number, SFTP_BAD_MESSAGE, 'Invalid handle') return folder = self.folder_table[handle] self._read_folder(request_number, folder) @@ -401,7 +424,8 @@ class SFTPServer (BaseSFTP, SubsystemHandler): elif t == CMD_FSTAT: handle = msg.get_binary() if handle not in self.file_table: - self._send_status(request_number, SFTP_BAD_MESSAGE, 'Invalid handle') + self._send_status( + request_number, SFTP_BAD_MESSAGE, 'Invalid handle') return resp = self.file_table[handle].stat() if issubclass(type(resp), SFTPAttributes): @@ -416,25 +440,31 @@ class SFTPServer (BaseSFTP, SubsystemHandler): handle = msg.get_binary() attr = SFTPAttributes._from_msg(msg) if handle not in self.file_table: - self._response(request_number, SFTP_BAD_MESSAGE, 'Invalid handle') + self._response( + request_number, SFTP_BAD_MESSAGE, 'Invalid handle') return - self._send_status(request_number, self.file_table[handle].chattr(attr)) + self._send_status( + request_number, self.file_table[handle].chattr(attr)) elif t == CMD_READLINK: path = msg.get_text() resp = self.server.readlink(path) if isinstance(resp, (bytes_types, string_types)): - self._response(request_number, CMD_NAME, 1, resp, '', SFTPAttributes()) + self._response( + request_number, CMD_NAME, 1, resp, '', SFTPAttributes()) else: self._send_status(request_number, resp) elif t == CMD_SYMLINK: - # the sftp 2 draft is incorrect here! path always follows target_path + # the sftp 2 draft is incorrect here! 
+ # path always follows target_path target_path = msg.get_text() path = msg.get_text() - self._send_status(request_number, self.server.symlink(target_path, path)) + self._send_status( + request_number, self.server.symlink(target_path, path)) elif t == CMD_REALPATH: path = msg.get_text() rpath = self.server.canonicalize(path) - self._response(request_number, CMD_NAME, 1, rpath, '', SFTPAttributes()) + self._response( + request_number, CMD_NAME, 1, rpath, '', SFTPAttributes()) elif t == CMD_EXTENDED: tag = msg.get_text() if tag == 'check-file': diff --git a/paramiko/sftp_si.py b/paramiko/sftp_si.py index e899108d..bfe7b7c7 100644 --- a/paramiko/sftp_si.py +++ b/paramiko/sftp_si.py @@ -35,16 +35,15 @@ class SFTPServerInterface (object): SFTP sessions). However, raising an exception will usually cause the SFTP session to abruptly end, so you will usually want to catch exceptions and return an appropriate error code. - + All paths are in string form instead of unicode because not all SFTP clients & servers obey the requirement that paths be encoded in UTF-8. """ - def __init__(self, server, *largs, **kwargs): """ Create a new SFTPServerInterface object. This method does nothing by default and is meant to be overridden by subclasses. - + :param .ServerInterface server: the server object associated with this channel and SFTP subsystem """ @@ -73,7 +72,7 @@ class SFTPServerInterface (object): on that file. On success, a new object subclassed from `.SFTPHandle` should be returned. This handle will be used for future operations on the file (read, write, etc). On failure, an error code such as - `.SFTP_PERMISSION_DENIED` should be returned. + ``SFTP_PERMISSION_DENIED`` should be returned. ``flags`` contains the requested mode for opening (read-only, write-append, etc) as a bitset of flags from the ``os`` module: @@ -92,7 +91,7 @@ class SFTPServerInterface (object): The ``attr`` object contains requested attributes of the file if it has to be created. Some or all attribute fields may be missing if the client didn't specify them. - + .. note:: The SFTP protocol defines all files to be in "binary" mode. There is no equivalent to Python's "text" mode. @@ -121,13 +120,14 @@ class SFTPServerInterface (object): `.SFTPAttributes.from_stat` will usually do what you want. In case of an error, you should return one of the ``SFTP_*`` error - codes, such as `.SFTP_PERMISSION_DENIED`. + codes, such as ``SFTP_PERMISSION_DENIED``. - :param str path: the requested path (relative or absolute) to be listed. + :param str path: the requested path (relative or absolute) to be + listed. :return: a list of the files in the given folder, using `.SFTPAttributes` objects. - + .. note:: You should normalize the given ``path`` first (see the `os.path` module) and check appropriate permissions before returning the list @@ -150,7 +150,7 @@ class SFTPServerInterface (object): for. :return: an `.SFTPAttributes` object for the given file, or an SFTP error - code (like `.SFTP_PERMISSION_DENIED`). + code (like ``SFTP_PERMISSION_DENIED``). """ return SFTP_OP_UNSUPPORTED @@ -168,7 +168,7 @@ class SFTPServerInterface (object): :type path: str :return: an `.SFTPAttributes` object for the given file, or an SFTP error - code (like `.SFTP_PERMISSION_DENIED`). + code (like ``SFTP_PERMISSION_DENIED``). """ return SFTP_OP_UNSUPPORTED @@ -178,7 +178,7 @@ class SFTPServerInterface (object): :param str path: the requested path (relative or absolute) of the file to delete. - :return: an SFTP error code `int` like `.SFTP_OK`. 
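Continuing the wiring sketch above, a pared-down ``SFTPServerInterface`` that serves files beneath a fixed (hypothetical) root shows how ``list_folder`` and ``stat`` are expected to answer; ``open``, ``remove`` and the rest would follow the same pattern:

    import os
    from paramiko import SFTPAttributes, SFTPServerInterface
    from paramiko.sftp import SFTP_NO_SUCH_FILE

    class StubSFTPServer(SFTPServerInterface):
        ROOT = '/srv/sftp'   # hypothetical export root

        def _realpath(self, path):
            return self.ROOT + self.canonicalize(path)

        def list_folder(self, path):
            rpath = self._realpath(path)
            try:
                out = []
                for fname in os.listdir(rpath):
                    attr = SFTPAttributes.from_stat(
                        os.stat(os.path.join(rpath, fname)))
                    attr.filename = fname
                    out.append(attr)
                return out
            except OSError:
                return SFTP_NO_SUCH_FILE

        def stat(self, path):
            try:
                return SFTPAttributes.from_stat(os.stat(self._realpath(path)))
            except OSError:
                return SFTP_NO_SUCH_FILE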
+ :return: an SFTP error code `int` like ``SFTP_OK``. """ return SFTP_OP_UNSUPPORTED @@ -189,7 +189,7 @@ class SFTPServerInterface (object): and since there's no other (easy) way to move files via SFTP, it's probably a good idea to implement "move" in this method too, even for files that cross disk partition boundaries, if at all possible. - + .. note:: You should return an error if a file with the same name as ``newpath`` already exists. (The rename operation should be non-desctructive.) @@ -197,7 +197,7 @@ class SFTPServerInterface (object): :param str oldpath: the requested path (relative or absolute) of the existing file. :param str newpath: the requested new path of the file. - :return: an SFTP error code `int` like `.SFTP_OK`. + :return: an SFTP error code `int` like ``SFTP_OK``. """ return SFTP_OP_UNSUPPORTED @@ -220,13 +220,13 @@ class SFTPServerInterface (object): The ``attr`` object will contain only those fields provided by the client in its request, so you should use ``hasattr`` to check for - the presense of fields before using them. In some cases, the ``attr`` + the presence of fields before using them. In some cases, the ``attr`` object may be completely empty. :param str path: requested path (relative or absolute) of the new folder. :param .SFTPAttributes attr: requested attributes of the new folder. - :return: an SFTP error code `int` like `.SFTP_OK`. + :return: an SFTP error code `int` like ``SFTP_OK``. """ return SFTP_OP_UNSUPPORTED @@ -238,7 +238,7 @@ class SFTPServerInterface (object): :param str path: requested path (relative or absolute) of the folder to remove. - :return: an SFTP error code `int` like `.SFTP_OK`. + :return: an SFTP error code `int` like ``SFTP_OK``. """ return SFTP_OP_UNSUPPORTED @@ -253,7 +253,7 @@ class SFTPServerInterface (object): :param attr: requested attributes to change on the file (an `.SFTPAttributes` object) - :return: an error code `int` like `.SFTP_OK`. + :return: an error code `int` like ``SFTP_OK``. """ return SFTP_OP_UNSUPPORTED @@ -279,25 +279,25 @@ class SFTPServerInterface (object): # on windows, normalize backslashes to sftp/posix format out = out.replace('\\', '/') return out - + def readlink(self, path): """ Return the target of a symbolic link (or shortcut) on the server. If the specified path doesn't refer to a symbolic link, an error should be returned. - + :param str path: path (relative or absolute) of the symbolic link. :return: the target `str` path of the symbolic link, or an error code like - `.SFTP_NO_SUCH_FILE`. + ``SFTP_NO_SUCH_FILE``. """ return SFTP_OP_UNSUPPORTED - + def symlink(self, target_path, path): """ Create a symbolic link on the server, as new pathname ``path``, with ``target_path`` as the target of the link. - + :param str target_path: path (relative or absolute) of the target for this new symbolic link. diff --git a/paramiko/ssh_exception.py b/paramiko/ssh_exception.py index b99e42b3..e9ab8d66 100644 --- a/paramiko/ssh_exception.py +++ b/paramiko/ssh_exception.py @@ -16,6 +16,8 @@ # along with Paramiko; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. +import socket + class SSHException (Exception): """ @@ -29,11 +31,11 @@ class AuthenticationException (SSHException): Exception raised when authentication failed for some reason. It may be possible to retry with different credentials. (Other classes specify more specific reasons.) - + .. 
versionadded:: 1.6 """ pass - + class PasswordRequiredException (AuthenticationException): """ @@ -47,15 +49,13 @@ class BadAuthenticationType (AuthenticationException): Exception raised when an authentication type (like password) is used, but the server isn't allowing that type. (It may only allow public-key, for example.) - - :ivar list allowed_types: - list of allowed authentication types provided by the server (possible - values are: ``"none"``, ``"password"``, and ``"publickey"``). - + .. versionadded:: 1.1 """ + #: list of allowed authentication types provided by the server (possible + #: values are: ``"none"``, ``"password"``, and ``"publickey"``). allowed_types = [] - + def __init__(self, explanation, types): AuthenticationException.__init__(self, explanation) self.allowed_types = types @@ -63,7 +63,9 @@ class BadAuthenticationType (AuthenticationException): self.args = (explanation, types, ) def __str__(self): - return SSHException.__str__(self) + ' (allowed_types=%r)' % self.allowed_types + return '{0} (allowed_types={1!r})'.format( + SSHException.__str__(self), self.allowed_types + ) class PartialAuthentication (AuthenticationException): @@ -71,7 +73,7 @@ class PartialAuthentication (AuthenticationException): An internal exception thrown in the case of partial authentication. """ allowed_types = [] - + def __init__(self, types): AuthenticationException.__init__(self, 'partial authentication') self.allowed_types = types @@ -82,9 +84,9 @@ class PartialAuthentication (AuthenticationException): class ChannelException (SSHException): """ Exception raised when an attempt to open a new `.Channel` fails. - - :ivar int code: the error code returned by the server - + + :param int code: the error code returned by the server + .. versionadded:: 1.6 """ def __init__(self, code, text): @@ -97,15 +99,19 @@ class ChannelException (SSHException): class BadHostKeyException (SSHException): """ The host key given by the SSH server did not match what we were expecting. - - :ivar str hostname: the hostname of the SSH server - :ivar PKey got_key: the host key presented by the server - :ivar PKey expected_key: the host key expected - + + :param str hostname: the hostname of the SSH server + :param PKey got_key: the host key presented by the server + :param PKey expected_key: the host key expected + .. versionadded:: 1.6 """ def __init__(self, hostname, got_key, expected_key): - SSHException.__init__(self, 'Host key for server %s does not match!' % hostname) + message = 'Host key for server {0} does not match: got {1}, expected {2}' # noqa + message = message.format( + hostname, got_key.get_base64(), + expected_key.get_base64()) + SSHException.__init__(self, message) self.hostname = hostname self.key = got_key self.expected_key = expected_key @@ -117,8 +123,8 @@ class ProxyCommandFailure (SSHException): """ The "ProxyCommand" found in the .ssh/config file returned an error. - :ivar str command: The command line that is generating this exception. - :ivar str error: The error captured from the proxy command output. + :param str command: The command line that is generating this exception. + :param str error: The error captured from the proxy command output. """ def __init__(self, command, error): SSHException.__init__(self, @@ -129,3 +135,47 @@ class ProxyCommandFailure (SSHException): self.error = error # for unpickling self.args = (command, error, ) + + +class NoValidConnectionsError(socket.error): + """ + Multiple connection attempts were made and no families succeeded. 
+ + This exception class wraps multiple "real" underlying connection errors, + all of which represent failed connection attempts. Because these errors are + not guaranteed to all be of the same error type (i.e. different errno, + `socket.error` subclass, message, etc) we expose a single unified error + message and a ``None`` errno so that instances of this class match most + normal handling of `socket.error` objects. + + To see the wrapped exception objects, access the ``errors`` attribute. + ``errors`` is a dict whose keys are address tuples (e.g. ``('127.0.0.1', + 22)``) and whose values are the exception encountered trying to connect to + that address. + + It is implied/assumed that all the errors given to a single instance of + this class are from connecting to the same hostname + port (and thus that + the differences are in the resolution of the hostname - e.g. IPv4 vs v6). + + .. versionadded:: 1.16 + """ + def __init__(self, errors): + """ + :param dict errors: + The errors dict to store, as described by class docstring. + """ + addrs = sorted(errors.keys()) + body = ', '.join([x[0] for x in addrs[:-1]]) + tail = addrs[-1][0] + if body: + msg = "Unable to connect to port {0} on {1} or {2}" + else: + msg = "Unable to connect to port {0} on {2}" + super(NoValidConnectionsError, self).__init__( + None, # stand-in for errno + msg.format(addrs[0][1], body, tail) + ) + self.errors = errors + + def __reduce__(self): + return (self.__class__, (self.errors, )) diff --git a/paramiko/ssh_gss.py b/paramiko/ssh_gss.py index ebf2cc80..414485f9 100644 --- a/paramiko/ssh_gss.py +++ b/paramiko/ssh_gss.py @@ -20,7 +20,7 @@ """ -This module provides GSS-API / SSPI authentication as defined in RFC 4462. +This module provides GSS-API / SSPI authentication as defined in :rfc:`4462`. .. note:: Credential delegation is not supported in server mode. @@ -39,22 +39,8 @@ import sys """ GSS_AUTH_AVAILABLE = True -try: - from pyasn1.type.univ import ObjectIdentifier - from pyasn1.codec.der import encoder, decoder -except ImportError: - GSS_AUTH_AVAILABLE = False - class ObjectIdentifier(object): - def __init__(self, *args): - raise NotImplementedError("Module pyasn1 not importable") - - class decoder(object): - def decode(self): - raise NotImplementedError("Module pyasn1 not importable") - - class encoder(object): - def encode(self): - raise NotImplementedError("Module pyasn1 not importable") +from pyasn1.type.univ import ObjectIdentifier +from pyasn1.codec.der import encoder, decoder from paramiko.common import MSG_USERAUTH_REQUEST from paramiko.ssh_exception import SSHException @@ -86,9 +72,8 @@ def GSSAuth(auth_method, gss_deleg_creds=True): We delegate credentials by default. :return: Either an `._SSH_GSSAPI` (Unix) object or an `_SSH_SSPI` (Windows) object - :rtype: Object - :raise ImportError: If no GSS-API / SSPI module could be imported. + :raises: ``ImportError`` -- If no GSS-API / SSPI module could be imported. :see: `RFC 4462 <http://www.ietf.org/rfc/rfc4462.txt>`_ :note: Check for the available API and return either an `._SSH_GSSAPI` @@ -145,7 +130,6 @@ class _SSH_GSSAuth(object): as the only service value. :param str service: The desired SSH service - :rtype: Void """ if service.find("ssh-"): self._service = service @@ -156,7 +140,6 @@ class _SSH_GSSAuth(object): username is not set by C{ssh_init_sec_context}. 
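# Editor's note -- illustrative sketch, not part of this patch. It exercises
# the NoValidConnectionsError class added above: one wrapped exception per
# resolved address, a unified message, and a None errno so that existing
# socket.error handlers keep working. The addresses and errors below are
# invented examples.
import socket

from paramiko.ssh_exception import NoValidConnectionsError

errors = {
    ('127.0.0.1', 22): socket.error(111, 'Connection refused'),
    ('::1', 22): socket.error(101, 'Network is unreachable'),
}
exc = NoValidConnectionsError(errors)
print(exc.strerror)  # Unable to connect to port 22 on 127.0.0.1 or ::1
print(exc.errno)     # None -- the stand-in described in the docstring
print(exc.errors[('127.0.0.1', 22)])

try:
    raise exc
except socket.error:
    # Still caught by plain socket.error handlers, as intended.
    pass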
:param str username: The name of the user who attempts to login - :rtype: Void """ self._username = username @@ -169,7 +152,6 @@ class _SSH_GSSAuth(object): :return: A byte sequence containing the number of supported OIDs, the length of the OID and the actual OID encoded with DER - :rtype: Bytes :note: In server mode we just return the OID length and the DER encoded OID. """ @@ -186,7 +168,6 @@ class _SSH_GSSAuth(object): :param str desired_mech: The desired GSS-API mechanism of the client :return: ``True`` if the given OID is supported, otherwise C{False} - :rtype: Boolean """ mech, __ = decoder.decode(desired_mech) if mech.__str__() != self._krb5_mech: @@ -194,14 +175,13 @@ class _SSH_GSSAuth(object): return True # Internals - #-------------------------------------------------------------------------- + # ------------------------------------------------------------------------- def _make_uint32(self, integer): """ Create a 32 bit unsigned integer (The byte sequence of an integer). :param int integer: The integer value to convert :return: The byte sequence of an 32 bit integer - :rtype: Bytes """ return struct.pack("!I", integer) @@ -221,7 +201,6 @@ class _SSH_GSSAuth(object): string service (ssh-connection), string authentication-method (gssapi-with-mic or gssapi-keyex) - :rtype: Bytes """ mic = self._make_uint32(len(session_id)) mic += session_id @@ -270,11 +249,11 @@ class _SSH_GSSAPI(_SSH_GSSAuth): ("pseudo negotiated" mechanism, because we support just the krb5 mechanism :-)) :param str recv_token: The GSS-API token received from the Server - :raise SSHException: Is raised if the desired mechanism of the client - is not supported - :return: A ``String`` if the GSS-API has returned a token or ``None`` if - no token was returned - :rtype: String or None + :raises: + `.SSHException` -- Is raised if the desired mechanism of the client + is not supported + :return: A ``String`` if the GSS-API has returned a token or + ``None`` if no token was returned """ self._username = username self._gss_host = target @@ -300,8 +279,9 @@ class _SSH_GSSAPI(_SSH_GSSAuth): else: token = self._gss_ctxt.step(recv_token) except gssapi.GSSException: - raise gssapi.GSSException("{0} Target: {1}".format(sys.exc_info()[1], - self._gss_host)) + message = "{0} Target: {1}".format( + sys.exc_info()[1], self._gss_host) + raise gssapi.GSSException(message) self._gss_ctxt_status = self._gss_ctxt.established return token @@ -317,8 +297,6 @@ class _SSH_GSSAPI(_SSH_GSSAuth): gssapi-keyex: Returns the MIC token from GSS-API with the SSH session ID as message. - :rtype: String - :see: `._ssh_build_mic` """ self._session_id = session_id if not gss_kex: @@ -342,7 +320,6 @@ class _SSH_GSSAPI(_SSH_GSSAuth): if it's not the initial call. 
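# Editor's note -- illustrative sketch, not part of this patch. It mirrors,
# as a standalone function, the MIC field layout described in the
# _ssh_build_mic docstring above: a uint32-length-prefixed session id, the
# single SSH_MSG_USERAUTH_REQUEST byte, then length-prefixed username,
# service, and authentication-method strings. Values passed at the bottom
# are invented placeholders.
import struct

MSG_USERAUTH_REQUEST = 50  # per RFC 4252

def _uint32(n):
    return struct.pack("!I", n)

def build_mic_field(session_id, username, service, auth_method):
    mic = _uint32(len(session_id)) + session_id
    mic += struct.pack("B", MSG_USERAUTH_REQUEST)
    for field in (username, service, auth_method):
        data = field.encode("utf-8")
        mic += _uint32(len(data)) + data
    return mic

mic_field = build_mic_field(
    b"\x00" * 16, "paramiko-user", "ssh-connection", "gssapi-with-mic")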
:return: A ``String`` if the GSS-API has returned a token or ``None`` if no token was returned - :rtype: String or None """ # hostname and username are not required for GSSAPI, but for SSPI self._gss_host = hostname @@ -360,8 +337,8 @@ class _SSH_GSSAPI(_SSH_GSSAuth): :param str mic_token: The MIC token received from the client :param str session_id: The SSH session ID :param str username: The name of the user who attempts to login - :return: 0 if the MIC check was successful and 1 if it fails - :rtype: int + :return: None if the MIC check was successful + :raises: ``gssapi.GSSException`` -- if the MIC check failed """ self._session_id = session_id self._username = username @@ -371,11 +348,7 @@ class _SSH_GSSAPI(_SSH_GSSAuth): self._username, self._service, self._auth_method) - try: - self._gss_srv_ctxt.verify_mic(mic_field, - mic_token) - except gssapi.BadSignature: - raise Exception("GSS-API MIC check failed.") + self._gss_srv_ctxt.verify_mic(mic_field, mic_token) else: # for key exchange with gssapi-keyex # client mode @@ -388,7 +361,6 @@ class _SSH_GSSAPI(_SSH_GSSAuth): Checks if credentials are delegated (server mode). :return: ``True`` if credentials are delegated, otherwise ``False`` - :rtype: bool """ if self._gss_srv_ctxt.delegated_cred is not None: return True @@ -401,8 +373,9 @@ class _SSH_GSSAPI(_SSH_GSSAuth): (server mode). :param str client_token: The GSS-API token received form the client - :raise NotImplementedError: Credential delegation is currently not - supported in server mode + :raises: + ``NotImplementedError`` -- Credential delegation is currently not + supported in server mode """ raise NotImplementedError @@ -422,12 +395,16 @@ class _SSH_SSPI(_SSH_GSSAuth): _SSH_GSSAuth.__init__(self, auth_method, gss_deleg_creds) if self._gss_deleg_creds: - self._gss_flags = sspicon.ISC_REQ_INTEGRITY |\ - sspicon.ISC_REQ_MUTUAL_AUTH |\ - sspicon.ISC_REQ_DELEGATE + self._gss_flags = ( + sspicon.ISC_REQ_INTEGRITY | + sspicon.ISC_REQ_MUTUAL_AUTH | + sspicon.ISC_REQ_DELEGATE + ) else: - self._gss_flags = sspicon.ISC_REQ_INTEGRITY |\ - sspicon.ISC_REQ_MUTUAL_AUTH + self._gss_flags = ( + sspicon.ISC_REQ_INTEGRITY | + sspicon.ISC_REQ_MUTUAL_AUTH + ) def ssh_init_sec_context(self, target, desired_mech=None, username=None, recv_token=None): @@ -440,11 +417,11 @@ class _SSH_SSPI(_SSH_GSSAuth): ("pseudo negotiated" mechanism, because we support just the krb5 mechanism :-)) :param recv_token: The SSPI token received from the Server - :raise SSHException: Is raised if the desired mechanism of the client - is not supported + :raises: + `.SSHException` -- Is raised if the desired mechanism of the client + is not supported :return: A ``String`` if the SSPI has returned a token or ``None`` if no token was returned - :rtype: String or None """ self._username = username self._gss_host = target @@ -489,8 +466,6 @@ class _SSH_SSPI(_SSH_GSSAuth): gssapi-keyex: Returns the MIC token from SSPI with the SSH session ID as message. - :rtype: String - :see: `._ssh_build_mic` """ self._session_id = session_id if not gss_kex: @@ -514,7 +489,6 @@ class _SSH_SSPI(_SSH_GSSAuth): if it's not the initial call. 
:return: A ``String`` if the SSPI has returned a token or ``None`` if no token was returned - :rtype: String or None """ self._gss_host = hostname self._username = username @@ -534,31 +508,26 @@ class _SSH_SSPI(_SSH_GSSAuth): :param str mic_token: The MIC token received from the client :param str session_id: The SSH session ID :param str username: The name of the user who attempts to login - :return: 0 if the MIC check was successful - :rtype: int + :return: None if the MIC check was successful + :raises: ``sspi.error`` -- if the MIC check failed """ self._session_id = session_id self._username = username - mic_status = 1 if username is not None: # server mode mic_field = self._ssh_build_mic(self._session_id, self._username, self._service, self._auth_method) - mic_status = self._gss_srv_ctxt.verify(mic_field, - mic_token) + # Verifies data and its signature. If verification fails, an + # sspi.error will be raised. + self._gss_srv_ctxt.verify(mic_field, mic_token) else: # for key exchange with gssapi-keyex # client mode - mic_status = self._gss_ctxt.verify(self._session_id, - mic_token) - """ - The SSPI method C{verify} has no return value, so if no SSPI error - is returned, set C{mic_status} to 0. - """ - mic_status = 0 - return mic_status + # Verifies data and its signature. If verification fails, an + # sspi.error will be raised. + self._gss_ctxt.verify(self._session_id, mic_token) @property def credentials_delegated(self): @@ -566,13 +535,11 @@ class _SSH_SSPI(_SSH_GSSAuth): Checks if credentials are delegated (server mode). :return: ``True`` if credentials are delegated, otherwise ``False`` - :rtype: Boolean """ return ( - self._gss_flags & sspicon.ISC_REQ_DELEGATE - ) and ( - self._gss_srv_ctxt_status or (self._gss_flags) - ) + self._gss_flags & sspicon.ISC_REQ_DELEGATE and + (self._gss_srv_ctxt_status or self._gss_flags) + ) def save_client_creds(self, client_token): """ @@ -581,7 +548,8 @@ class _SSH_SSPI(_SSH_GSSAuth): (server mode). :param str client_token: The SSPI token received form the client - :raise NotImplementedError: Credential delegation is currently not - supported in server mode + :raises: + ``NotImplementedError`` -- Credential delegation is currently not + supported in server mode """ raise NotImplementedError diff --git a/paramiko/transport.py b/paramiko/transport.py index 36da3043..e5218da4 100644 --- a/paramiko/transport.py +++ b/paramiko/transport.py @@ -1,4 +1,5 @@ # Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com> +# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com> # # This file is part of paramiko. 
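# Editor's note -- illustrative sketch, not part of this patch. The hunks
# above change ssh_check_mic() from returning a 0/1 status to returning None
# on success and letting the backend exception propagate on failure
# (gssapi.GSSException on Unix, sspi.error on Windows, per the updated
# docstrings). A server-side caller therefore moves from a status check to
# try/except; auth_context, session_id, mic and user are invented names.
def mic_ok(auth_context, mic, session_id, user):
    try:
        auth_context.ssh_check_mic(mic, session_id, user)
    except Exception:  # backend-specific: gssapi.GSSException or sspi.error
        return False
    return True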
# @@ -20,55 +21,58 @@ Core protocol implementation """ +from __future__ import print_function import os import socket import sys import threading import time import weakref -from hashlib import md5, sha1 +from hashlib import md5, sha1, sha256, sha512 + +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives.ciphers import algorithms, Cipher, modes import paramiko from paramiko import util from paramiko.auth_handler import AuthHandler from paramiko.ssh_gss import GSSAuth from paramiko.channel import Channel -from paramiko.common import xffffffff, cMSG_CHANNEL_OPEN, cMSG_IGNORE, \ - cMSG_GLOBAL_REQUEST, DEBUG, MSG_KEXINIT, MSG_IGNORE, MSG_DISCONNECT, \ - MSG_DEBUG, ERROR, WARNING, cMSG_UNIMPLEMENTED, INFO, cMSG_KEXINIT, \ - cMSG_NEWKEYS, MSG_NEWKEYS, cMSG_REQUEST_SUCCESS, cMSG_REQUEST_FAILURE, \ - CONNECTION_FAILED_CODE, OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED, \ - OPEN_SUCCEEDED, cMSG_CHANNEL_OPEN_FAILURE, cMSG_CHANNEL_OPEN_SUCCESS, \ - MSG_GLOBAL_REQUEST, MSG_REQUEST_SUCCESS, MSG_REQUEST_FAILURE, \ - MSG_CHANNEL_OPEN_SUCCESS, MSG_CHANNEL_OPEN_FAILURE, MSG_CHANNEL_OPEN, \ - MSG_CHANNEL_SUCCESS, MSG_CHANNEL_FAILURE, MSG_CHANNEL_DATA, \ - MSG_CHANNEL_EXTENDED_DATA, MSG_CHANNEL_WINDOW_ADJUST, MSG_CHANNEL_REQUEST, \ - MSG_CHANNEL_EOF, MSG_CHANNEL_CLOSE, MIN_WINDOW_SIZE, MIN_PACKET_SIZE, \ - MAX_WINDOW_SIZE, DEFAULT_WINDOW_SIZE, DEFAULT_MAX_PACKET_SIZE +from paramiko.common import ( + xffffffff, cMSG_CHANNEL_OPEN, cMSG_IGNORE, cMSG_GLOBAL_REQUEST, DEBUG, + MSG_KEXINIT, MSG_IGNORE, MSG_DISCONNECT, MSG_DEBUG, ERROR, WARNING, + cMSG_UNIMPLEMENTED, INFO, cMSG_KEXINIT, cMSG_NEWKEYS, MSG_NEWKEYS, + cMSG_REQUEST_SUCCESS, cMSG_REQUEST_FAILURE, CONNECTION_FAILED_CODE, + OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED, OPEN_SUCCEEDED, + cMSG_CHANNEL_OPEN_FAILURE, cMSG_CHANNEL_OPEN_SUCCESS, MSG_GLOBAL_REQUEST, + MSG_REQUEST_SUCCESS, MSG_REQUEST_FAILURE, MSG_CHANNEL_OPEN_SUCCESS, + MSG_CHANNEL_OPEN_FAILURE, MSG_CHANNEL_OPEN, MSG_CHANNEL_SUCCESS, + MSG_CHANNEL_FAILURE, MSG_CHANNEL_DATA, MSG_CHANNEL_EXTENDED_DATA, + MSG_CHANNEL_WINDOW_ADJUST, MSG_CHANNEL_REQUEST, MSG_CHANNEL_EOF, + MSG_CHANNEL_CLOSE, MIN_WINDOW_SIZE, MIN_PACKET_SIZE, MAX_WINDOW_SIZE, + DEFAULT_WINDOW_SIZE, DEFAULT_MAX_PACKET_SIZE, +) from paramiko.compress import ZlibCompressor, ZlibDecompressor from paramiko.dsskey import DSSKey -from paramiko.kex_gex import KexGex +from paramiko.ed25519key import Ed25519Key +from paramiko.kex_gex import KexGex, KexGexSHA256 from paramiko.kex_group1 import KexGroup1 from paramiko.kex_group14 import KexGroup14 -from paramiko.kex_gss import KexGSSGex, KexGSSGroup1, KexGSSGroup14, NullHostKey +from paramiko.kex_ecdh_nist import KexNistp256, KexNistp384, KexNistp521 +from paramiko.kex_gss import KexGSSGex, KexGSSGroup1, KexGSSGroup14 from paramiko.message import Message from paramiko.packet import Packetizer, NeedRekeyException from paramiko.primes import ModulusPack -from paramiko.py3compat import string_types, long, byte_ord, b +from paramiko.py3compat import string_types, long, byte_ord, b, input, PY2 from paramiko.rsakey import RSAKey from paramiko.ecdsakey import ECDSAKey from paramiko.server import ServerInterface from paramiko.sftp_client import SFTPClient -from paramiko.ssh_exception import (SSHException, BadAuthenticationType, - ChannelException, ProxyCommandFailure) +from paramiko.ssh_exception import ( + SSHException, BadAuthenticationType, ChannelException, ProxyCommandFailure, +) from paramiko.util import retry_on_signal, ClosingContextManager, clamp_value -from 
Crypto.Cipher import Blowfish, AES, DES3, ARC4 -try: - from Crypto.Util import Counter -except ImportError: - from paramiko.util import Counter - # for thread cleanup _active_threads = [] @@ -77,11 +81,12 @@ def _join_lingering_threads(): for thr in _active_threads: thr.stop_thread() + import atexit atexit.register(_join_lingering_threads) -class Transport (threading.Thread, ClosingContextManager): +class Transport(threading.Thread, ClosingContextManager): """ An SSH Transport attaches to a stream (usually a socket), negotiates an encrypted session, authenticates, and then creates stream tunnels, called @@ -91,30 +96,108 @@ class Transport (threading.Thread, ClosingContextManager): Instances of this class may be used as context managers. """ + _ENCRYPT = object() + _DECRYPT = object() + _PROTO_ID = '2.0' _CLIENT_ID = 'paramiko_%s' % paramiko.__version__ - _preferred_ciphers = ('aes128-ctr', 'aes256-ctr', 'aes128-cbc', 'blowfish-cbc', - 'aes256-cbc', '3des-cbc', 'arcfour128', 'arcfour256') - _preferred_macs = ('hmac-sha1', 'hmac-md5', 'hmac-sha1-96', 'hmac-md5-96') - _preferred_keys = ('ssh-rsa', 'ssh-dss', 'ecdsa-sha2-nistp256') - _preferred_kex = ( 'diffie-hellman-group14-sha1', 'diffie-hellman-group-exchange-sha1' , 'diffie-hellman-group1-sha1') + # These tuples of algorithm identifiers are in preference order; do not + # reorder without reason! + _preferred_ciphers = ( + 'aes128-ctr', + 'aes192-ctr', + 'aes256-ctr', + 'aes128-cbc', + 'blowfish-cbc', + 'aes192-cbc', + 'aes256-cbc', + '3des-cbc', + ) + _preferred_macs = ( + 'hmac-sha2-256', + 'hmac-sha2-512', + 'hmac-sha1', + 'hmac-md5', + 'hmac-sha1-96', + 'hmac-md5-96', + ) + _preferred_keys = ( + 'ecdsa-sha2-nistp256', + 'ecdsa-sha2-nistp384', + 'ecdsa-sha2-nistp521', + 'ssh-ed25519', + 'ssh-rsa', + 'ssh-dss', + ) + _preferred_kex = ( + 'ecdh-sha2-nistp256', + 'ecdh-sha2-nistp384', + 'ecdh-sha2-nistp521', + 'diffie-hellman-group-exchange-sha256', + 'diffie-hellman-group-exchange-sha1', + 'diffie-hellman-group14-sha1', + 'diffie-hellman-group1-sha1', + ) _preferred_compression = ('none',) _cipher_info = { - 'aes128-ctr': {'class': AES, 'mode': AES.MODE_CTR, 'block-size': 16, 'key-size': 16}, - 'aes256-ctr': {'class': AES, 'mode': AES.MODE_CTR, 'block-size': 16, 'key-size': 32}, - 'blowfish-cbc': {'class': Blowfish, 'mode': Blowfish.MODE_CBC, 'block-size': 8, 'key-size': 16}, - 'aes128-cbc': {'class': AES, 'mode': AES.MODE_CBC, 'block-size': 16, 'key-size': 16}, - 'aes256-cbc': {'class': AES, 'mode': AES.MODE_CBC, 'block-size': 16, 'key-size': 32}, - '3des-cbc': {'class': DES3, 'mode': DES3.MODE_CBC, 'block-size': 8, 'key-size': 24}, - 'arcfour128': {'class': ARC4, 'mode': None, 'block-size': 8, 'key-size': 16}, - 'arcfour256': {'class': ARC4, 'mode': None, 'block-size': 8, 'key-size': 32}, + 'aes128-ctr': { + 'class': algorithms.AES, + 'mode': modes.CTR, + 'block-size': 16, + 'key-size': 16 + }, + 'aes192-ctr': { + 'class': algorithms.AES, + 'mode': modes.CTR, + 'block-size': 16, + 'key-size': 24 + }, + 'aes256-ctr': { + 'class': algorithms.AES, + 'mode': modes.CTR, + 'block-size': 16, + 'key-size': 32 + }, + 'blowfish-cbc': { + 'class': algorithms.Blowfish, + 'mode': modes.CBC, + 'block-size': 8, + 'key-size': 16 + }, + 'aes128-cbc': { + 'class': algorithms.AES, + 'mode': modes.CBC, + 'block-size': 16, + 'key-size': 16 + }, + 'aes192-cbc': { + 'class': algorithms.AES, + 'mode': modes.CBC, + 'block-size': 16, + 'key-size': 24 + }, + 'aes256-cbc': { + 'class': algorithms.AES, + 'mode': modes.CBC, + 'block-size': 16, + 'key-size': 32 + }, 
+ '3des-cbc': { + 'class': algorithms.TripleDES, + 'mode': modes.CBC, + 'block-size': 8, + 'key-size': 24 + }, } + _mac_info = { 'hmac-sha1': {'class': sha1, 'size': 20}, 'hmac-sha1-96': {'class': sha1, 'size': 12}, + 'hmac-sha2-256': {'class': sha256, 'size': 32}, + 'hmac-sha2-512': {'class': sha512, 'size': 64}, 'hmac-md5': {'class': md5, 'size': 16}, 'hmac-md5-96': {'class': md5, 'size': 12}, } @@ -123,15 +206,22 @@ class Transport (threading.Thread, ClosingContextManager): 'ssh-rsa': RSAKey, 'ssh-dss': DSSKey, 'ecdsa-sha2-nistp256': ECDSAKey, + 'ecdsa-sha2-nistp384': ECDSAKey, + 'ecdsa-sha2-nistp521': ECDSAKey, + 'ssh-ed25519': Ed25519Key, } _kex_info = { 'diffie-hellman-group1-sha1': KexGroup1, 'diffie-hellman-group14-sha1': KexGroup14, 'diffie-hellman-group-exchange-sha1': KexGex, + 'diffie-hellman-group-exchange-sha256': KexGexSHA256, 'gss-group1-sha1-toWM5Slw5Ew8Mqkay+al2g==': KexGSSGroup1, 'gss-group14-sha1-toWM5Slw5Ew8Mqkay+al2g==': KexGSSGroup14, - 'gss-gex-sha1-toWM5Slw5Ew8Mqkay+al2g==': KexGSSGex + 'gss-gex-sha1-toWM5Slw5Ew8Mqkay+al2g==': KexGSSGex, + 'ecdh-sha2-nistp256': KexNistp256, + 'ecdh-sha2-nistp384': KexNistp384, + 'ecdh-sha2-nistp521': KexNistp521, } _compression_info = { @@ -144,6 +234,7 @@ class Transport (threading.Thread, ClosingContextManager): } _modulus_pack = None + _active_check_timeout = 0.1 def __init__(self, sock, @@ -153,8 +244,8 @@ class Transport (threading.Thread, ClosingContextManager): gss_deleg_creds=True): """ Create a new SSH session over an existing socket, or socket-like - object. This only creates the `.Transport` object; it doesn't begin the - SSH session yet. Use `connect` or `start_client` to begin a client + object. This only creates the `.Transport` object; it doesn't begin + the SSH session yet. Use `connect` or `start_client` to begin a client session, or `start_server` to begin a server session. If the object is not actually a socket, it must have the following @@ -196,6 +287,7 @@ class Transport (threading.Thread, ClosingContextManager): arguments. """ self.active = False + self._sshclient = None if isinstance(sock, string_types): # convert "host:port" into (host, port) @@ -208,10 +300,13 @@ class Transport (threading.Thread, ClosingContextManager): # connect to the given (host, port) hostname, port = sock reason = 'No suitable address family' - for (family, socktype, proto, canonname, sockaddr) in socket.getaddrinfo(hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM): + addrinfos = socket.getaddrinfo( + hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM + ) + for family, socktype, proto, canonname, sockaddr in addrinfos: if socktype == socket.SOCK_STREAM: af = family - addr = sockaddr + # addr = sockaddr sock = socket.socket(af, socket.SOCK_STREAM) try: retry_on_signal(lambda: sock.connect((hostname, port))) @@ -231,7 +326,7 @@ class Transport (threading.Thread, ClosingContextManager): # we set the timeout so we can check self.active periodically to # see if we should bail. socket.timeout exception is never # propagated. 
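# Editor's note -- illustrative sketch, not part of this patch. The
# _cipher_info table above now stores pyca/cryptography algorithm and mode
# classes instead of PyCrypto ones; later in this file, _get_cipher() turns
# an entry into an encryptor or decryptor roughly as below. The key/IV here
# are throwaway random bytes, not real session keys.
import os

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

entry = {'class': algorithms.AES, 'mode': modes.CTR,
         'block-size': 16, 'key-size': 16}  # mirrors the 'aes128-ctr' entry
key = os.urandom(entry['key-size'])
iv = os.urandom(entry['block-size'])

encryptor = Cipher(entry['class'](key), entry['mode'](iv),
                   backend=default_backend()).encryptor()
decryptor = Cipher(entry['class'](key), entry['mode'](iv),
                   backend=default_backend()).decryptor()

ciphertext = encryptor.update(b'sixteen byte msg')
assert decryptor.update(ciphertext) == b'sixteen byte msg'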
- self.sock.settimeout(0.1) + self.sock.settimeout(self._active_check_timeout) except AttributeError: pass @@ -271,7 +366,8 @@ class Transport (threading.Thread, ClosingContextManager): self.in_kex = False self.authenticated = False self._expected_packet = tuple() - self.lock = threading.Lock() # synchronization (always higher level than write_lock) + # synchronization (always higher level than write_lock) + self.lock = threading.Lock() # tracking open channels self._channels = ChannelMap() @@ -292,9 +388,17 @@ class Transport (threading.Thread, ClosingContextManager): self.logger = util.get_logger(self.log_name) self.packetizer.set_log(self.logger) self.auth_handler = None - self.global_response = None # response Message from an arbitrary global request - self.completion_event = None # user-defined event callbacks - self.banner_timeout = 15 # how long (seconds) to wait for the SSH banner + # response Message from an arbitrary global request + self.global_response = None + # user-defined event callbacks + self.completion_event = None + # how long (seconds) to wait for the SSH banner + self.banner_timeout = 15 + # how long (seconds) to wait for the handshake to finish after SSH + # banner sent. + self.handshake_timeout = 15 + # how long (seconds) to wait for the auth response. + self.auth_timeout = 30 # server mode: self.server_mode = False @@ -313,8 +417,10 @@ class Transport (threading.Thread, ClosingContextManager): out += ' (unconnected)' else: if self.local_cipher != '': - out += ' (cipher %s, %d bits)' % (self.local_cipher, - self._cipher_info[self.local_cipher]['key-size'] * 8) + out += ' (cipher %s, %d bits)' % ( + self.local_cipher, + self._cipher_info[self.local_cipher]['key-size'] * 8 + ) if self.is_authenticated(): out += ' (active; %d open channel(s))' % len(self._channels) elif self.initial_kex_done: @@ -352,12 +458,11 @@ class Transport (threading.Thread, ClosingContextManager): :param str gss_host: The targets name in the kerberos database Default: The name of the host to connect to - :rtype: Void """ # We need the FQDN to get this working with SSPI self.gss_host = socket.getfqdn(gss_host) - def start_client(self, event=None): + def start_client(self, event=None, timeout=None): """ Negotiate a new SSH2 session as a client. This is the first step after creating a new `.Transport`. A separate thread is created for protocol @@ -368,7 +473,7 @@ class Transport (threading.Thread, ClosingContextManager): be triggered. On failure, `is_active` will return ``False``. (Since 1.4) If ``event`` is ``None``, this method will not return until - negotation is done. On success, the method returns normally. + negotiation is done. On success, the method returns normally. Otherwise an SSHException is raised. 
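# Editor's note -- illustrative sketch, not part of this patch. The hunk
# above adds handshake_timeout and auth_timeout next to the existing
# banner_timeout; all three are plain attributes that can be tuned on a
# Transport before negotiation starts. Host, port and values below are
# invented placeholders.
import socket

import paramiko

sock = socket.create_connection(('ssh.example.com', 22), timeout=10)
transport = paramiko.Transport(sock)
transport.banner_timeout = 30     # seconds to wait for the SSH banner
transport.handshake_timeout = 20  # then for the post-banner handshake
transport.auth_timeout = 60       # then for the auth response
transport.start_client()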
After a successful negotiation, you will usually want to authenticate, @@ -385,8 +490,12 @@ class Transport (threading.Thread, ClosingContextManager): :param .threading.Event event: an event to trigger when negotiation is complete (optional) - :raises SSHException: if negotiation fails (and no ``event`` was passed - in) + :param float timeout: + a timeout, in seconds, for SSH2 session negotiation (optional) + + :raises: + `.SSHException` -- if negotiation fails (and no ``event`` was + passed in) """ self.active = True if event is not None: @@ -398,6 +507,7 @@ class Transport (threading.Thread, ClosingContextManager): # synchronous, wait for a result self.completion_event = event = threading.Event() self.start() + max_time = time.time() + timeout if timeout is not None else None while True: event.wait(0.1) if not self.active: @@ -405,7 +515,10 @@ class Transport (threading.Thread, ClosingContextManager): if e is not None: raise e raise SSHException('Negotiation failed.') - if event.is_set(): + if ( + event.is_set() or + (timeout is not None and time.time() >= max_time) + ): break def start_server(self, event=None, server=None): @@ -419,7 +532,7 @@ class Transport (threading.Thread, ClosingContextManager): be triggered. On failure, `is_active` will return ``False``. (Since 1.4) If ``event`` is ``None``, this method will not return until - negotation is done. On success, the method returns normally. + negotiation is done. On success, the method returns normally. Otherwise an SSHException is raised. After a successful negotiation, the client will need to authenticate. @@ -446,8 +559,9 @@ class Transport (threading.Thread, ClosingContextManager): an object used to perform authentication and create `channels <.Channel>` - :raises SSHException: if negotiation fails (and no ``event`` was passed - in) + :raises: + `.SSHException` -- if negotiation fails (and no ``event`` was + passed in) """ if server is None: server = ServerInterface() @@ -549,6 +663,9 @@ class Transport (threading.Thread, ClosingContextManager): Transport._modulus_pack = None return False + def set_sshclient(self, sshclient): + self._sshclient = sshclient + def close(self): """ Close this session, and any open channels that are tied to it. @@ -559,6 +676,7 @@ class Transport (threading.Thread, ClosingContextManager): for chan in list(self._channels.values()): chan._unlink() self.sock.close() + self._sshclient = None def get_remote_server_key(self): """ @@ -569,7 +687,7 @@ class Transport (threading.Thread, ClosingContextManager): string)``. You can get the same effect by calling `.PKey.get_name` for the key type, and ``str(key)`` for the key string. - :raises SSHException: if no session is currently active. + :raises: `.SSHException` -- if no session is currently active. :return: public key (`.PKey`) of the remote server """ @@ -587,7 +705,12 @@ class Transport (threading.Thread, ClosingContextManager): """ return self.active - def open_session(self, window_size=None, max_packet_size=None): + def open_session( + self, + window_size=None, + max_packet_size=None, + timeout=None, + ): """ Request a new channel to the server, of type ``"session"``. This is just an alias for calling `open_channel` with an argument of @@ -604,15 +727,19 @@ class Transport (threading.Thread, ClosingContextManager): :return: a new `.Channel` - :raises SSHException: if the request is rejected or the session ends + :raises: + `.SSHException` -- if the request is rejected or the session ends prematurely + .. 
versionchanged:: 1.13.4/1.14.3/1.15.3 + Added the ``timeout`` argument. .. versionchanged:: 1.15 Added the ``window_size`` and ``max_packet_size`` arguments. """ return self.open_channel('session', window_size=window_size, - max_packet_size=max_packet_size) + max_packet_size=max_packet_size, + timeout=timeout) def open_x11_channel(self, src_addr=None): """ @@ -624,7 +751,8 @@ class Transport (threading.Thread, ClosingContextManager): x11 port, ie. 6010) :return: a new `.Channel` - :raises SSHException: if the request is rejected or the session ends + :raises: + `.SSHException` -- if the request is rejected or the session ends prematurely """ return self.open_channel('x11', src_addr=src_addr) @@ -638,14 +766,15 @@ class Transport (threading.Thread, ClosingContextManager): :return: a new `.Channel` - :raises SSHException: + :raises: `.SSHException` -- if the request is rejected or the session ends prematurely """ return self.open_channel('auth-agent@openssh.com') def open_forwarded_tcpip_channel(self, src_addr, dest_addr): """ - Request a new channel back to the client, of type ``"forwarded-tcpip"``. + Request a new channel back to the client, of type ``forwarded-tcpip``. + This is used after a client has requested port forwarding, for sending incoming connections back to the client. @@ -659,7 +788,8 @@ class Transport (threading.Thread, ClosingContextManager): dest_addr=None, src_addr=None, window_size=None, - max_packet_size=None): + max_packet_size=None, + timeout=None): """ Request a new channel to the server. `Channels <.Channel>` are socket-like objects used for the actual transfer of data across the @@ -683,17 +813,21 @@ class Transport (threading.Thread, ClosingContextManager): optional window size for this session. :param int max_packet_size: optional max packet size for this session. + :param float timeout: + optional timeout opening a channel, default 3600s (1h) :return: a new `.Channel` on success - :raises SSHException: if the request is rejected or the session ends - prematurely + :raises: + `.SSHException` -- if the request is rejected, the session ends + prematurely or there is a timeout openning a channel .. versionchanged:: 1.15 Added the ``window_size`` and ``max_packet_size`` arguments. """ if not self.active: raise SSHException('SSH session not active') + timeout = 3600 if timeout is None else timeout self.lock.acquire() try: window_size = self._sanitize_window_size(window_size) @@ -722,6 +856,7 @@ class Transport (threading.Thread, ClosingContextManager): finally: self.lock.release() self._send_user_message(m) + start_ts = time.time() while True: event.wait(0.1) if not self.active: @@ -731,6 +866,8 @@ class Transport (threading.Thread, ClosingContextManager): raise e if event.is_set(): break + elif start_ts + timeout < time.time(): + raise SSHException('Timeout openning channel.') chan = self._channels.get(chanid) if chan is not None: return chan @@ -747,7 +884,11 @@ class Transport (threading.Thread, ClosingContextManager): If a handler is given, that handler is called from a different thread whenever a forwarded connection arrives. The handler parameters are:: - handler(channel, (origin_addr, origin_port), (server_addr, server_port)) + handler( + channel, + (origin_addr, origin_port), + (server_addr, server_port), + ) where ``server_addr`` and ``server_port`` are the address and port that the server was listening on. 
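# Editor's note -- illustrative sketch, not part of this patch. The hunks
# above thread an optional ``timeout`` (seconds, default 3600) through
# open_session()/open_channel(); when it expires the call raises SSHException
# instead of blocking forever. ``transport`` is assumed to be an already
# authenticated paramiko.Transport.
from paramiko.ssh_exception import SSHException

def open_session_with_deadline(transport, seconds=5.0):
    try:
        return transport.open_session(timeout=seconds)
    except SSHException:
        # Covers rejection, premature session end, and the new timeout path.
        return None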
@@ -765,20 +906,23 @@ class Transport (threading.Thread, ClosingContextManager): :return: the port number (`int`) allocated by the server - :raises SSHException: if the server refused the TCP forward request + :raises: + `.SSHException` -- if the server refused the TCP forward request """ if not self.active: raise SSHException('SSH session not active') port = int(port) - response = self.global_request('tcpip-forward', (address, port), wait=True) + response = self.global_request( + 'tcpip-forward', (address, port), wait=True + ) if response is None: raise SSHException('TCP forwarding request denied') if port == 0: port = response.get_int() if handler is None: def default_handler(channel, src_addr, dest_addr_port): - #src_addr, src_port = src_addr_port - #dest_addr, dest_port = dest_addr_port + # src_addr, src_port = src_addr_port + # dest_addr, dest_port = dest_addr_port self._queue_incoming_channel(channel) handler = default_handler self._tcp_handler = handler @@ -837,8 +981,9 @@ class Transport (threading.Thread, ClosingContextManager): traffic both ways as the two sides swap keys and do computations. This method returns when the session has switched to new keys. - :raises SSHException: if the key renegotiation failed (which causes the - session to end) + :raises: + `.SSHException` -- if the key renegotiation failed (which causes + the session to end) """ self.completion_event = threading.Event() self._send_kex_init() @@ -864,8 +1009,9 @@ class Transport (threading.Thread, ClosingContextManager): seconds to wait before sending a keepalive packet (or 0 to disable keepalives). """ - self.packetizer.set_keepalive(interval, - lambda x=weakref.proxy(self): x.global_request('keepalive@lag.net', wait=False)) + def _request(x=weakref.proxy(self)): + return x.global_request('keepalive@lag.net', wait=False) + self.packetizer.set_keepalive(interval, _request) def global_request(self, kind, data=None, wait=True): """ @@ -907,8 +1053,8 @@ class Transport (threading.Thread, ClosingContextManager): def accept(self, timeout=None): """ Return the next channel opened by the client over this transport, in - server mode. If no channel is opened before the given timeout, ``None`` - is returned. + server mode. If no channel is opened before the given timeout, + ``None`` is returned. :param int timeout: seconds to wait for a channel, or ``None`` to wait forever @@ -929,8 +1075,17 @@ class Transport (threading.Thread, ClosingContextManager): self.lock.release() return chan - def connect(self, hostkey=None, username='', password=None, pkey=None, - gss_host=None, gss_auth=False, gss_kex=False, gss_deleg_creds=True): + def connect( + self, + hostkey=None, + username='', + password=None, + pkey=None, + gss_host=None, + gss_auth=False, + gss_kex=False, + gss_deleg_creds=True, + ): """ Negotiate an SSH2 session, and optionally verify the server's host key and authenticate using a password or private key. This is a shortcut @@ -969,7 +1124,7 @@ class Transport (threading.Thread, ClosingContextManager): :param bool gss_deleg_creds: Whether to delegate GSS-API client credentials. - :raises SSHException: if the SSH2 negotiation fails, the host key + :raises: `.SSHException` -- if the SSH2 negotiation fails, the host key supplied by the server is incorrect, or authentication fails. """ if hostkey is not None: @@ -982,16 +1137,23 @@ class Transport (threading.Thread, ClosingContextManager): # the host key. 
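# Editor's note -- illustrative sketch, not part of this patch. It strings
# together the client-side helpers touched around here: connect() as the
# negotiate/verify/authenticate shortcut, and set_keepalive(), whose refactor
# above merely names the keepalive closure instead of using a lambda.
# Hostname and credentials are invented placeholders; passing no hostkey
# skips host key verification, as the connect() docstring notes.
import paramiko

transport = paramiko.Transport(('ssh.example.com', 22))
transport.connect(username='deploy', password='secret')
transport.set_keepalive(30)  # one keepalive@lag.net global request every 30s
chan = transport.open_session()
chan.exec_command('uptime')
print(chan.recv(1024))
transport.close()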
if (hostkey is not None) and not gss_kex: key = self.get_remote_server_key() - if (key.get_name() != hostkey.get_name()) or (key.asbytes() != hostkey.asbytes()): + if ( + key.get_name() != hostkey.get_name() or + key.asbytes() != hostkey.asbytes() + ): self._log(DEBUG, 'Bad host key from server') - self._log(DEBUG, 'Expected: %s: %s' % (hostkey.get_name(), repr(hostkey.asbytes()))) - self._log(DEBUG, 'Got : %s: %s' % (key.get_name(), repr(key.asbytes()))) + self._log(DEBUG, 'Expected: %s: %s' % ( + hostkey.get_name(), repr(hostkey.asbytes())) + ) + self._log(DEBUG, 'Got : %s: %s' % ( + key.get_name(), repr(key.asbytes())) + ) raise SSHException('Bad host key from server') self._log(DEBUG, 'Host key verified (%s)' % hostkey.get_name()) if (pkey is not None) or (password is not None) or gss_auth or gss_kex: if gss_auth: - self._log(DEBUG, 'Attempting GSS-API auth... (gssapi-with-mic)') + self._log(DEBUG, 'Attempting GSS-API auth... (gssapi-with-mic)') # noqa self.auth_gssapi_with_mic(username, gss_host, gss_deleg_creds) elif gss_kex: self._log(DEBUG, 'Attempting GSS-API auth... (gssapi-keyex)') @@ -1036,7 +1198,7 @@ class Transport (threading.Thread, ClosingContextManager): passed to the `.SubsystemHandler` constructor later. :param str name: name of the subsystem. - :param class handler: + :param handler: subclass of `.SubsystemHandler` that handles this subsystem. """ try: @@ -1054,7 +1216,11 @@ class Transport (threading.Thread, ClosingContextManager): successfully; False if authentication failed and/or the session is closed. """ - return self.active and (self.auth_handler is not None) and self.auth_handler.is_authenticated() + return ( + self.active and + self.auth_handler is not None and + self.auth_handler.is_authenticated() + ) def get_username(self): """ @@ -1093,9 +1259,11 @@ class Transport (threading.Thread, ClosingContextManager): `list` of auth types permissible for the next stage of authentication (normally empty) - :raises BadAuthenticationType: if "none" authentication isn't allowed + :raises: + `.BadAuthenticationType` -- if "none" authentication isn't allowed by the server for this user - :raises SSHException: if the authentication failed due to a network + :raises: + `.SSHException` -- if the authentication failed due to a network error .. 
versionadded:: 1.5 @@ -1146,14 +1314,17 @@ class Transport (threading.Thread, ClosingContextManager): `list` of auth types permissible for the next stage of authentication (normally empty) - :raises BadAuthenticationType: if password authentication isn't + :raises: + `.BadAuthenticationType` -- if password authentication isn't allowed by the server for this user (and no event was passed in) - :raises AuthenticationException: if the authentication failed (and no + :raises: + `.AuthenticationException` -- if the authentication failed (and no event was passed in) - :raises SSHException: if there was a network error + :raises: `.SSHException` -- if there was a network error """ if (not self.active) or (not self.initial_kex_done): - # we should never try to send the password unless we're on a secure link + # we should never try to send the password unless we're on a secure + # link raise SSHException('No existing session') if event is None: my_event = threading.Event() @@ -1167,7 +1338,8 @@ class Transport (threading.Thread, ClosingContextManager): try: return self.auth_handler.wait_for_response(my_event) except BadAuthenticationType as e: - # if password auth isn't allowed, but keyboard-interactive *is*, try to fudge it + # if password auth isn't allowed, but keyboard-interactive *is*, + # try to fudge it if not fallback or ('keyboard-interactive' not in e.allowed_types): raise try: @@ -1213,11 +1385,13 @@ class Transport (threading.Thread, ClosingContextManager): `list` of auth types permissible for the next stage of authentication (normally empty) - :raises BadAuthenticationType: if public-key authentication isn't + :raises: + `.BadAuthenticationType` -- if public-key authentication isn't allowed by the server for this user (and no event was passed in) - :raises AuthenticationException: if the authentication failed (and no + :raises: + `.AuthenticationException` -- if the authentication failed (and no event was passed in) - :raises SSHException: if there was a network error + :raises: `.SSHException` -- if there was a network error """ if (not self.active) or (not self.initial_kex_done): # we should never try to authenticate unless we're on a secure link @@ -1269,10 +1443,10 @@ class Transport (threading.Thread, ClosingContextManager): `list` of auth types permissible for the next stage of authentication (normally empty). - :raises BadAuthenticationType: if public-key authentication isn't + :raises: `.BadAuthenticationType` -- if public-key authentication isn't allowed by the server for this user - :raises AuthenticationException: if the authentication failed - :raises SSHException: if there was a network error + :raises: `.AuthenticationException` -- if the authentication failed + :raises: `.SSHException` -- if there was a network error .. versionadded:: 1.5 """ @@ -1281,9 +1455,32 @@ class Transport (threading.Thread, ClosingContextManager): raise SSHException('No existing session') my_event = threading.Event() self.auth_handler = AuthHandler(self) - self.auth_handler.auth_interactive(username, handler, my_event, submethods) + self.auth_handler.auth_interactive( + username, handler, my_event, submethods + ) return self.auth_handler.wait_for_response(my_event) + def auth_interactive_dumb(self, username, handler=None, submethods=''): + """ + Autenticate to the server interactively but dumber. + Just print the prompt and / or instructions to stdout and send back + the response. This is good for situations where partial auth is + achieved by key and then the user has to enter a 2fac token. 
+ """ + + if not handler: + def handler(title, instructions, prompt_list): + answers = [] + if title: + print(title.strip()) + if instructions: + print(instructions.strip()) + for prompt, show_input in prompt_list: + print(prompt.strip(), end=' ') + answers.append(input()) + return answers + return self.auth_interactive(username, handler, submethods) + def auth_gssapi_with_mic(self, username, gss_host, gss_deleg_creds): """ Authenticate to the Server using GSS-API / SSPI. @@ -1294,36 +1491,37 @@ class Transport (threading.Thread, ClosingContextManager): :return: list of auth types permissible for the next stage of authentication (normally empty) :rtype: list - :raise BadAuthenticationType: if gssapi-with-mic isn't + :raises: `.BadAuthenticationType` -- if gssapi-with-mic isn't allowed by the server (and no event was passed in) - :raise AuthenticationException: if the authentication failed (and no + :raises: + `.AuthenticationException` -- if the authentication failed (and no event was passed in) - :raise SSHException: if there was a network error + :raises: `.SSHException` -- if there was a network error """ if (not self.active) or (not self.initial_kex_done): # we should never try to authenticate unless we're on a secure link raise SSHException('No existing session') my_event = threading.Event() self.auth_handler = AuthHandler(self) - self.auth_handler.auth_gssapi_with_mic(username, gss_host, gss_deleg_creds, my_event) + self.auth_handler.auth_gssapi_with_mic( + username, gss_host, gss_deleg_creds, my_event + ) return self.auth_handler.wait_for_response(my_event) def auth_gssapi_keyex(self, username): """ - Authenticate to the Server with GSS-API / SSPI if GSS-API Key Exchange - was the used key exchange method. + Authenticate to the server with GSS-API/SSPI if GSS-API kex is in use. - :param str username: The username to authenticate as - :param str gss_host: The target host - :param bool gss_deleg_creds: Delegate credentials or not - :return: list of auth types permissible for the next stage of - authentication (normally empty) - :rtype: list - :raise BadAuthenticationType: if GSS-API Key Exchange was not performed - (and no event was passed in) - :raise AuthenticationException: if the authentication failed (and no - event was passed in) - :raise SSHException: if there was a network error + :param str username: The username to authenticate as. + :returns: + a `list` of auth types permissible for the next stage of + authentication (normally empty) + :raises: `.BadAuthenticationType` -- + if GSS-API Key Exchange was not performed (and no event was passed + in) + :raises: `.AuthenticationException` -- + if the authentication failed (and no event was passed in) + :raises: `.SSHException` -- if there was a network error """ if (not self.active) or (not self.initial_kex_done): # we should never try to authenticate unless we're on a secure link @@ -1401,9 +1599,10 @@ class Transport (threading.Thread, ClosingContextManager): def getpeername(self): """ Return the address of the remote side of this Transport, if possible. - This is effectively a wrapper around ``'getpeername'`` on the underlying - socket. If the socket-like object has no ``'getpeername'`` method, - then ``("unknown", 0)`` is returned. + + This is effectively a wrapper around ``getpeername`` on the underlying + socket. If the socket-like object has no ``getpeername`` method, then + ``("unknown", 0)`` is returned. 
:return: the address of the remote host, if known, as a ``(str, int)`` @@ -1417,10 +1616,27 @@ class Transport (threading.Thread, ClosingContextManager): def stop_thread(self): self.active = False self.packetizer.close() - while self.is_alive() and (self is not threading.current_thread()): - self.join(10) - - ### internals... + if PY2: + # Original join logic; #520 doesn't appear commonly present under + # Python 2. + while self.is_alive() and self is not threading.current_thread(): + self.join(10) + else: + # Keep trying to join() our main thread, quickly, until: + # * We join()ed successfully (self.is_alive() == False) + # * Or it looks like we've hit issue #520 (socket.recv hitting some + # race condition preventing it from timing out correctly), wherein + # our socket and packetizer are both closed (but where we'd + # otherwise be sitting forever on that recv()). + while ( + self.is_alive() and + self is not threading.current_thread() and + not self.sock._closed and + not self.packetizer.closed + ): + self.join(0.1) + + # internals... def _log(self, level, msg, *args): if issubclass(type(msg), list): @@ -1458,28 +1674,32 @@ class Transport (threading.Thread, ClosingContextManager): while True: self.clear_to_send.wait(0.1) if not self.active: - self._log(DEBUG, 'Dropping user packet because connection is dead.') + self._log(DEBUG, 'Dropping user packet because connection is dead.') # noqa return self.clear_to_send_lock.acquire() if self.clear_to_send.is_set(): break self.clear_to_send_lock.release() if time.time() > start + self.clear_to_send_timeout: - raise SSHException('Key-exchange timed out waiting for key negotiation') + raise SSHException('Key-exchange timed out waiting for key negotiation') # noqa try: self._send_message(data) finally: self.clear_to_send_lock.release() def _set_K_H(self, k, h): - """used by a kex object to set the K (root key) and H (exchange hash)""" + """ + Used by a kex obj to set the K (root key) and H (exchange hash). + """ self.K = k self.H = h if self.session_id is None: self.session_id = h def _expect_packet(self, *ptypes): - """used by a kex object to register the next packet type it expects to see""" + """ + Used by a kex obj to register the next packet type it expects to see. + """ self._expected_packet = tuple(ptypes) def _verify_key(self, host_key, sig): @@ -1487,7 +1707,7 @@ class Transport (threading.Thread, ClosingContextManager): if key is None: raise SSHException('Unknown host key type') if not key.verify_ssh_sig(self.H, Message(sig)): - raise SSHException('Signature verification (%s) failed.' % self.host_key_type) + raise SSHException('Signature verification (%s) failed.' % self.host_key_type) # noqa self.host_key = key def _compute_key(self, id, nbytes): @@ -1497,33 +1717,42 @@ class Transport (threading.Thread, ClosingContextManager): m.add_bytes(self.H) m.add_byte(b(id)) m.add_bytes(self.session_id) - out = sofar = sha1(m.asbytes()).digest() + # Fallback to SHA1 for kex engines that fail to specify a hex + # algorithm, or for e.g. transport tests that don't run kexinit. 
+ hash_algo = getattr(self.kex_engine, 'hash_algo', None) + hash_select_msg = "kex engine %s specified hash_algo %r" % ( + self.kex_engine.__class__.__name__, hash_algo + ) + if hash_algo is None: + hash_algo = sha1 + hash_select_msg += ", falling back to sha1" + if not hasattr(self, '_logged_hash_selection'): + self._log(DEBUG, hash_select_msg) + setattr(self, '_logged_hash_selection', True) + out = sofar = hash_algo(m.asbytes()).digest() while len(out) < nbytes: m = Message() m.add_mpint(self.K) m.add_bytes(self.H) m.add_bytes(sofar) - digest = sha1(m.asbytes()).digest() + digest = hash_algo(m.asbytes()).digest() out += digest sofar += digest return out[:nbytes] - def _get_cipher(self, name, key, iv): + def _get_cipher(self, name, key, iv, operation): if name not in self._cipher_info: raise SSHException('Unknown client cipher ' + name) - if name in ('arcfour128', 'arcfour256'): - # arcfour cipher - cipher = self._cipher_info[name]['class'].new(key) - # as per RFC 4345, the first 1536 bytes of keystream - # generated by the cipher MUST be discarded - cipher.encrypt(" " * 1536) - return cipher - elif name.endswith("-ctr"): - # CTR modes, we need a counter - counter = Counter.new(nbits=self._cipher_info[name]['block-size'] * 8, initial_value=util.inflate_long(iv, True)) - return self._cipher_info[name]['class'].new(key, self._cipher_info[name]['mode'], iv, counter) else: - return self._cipher_info[name]['class'].new(key, self._cipher_info[name]['mode'], iv) + cipher = Cipher( + self._cipher_info[name]['class'](key), + self._cipher_info[name]['mode'](iv), + backend=default_backend(), + ) + if operation is self._ENCRYPT: + return cipher.encryptor() + else: + return cipher.decryptor() def _set_forward_agent_handler(self, handler): if handler is None: @@ -1574,14 +1803,24 @@ class Transport (threading.Thread, ClosingContextManager): # active=True occurs before the thread is launched, to avoid a race _active_threads.append(self) + tid = hex(long(id(self)) & xffffffff) if self.server_mode: - self._log(DEBUG, 'starting thread (server mode): %s' % hex(long(id(self)) & xffffffff)) + self._log(DEBUG, 'starting thread (server mode): %s' % tid) else: - self._log(DEBUG, 'starting thread (client mode): %s' % hex(long(id(self)) & xffffffff)) + self._log(DEBUG, 'starting thread (client mode): %s' % tid) try: try: self.packetizer.write_all(b(self.local_version + '\r\n')) + self._log(DEBUG, 'Local version/idstring: %s' % self.local_version) # noqa self._check_banner() + # The above is actually very much part of the handshake, but + # sometimes the banner can be read but the machine is not + # responding, for example when the remote ssh daemon is loaded + # in to memory but we can not read from the disk/spawn a new + # shell. + # Make sure we can specify a timeout for the initial handshake. + # Re-use the banner timeout for now. 
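# Editor's note -- illustrative sketch, not part of this patch. It restates
# the RFC 4253 section 7.2 derivation that the _compute_key() hunk above
# implements, now parameterized on the kex engine's hash (sha1/sha256/sha512)
# rather than hard-coded sha1. The mpint encoding is simplified (Python 3
# int.to_bytes for brevity) and K/H below are invented test values, so treat
# this as a shape-of-the-algorithm sketch only.
from hashlib import sha256
import struct

def _mpint(n):
    # minimal big-endian mpint: 4-byte length prefix + magnitude (simplified)
    data = n.to_bytes((n.bit_length() + 8) // 8, 'big')
    return struct.pack('!I', len(data)) + data

def derive(K, H, which, session_id, nbytes, hash_algo=sha256):
    out = sofar = hash_algo(_mpint(K) + H + which + session_id).digest()
    while len(out) < nbytes:
        digest = hash_algo(_mpint(K) + H + sofar).digest()
        out += digest
        sofar += digest
    return out[:nbytes]

iv = derive(0x1234567890ABCDEF, b'H' * 32, b'A', b'H' * 32, 16)   # IV c->s
key = derive(0x1234567890ABCDEF, b'H' * 32, b'C', b'H' * 32, 32)  # key c->s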
+ self.packetizer.start_handshake(self.handshake_timeout) self._send_kex_init() self._expect_packet(MSG_KEXINIT) @@ -1604,7 +1843,7 @@ class Transport (threading.Thread, ClosingContextManager): continue if len(self._expected_packet) > 0: if ptype not in self._expected_packet: - raise SSHException('Expecting packet from %r, got %d' % (self._expected_packet, ptype)) + raise SSHException('Expecting packet from %r, got %d' % (self._expected_packet, ptype)) # noqa self._expected_packet = tuple() if (ptype >= 30) and (ptype <= 41): self.kex_engine.parse_next(ptype, m) @@ -1618,26 +1857,30 @@ class Transport (threading.Thread, ClosingContextManager): if chan is not None: self._channel_handler_table[ptype](chan, m) elif chanid in self.channels_seen: - self._log(DEBUG, 'Ignoring message for dead channel %d' % chanid) + self._log(DEBUG, 'Ignoring message for dead channel %d' % chanid) # noqa else: - self._log(ERROR, 'Channel request for unknown channel %d' % chanid) + self._log(ERROR, 'Channel request for unknown channel %d' % chanid) # noqa self.active = False self.packetizer.close() - elif (self.auth_handler is not None) and (ptype in self.auth_handler._handler_table): - self.auth_handler._handler_table[ptype](self.auth_handler, m) + elif ( + self.auth_handler is not None and + ptype in self.auth_handler._handler_table + ): + handler = self.auth_handler._handler_table[ptype] + handler(self.auth_handler, m) else: self._log(WARNING, 'Oops, unhandled type %d' % ptype) msg = Message() msg.add_byte(cMSG_UNIMPLEMENTED) msg.add_int(m.seqno) self._send_message(msg) + self.packetizer.complete_handshake() except SSHException as e: self._log(ERROR, 'Exception: ' + str(e)) self._log(ERROR, util.tb_strings()) self.saved_exception = e except EOFError as e: self._log(DEBUG, 'EOF in transport thread') - #self._log(DEBUG, util.tb_strings()) self.saved_exception = e except socket.error as e: if type(e.args) is tuple: @@ -1679,7 +1922,19 @@ class Transport (threading.Thread, ClosingContextManager): if self.sys.modules is not None: raise - ### protocol stages + + def _log_agreement(self, which, local, remote): + # Log useful, non-duplicative line re: an agreed-upon algorithm. + # Old code implied algorithms could be asymmetrical (different for + # inbound vs outbound) so we preserve that possibility. + msg = "{0} agreed: ".format(which) + if local == remote: + msg += local + else: + msg += "local={0}, remote={1}".format(local, remote) + self._log(DEBUG, msg) + + # protocol stages def _negotiate_keys(self, m): # throws SSHException on anything unusual @@ -1708,7 +1963,9 @@ class Transport (threading.Thread, ClosingContextManager): except ProxyCommandFailure: raise except Exception as e: - raise SSHException('Error reading SSH protocol banner' + str(e)) + raise SSHException( + 'Error reading SSH protocol banner' + str(e) + ) if buf[:4] == 'SSH-': break self._log(DEBUG, 'Banner: ' + buf) @@ -1716,11 +1973,12 @@ class Transport (threading.Thread, ClosingContextManager): raise SSHException('Indecipherable protocol version "' + buf + '"') # save this server version string for later self.remote_version = buf + self._log(DEBUG, 'Remote version/idstring: %s' % buf) # pull off any attached comment - comment = '' + # NOTE: comment used to be stored in a variable and then...never used. + # since 2003. 
ca 877cd974b8182d26fa76d566072917ea67b64e67 i = buf.find(' ') if i >= 0: - comment = buf[i+1:] buf = buf[:i] # parse out version string and make sure it matches segs = buf.split('-', 2) @@ -1729,8 +1987,10 @@ class Transport (threading.Thread, ClosingContextManager): version = segs[1] client = segs[2] if version != '1.99' and version != '2.0': - raise SSHException('Incompatible version (%s instead of 2.0)' % (version,)) - self._log(INFO, 'Connected (version %s, client %s)' % (version, client)) + msg = 'Incompatible version ({0} instead of 2.0)' + raise SSHException(msg.format(version)) + msg = 'Connected (version {0}, client {1})'.format(version, client) + self._log(INFO, msg) def _send_kex_init(self): """ @@ -1744,13 +2004,25 @@ class Transport (threading.Thread, ClosingContextManager): self.clear_to_send_lock.release() self.in_kex = True if self.server_mode: - if (self._modulus_pack is None) and ('diffie-hellman-group-exchange-sha1' in self._preferred_kex): - # can't do group-exchange if we don't have a pack of potential primes - pkex = list(self.get_security_options().kex) - pkex.remove('diffie-hellman-group-exchange-sha1') + mp_required_prefix = 'diffie-hellman-group-exchange-sha' + kex_mp = [ + k for k + in self._preferred_kex + if k.startswith(mp_required_prefix) + ] + if (self._modulus_pack is None) and (len(kex_mp) > 0): + # can't do group-exchange if we don't have a pack of potential + # primes + pkex = [ + k for k + in self.get_security_options().kex + if not k.startswith(mp_required_prefix) + ] self.get_security_options().kex = pkex - available_server_keys = list(filter(list(self.server_key_dict.keys()).__contains__, - self._preferred_keys)) + available_server_keys = list(filter( + list(self.server_key_dict.keys()).__contains__, + self._preferred_keys + )) else: available_server_keys = self._preferred_keys @@ -1774,7 +2046,7 @@ class Transport (threading.Thread, ClosingContextManager): self._send_message(m) def _parse_kex_init(self, m): - cookie = m.get_bytes(16) + m.get_bytes(16) # cookie, discarded kex_algo_list = m.get_list() server_key_algo_list = m.get_list() client_encrypt_algo_list = m.get_list() @@ -1786,82 +2058,141 @@ class Transport (threading.Thread, ClosingContextManager): client_lang_list = m.get_list() server_lang_list = m.get_list() kex_follows = m.get_boolean() - unused = m.get_int() - - self._log(DEBUG, 'kex algos:' + str(kex_algo_list) + ' server key:' + str(server_key_algo_list) + - ' client encrypt:' + str(client_encrypt_algo_list) + - ' server encrypt:' + str(server_encrypt_algo_list) + - ' client mac:' + str(client_mac_algo_list) + - ' server mac:' + str(server_mac_algo_list) + - ' client compress:' + str(client_compress_algo_list) + - ' server compress:' + str(server_compress_algo_list) + - ' client lang:' + str(client_lang_list) + - ' server lang:' + str(server_lang_list) + - ' kex follows?' + str(kex_follows)) - - # as a server, we pick the first item in the client's list that we support. - # as a client, we pick the first item in our list that the server supports. 
+ m.get_int() # unused + + self._log(DEBUG, + 'kex algos:' + str(kex_algo_list) + + ' server key:' + str(server_key_algo_list) + + ' client encrypt:' + str(client_encrypt_algo_list) + + ' server encrypt:' + str(server_encrypt_algo_list) + + ' client mac:' + str(client_mac_algo_list) + + ' server mac:' + str(server_mac_algo_list) + + ' client compress:' + str(client_compress_algo_list) + + ' server compress:' + str(server_compress_algo_list) + + ' client lang:' + str(client_lang_list) + + ' server lang:' + str(server_lang_list) + + ' kex follows?' + str(kex_follows) + ) + + # as a server, we pick the first item in the client's list that we + # support. + # as a client, we pick the first item in our list that the server + # supports. if self.server_mode: - agreed_kex = list(filter(self._preferred_kex.__contains__, kex_algo_list)) + agreed_kex = list(filter( + self._preferred_kex.__contains__, + kex_algo_list + )) else: - agreed_kex = list(filter(kex_algo_list.__contains__, self._preferred_kex)) + agreed_kex = list(filter( + kex_algo_list.__contains__, + self._preferred_kex + )) if len(agreed_kex) == 0: - raise SSHException('Incompatible ssh peer (no acceptable kex algorithm)') + raise SSHException('Incompatible ssh peer (no acceptable kex algorithm)') # noqa self.kex_engine = self._kex_info[agreed_kex[0]](self) + self._log(DEBUG, "Kex agreed: %s" % agreed_kex[0]) if self.server_mode: - available_server_keys = list(filter(list(self.server_key_dict.keys()).__contains__, - self._preferred_keys)) - agreed_keys = list(filter(available_server_keys.__contains__, server_key_algo_list)) + available_server_keys = list(filter( + list(self.server_key_dict.keys()).__contains__, + self._preferred_keys + )) + agreed_keys = list(filter( + available_server_keys.__contains__, server_key_algo_list + )) else: - agreed_keys = list(filter(server_key_algo_list.__contains__, self._preferred_keys)) + agreed_keys = list(filter( + server_key_algo_list.__contains__, self._preferred_keys + )) if len(agreed_keys) == 0: - raise SSHException('Incompatible ssh peer (no acceptable host key)') + raise SSHException('Incompatible ssh peer (no acceptable host key)') # noqa self.host_key_type = agreed_keys[0] if self.server_mode and (self.get_server_key() is None): - raise SSHException('Incompatible ssh peer (can\'t match requested host key type)') + raise SSHException('Incompatible ssh peer (can\'t match requested host key type)') # noqa if self.server_mode: - agreed_local_ciphers = list(filter(self._preferred_ciphers.__contains__, - server_encrypt_algo_list)) - agreed_remote_ciphers = list(filter(self._preferred_ciphers.__contains__, - client_encrypt_algo_list)) + agreed_local_ciphers = list(filter( + self._preferred_ciphers.__contains__, + server_encrypt_algo_list + )) + agreed_remote_ciphers = list(filter( + self._preferred_ciphers.__contains__, + client_encrypt_algo_list + )) else: - agreed_local_ciphers = list(filter(client_encrypt_algo_list.__contains__, - self._preferred_ciphers)) - agreed_remote_ciphers = list(filter(server_encrypt_algo_list.__contains__, - self._preferred_ciphers)) - if (len(agreed_local_ciphers) == 0) or (len(agreed_remote_ciphers) == 0): - raise SSHException('Incompatible ssh server (no acceptable ciphers)') + agreed_local_ciphers = list(filter( + client_encrypt_algo_list.__contains__, + self._preferred_ciphers + )) + agreed_remote_ciphers = list(filter( + server_encrypt_algo_list.__contains__, + self._preferred_ciphers + )) + if len(agreed_local_ciphers) == 0 or len(agreed_remote_ciphers) == 0: + 
raise SSHException('Incompatible ssh server (no acceptable ciphers)') # noqa self.local_cipher = agreed_local_ciphers[0] self.remote_cipher = agreed_remote_ciphers[0] - self._log(DEBUG, 'Ciphers agreed: local=%s, remote=%s' % (self.local_cipher, self.remote_cipher)) + self._log_agreement( + 'Cipher', local=self.local_cipher, remote=self.remote_cipher + ) if self.server_mode: - agreed_remote_macs = list(filter(self._preferred_macs.__contains__, client_mac_algo_list)) - agreed_local_macs = list(filter(self._preferred_macs.__contains__, server_mac_algo_list)) + agreed_remote_macs = list(filter( + self._preferred_macs.__contains__, client_mac_algo_list + )) + agreed_local_macs = list(filter( + self._preferred_macs.__contains__, server_mac_algo_list + )) else: - agreed_local_macs = list(filter(client_mac_algo_list.__contains__, self._preferred_macs)) - agreed_remote_macs = list(filter(server_mac_algo_list.__contains__, self._preferred_macs)) + agreed_local_macs = list(filter( + client_mac_algo_list.__contains__, self._preferred_macs + )) + agreed_remote_macs = list(filter( + server_mac_algo_list.__contains__, self._preferred_macs + )) if (len(agreed_local_macs) == 0) or (len(agreed_remote_macs) == 0): raise SSHException('Incompatible ssh server (no acceptable macs)') self.local_mac = agreed_local_macs[0] self.remote_mac = agreed_remote_macs[0] + self._log_agreement( + 'MAC', local=self.local_mac, remote=self.remote_mac + ) if self.server_mode: - agreed_remote_compression = list(filter(self._preferred_compression.__contains__, client_compress_algo_list)) - agreed_local_compression = list(filter(self._preferred_compression.__contains__, server_compress_algo_list)) + agreed_remote_compression = list(filter( + self._preferred_compression.__contains__, + client_compress_algo_list + )) + agreed_local_compression = list(filter( + self._preferred_compression.__contains__, + server_compress_algo_list + )) else: - agreed_local_compression = list(filter(client_compress_algo_list.__contains__, self._preferred_compression)) - agreed_remote_compression = list(filter(server_compress_algo_list.__contains__, self._preferred_compression)) - if (len(agreed_local_compression) == 0) or (len(agreed_remote_compression) == 0): - raise SSHException('Incompatible ssh server (no acceptable compression) %r %r %r' % (agreed_local_compression, agreed_remote_compression, self._preferred_compression)) + agreed_local_compression = list(filter( + client_compress_algo_list.__contains__, + self._preferred_compression + )) + agreed_remote_compression = list(filter( + server_compress_algo_list.__contains__, + self._preferred_compression + )) + if ( + len(agreed_local_compression) == 0 or + len(agreed_remote_compression) == 0 + ): + msg = 'Incompatible ssh server (no acceptable compression) {0!r} {1!r} {2!r}' # noqa + raise SSHException(msg.format( + agreed_local_compression, agreed_remote_compression, + self._preferred_compression, + )) self.local_compression = agreed_local_compression[0] self.remote_compression = agreed_remote_compression[0] - - self._log(DEBUG, 'using kex %s; server key type %s; cipher: local %s, remote %s; mac: local %s, remote %s; compression: local %s, remote %s' % - (agreed_kex[0], self.host_key_type, self.local_cipher, self.remote_cipher, self.local_mac, - self.remote_mac, self.local_compression, self.remote_compression)) + self._log_agreement( + 'Compression', + local=self.local_compression, + remote=self.remote_compression + ) # save for computing hash later... # now wait! 
openssh has a bug (and others might too) where there are @@ -1871,54 +2202,80 @@ class Transport (threading.Thread, ClosingContextManager): self.remote_kex_init = cMSG_KEXINIT + m.get_so_far() def _activate_inbound(self): - """switch on newly negotiated encryption parameters for inbound traffic""" + """switch on newly negotiated encryption parameters for + inbound traffic""" block_size = self._cipher_info[self.remote_cipher]['block-size'] if self.server_mode: IV_in = self._compute_key('A', block_size) - key_in = self._compute_key('C', self._cipher_info[self.remote_cipher]['key-size']) + key_in = self._compute_key( + 'C', self._cipher_info[self.remote_cipher]['key-size'] + ) else: IV_in = self._compute_key('B', block_size) - key_in = self._compute_key('D', self._cipher_info[self.remote_cipher]['key-size']) - engine = self._get_cipher(self.remote_cipher, key_in, IV_in) + key_in = self._compute_key( + 'D', self._cipher_info[self.remote_cipher]['key-size'] + ) + engine = self._get_cipher( + self.remote_cipher, key_in, IV_in, self._DECRYPT + ) mac_size = self._mac_info[self.remote_mac]['size'] mac_engine = self._mac_info[self.remote_mac]['class'] - # initial mac keys are done in the hash's natural size (not the potentially truncated - # transmission size) + # initial mac keys are done in the hash's natural size (not the + # potentially truncated transmission size) if self.server_mode: mac_key = self._compute_key('E', mac_engine().digest_size) else: mac_key = self._compute_key('F', mac_engine().digest_size) - self.packetizer.set_inbound_cipher(engine, block_size, mac_engine, mac_size, mac_key) + self.packetizer.set_inbound_cipher( + engine, block_size, mac_engine, mac_size, mac_key + ) compress_in = self._compression_info[self.remote_compression][1] - if (compress_in is not None) and ((self.remote_compression != 'zlib@openssh.com') or self.authenticated): + if ( + compress_in is not None and + ( + self.remote_compression != 'zlib@openssh.com' or + self.authenticated + ) + ): self._log(DEBUG, 'Switching on inbound compression ...') self.packetizer.set_inbound_compressor(compress_in()) def _activate_outbound(self): - """switch on newly negotiated encryption parameters for outbound traffic""" + """switch on newly negotiated encryption parameters for + outbound traffic""" m = Message() m.add_byte(cMSG_NEWKEYS) self._send_message(m) block_size = self._cipher_info[self.local_cipher]['block-size'] if self.server_mode: IV_out = self._compute_key('B', block_size) - key_out = self._compute_key('D', self._cipher_info[self.local_cipher]['key-size']) + key_out = self._compute_key( + 'D', self._cipher_info[self.local_cipher]['key-size']) else: IV_out = self._compute_key('A', block_size) - key_out = self._compute_key('C', self._cipher_info[self.local_cipher]['key-size']) - engine = self._get_cipher(self.local_cipher, key_out, IV_out) + key_out = self._compute_key( + 'C', self._cipher_info[self.local_cipher]['key-size']) + engine = self._get_cipher( + self.local_cipher, key_out, IV_out, self._ENCRYPT) mac_size = self._mac_info[self.local_mac]['size'] mac_engine = self._mac_info[self.local_mac]['class'] - # initial mac keys are done in the hash's natural size (not the potentially truncated - # transmission size) + # initial mac keys are done in the hash's natural size (not the + # potentially truncated transmission size) if self.server_mode: mac_key = self._compute_key('F', mac_engine().digest_size) else: mac_key = self._compute_key('E', mac_engine().digest_size) sdctr = self.local_cipher.endswith('-ctr') - 
self.packetizer.set_outbound_cipher(engine, block_size, mac_engine, mac_size, mac_key, sdctr) + self.packetizer.set_outbound_cipher( + engine, block_size, mac_engine, mac_size, mac_key, sdctr) compress_out = self._compression_info[self.local_compression][0] - if (compress_out is not None) and ((self.local_compression != 'zlib@openssh.com') or self.authenticated): + if ( + compress_out is not None and + ( + self.local_compression != 'zlib@openssh.com' or + self.authenticated + ) + ): self._log(DEBUG, 'Switching on outbound compression ...') self.packetizer.set_outbound_compressor(compress_out()) if not self.packetizer.need_rekey(): @@ -1974,7 +2331,10 @@ class Transport (threading.Thread, ClosingContextManager): self._log(DEBUG, 'Received global request "%s"' % kind) want_reply = m.get_boolean() if not self.server_mode: - self._log(DEBUG, 'Rejecting "%s" global request from server.' % kind) + self._log( + DEBUG, + 'Rejecting "%s" global request from server.' % kind + ) ok = False elif kind == 'tcpip-forward': address = m.get_text() @@ -2025,7 +2385,8 @@ class Transport (threading.Thread, ClosingContextManager): return self.lock.acquire() try: - chan._set_remote_channel(server_chanid, server_window_size, server_max_packet_size) + chan._set_remote_channel( + server_chanid, server_window_size, server_max_packet_size) self._log(DEBUG, 'Secsh channel %d opened.' % chanid) if chanid in self.channel_events: self.channel_events[chanid].set() @@ -2038,9 +2399,13 @@ class Transport (threading.Thread, ClosingContextManager): chanid = m.get_int() reason = m.get_int() reason_str = m.get_text() - lang = m.get_text() + m.get_text() # ignored language reason_text = CONNECTION_FAILED_CODE.get(reason, '(unknown code)') - self._log(ERROR, 'Secsh channel %d open FAILED: %s: %s' % (chanid, reason_str, reason_text)) + self._log( + ERROR, + 'Secsh channel %d open FAILED: %s: %s' % ( + chanid, reason_str, reason_text) + ) self.lock.acquire() try: self.saved_exception = ChannelException(reason, reason_text) @@ -2059,7 +2424,10 @@ class Transport (threading.Thread, ClosingContextManager): initial_window_size = m.get_int() max_packet_size = m.get_int() reject = False - if (kind == 'auth-agent@openssh.com') and (self._forward_agent_handler is not None): + if ( + kind == 'auth-agent@openssh.com' and + self._forward_agent_handler is not None + ): self._log(DEBUG, 'Incoming forward agent connection') self.lock.acquire() try: @@ -2069,7 +2437,11 @@ class Transport (threading.Thread, ClosingContextManager): elif (kind == 'x11') and (self._x11_handler is not None): origin_addr = m.get_text() origin_port = m.get_int() - self._log(DEBUG, 'Incoming x11 connection from %s:%d' % (origin_addr, origin_port)) + self._log( + DEBUG, + 'Incoming x11 connection from %s:%d' % ( + origin_addr, origin_port) + ) self.lock.acquire() try: my_chanid = self._next_channel() @@ -2080,14 +2452,20 @@ class Transport (threading.Thread, ClosingContextManager): server_port = m.get_int() origin_addr = m.get_text() origin_port = m.get_int() - self._log(DEBUG, 'Incoming tcp forwarded connection from %s:%d' % (origin_addr, origin_port)) + self._log( + DEBUG, + 'Incoming tcp forwarded connection from %s:%d' % ( + origin_addr, origin_port) + ) self.lock.acquire() try: my_chanid = self._next_channel() finally: self.lock.release() elif not self.server_mode: - self._log(DEBUG, 'Rejecting "%s" channel request from server.' % kind) + self._log( + DEBUG, + 'Rejecting "%s" channel request from server.' 
% kind) reject = True reason = OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED else: @@ -2097,17 +2475,23 @@ class Transport (threading.Thread, ClosingContextManager): finally: self.lock.release() if kind == 'direct-tcpip': - # handle direct-tcpip requests comming from the client + # handle direct-tcpip requests coming from the client dest_addr = m.get_text() dest_port = m.get_int() origin_addr = m.get_text() origin_port = m.get_int() reason = self.server_object.check_channel_direct_tcpip_request( - my_chanid, (origin_addr, origin_port), (dest_addr, dest_port)) + my_chanid, + (origin_addr, origin_port), + (dest_addr, dest_port) + ) else: - reason = self.server_object.check_channel_request(kind, my_chanid) + reason = self.server_object.check_channel_request( + kind, my_chanid) if reason != OPEN_SUCCEEDED: - self._log(DEBUG, 'Rejecting "%s" channel request from client.' % kind) + self._log( + DEBUG, + 'Rejecting "%s" channel request from client.' % kind) reject = True if reject: msg = Message() @@ -2125,8 +2509,10 @@ class Transport (threading.Thread, ClosingContextManager): self._channels.put(my_chanid, chan) self.channels_seen[my_chanid] = True chan._set_transport(self) - chan._set_window(self.default_window_size, self.default_max_packet_size) - chan._set_remote_channel(chanid, initial_window_size, max_packet_size) + chan._set_window( + self.default_window_size, self.default_max_packet_size) + chan._set_remote_channel( + chanid, initial_window_size, max_packet_size) finally: self.lock.release() m = Message() @@ -2143,14 +2529,18 @@ class Transport (threading.Thread, ClosingContextManager): self._x11_handler(chan, (origin_addr, origin_port)) elif kind == 'forwarded-tcpip': chan.origin_addr = (origin_addr, origin_port) - self._tcp_handler(chan, (origin_addr, origin_port), (server_addr, server_port)) + self._tcp_handler( + chan, + (origin_addr, origin_port), + (server_addr, server_port) + ) else: self._queue_incoming_channel(chan) def _parse_debug(self, m): - always_display = m.get_boolean() + m.get_boolean() # always_display msg = m.get_string() - lang = m.get_string() + m.get_string() # language self._log(DEBUG, 'Debug msg: {0}'.format(util.safe_string(msg))) def _get_subsystem_handler(self, name): @@ -2197,7 +2587,6 @@ class SecurityOptions (object): ``ValueError`` will be raised. If you try to assign something besides a tuple to one of the fields, ``TypeError`` will be raised. """ - #__slots__ = [ 'ciphers', 'digests', 'key_types', 'kex', 'compression', '_transport' ] __slots__ = '_transport' def __init__(self, transport): diff --git a/paramiko/util.py b/paramiko/util.py index d9a29d74..de099c0c 100644 --- a/paramiko/util.py +++ b/paramiko/util.py @@ -22,7 +22,6 @@ Useful functions used by the rest of paramiko. 
from __future__ import generators -import array import errno import sys import struct @@ -31,12 +30,13 @@ import threading import logging from paramiko.common import DEBUG, zero_byte, xffffffff, max_byte -from paramiko.py3compat import PY2, long, byte_ord, b, byte_chr +from paramiko.py3compat import PY2, long, byte_chr, byte_ord, b from paramiko.config import SSHConfig def inflate_long(s, always_positive=False): - """turns a normalized byte string into a long-int (adapted from Crypto.Util.number)""" + """turns a normalized byte string into a long-int + (adapted from Crypto.Util.number)""" out = long(0) negative = 0 if not always_positive and (len(s) > 0) and (byte_ord(s[0]) >= 0x80): @@ -49,17 +49,19 @@ def inflate_long(s, always_positive=False): # noinspection PyAugmentAssignment s = filler * (4 - len(s) % 4) + s for i in range(0, len(s), 4): - out = (out << 32) + struct.unpack('>I', s[i:i+4])[0] + out = (out << 32) + struct.unpack('>I', s[i:i + 4])[0] if negative: out -= (long(1) << (8 * len(s))) return out + deflate_zero = zero_byte if PY2 else 0 deflate_ff = max_byte if PY2 else 0xff def deflate_long(n, add_sign_padding=True): - """turns a long-int into a normalized byte string (adapted from Crypto.Util.number)""" + """turns a long-int into a normalized byte string + (adapted from Crypto.Util.number)""" # after much testing, this algorithm was deemed to be the fastest s = bytes() n = long(n) @@ -92,16 +94,16 @@ def format_binary(data, prefix=''): x = 0 out = [] while len(data) > x + 16: - out.append(format_binary_line(data[x:x+16])) + out.append(format_binary_line(data[x:x + 16])) x += 16 if x < len(data): out.append(format_binary_line(data[x:])) - return [prefix + x for x in out] + return [prefix + line for line in out] def format_binary_line(data): left = ' '.join(['%02X' % byte_ord(c) for c in data]) - right = ''.join([('.%c..' % c)[(byte_ord(c)+63)//95] for c in data]) + right = ''.join([('.%c..' 
% c)[(byte_ord(c) + 63) // 95] for c in data]) return '%-50s %s' % (left, right) @@ -118,7 +120,7 @@ def safe_string(s): def bit_length(n): try: - return n.bitlength() + return n.bit_length() except AttributeError: norm = deflate_long(n, False) hbyte = byte_ord(norm[0]) @@ -216,6 +218,7 @@ def mod_inverse(x, m): u2 += m return u2 + _g_thread_ids = {} _g_thread_counter = 0 _g_thread_lock = threading.Lock() @@ -237,15 +240,16 @@ def get_thread_id(): def log_to_file(filename, level=DEBUG): - """send paramiko logs to a logfile, if they're not already going somewhere""" + """send paramiko logs to a logfile, + if they're not already going somewhere""" l = logging.getLogger("paramiko") if len(l.handlers) > 0: return l.setLevel(level) - f = open(filename, 'w') + f = open(filename, 'a') lh = logging.StreamHandler(f) - lh.setFormatter(logging.Formatter('%(levelname)-.3s [%(asctime)s.%(msecs)03d] thr=%(_threadid)-3d %(name)s: %(message)s', - '%Y%m%d-%H:%M:%S')) + frm = '%(levelname)-.3s [%(asctime)s.%(msecs)03d] thr=%(_threadid)-3d %(name)s: %(message)s' # noqa + lh.setFormatter(logging.Formatter(frm, '%Y%m%d-%H:%M:%S')) l.addHandler(lh) @@ -254,6 +258,8 @@ class PFilter (object): def filter(self, record): record._threadid = get_thread_id() return True + + _pfilter = PFilter() @@ -273,43 +279,12 @@ def retry_on_signal(function): raise -class Counter (object): - """Stateful counter for CTR mode crypto""" - def __init__(self, nbits, initial_value=long(1), overflow=long(0)): - self.blocksize = nbits / 8 - self.overflow = overflow - # start with value - 1 so we don't have to store intermediate values when counting - # could the iv be 0? - if initial_value == 0: - self.value = array.array('c', max_byte * self.blocksize) - else: - x = deflate_long(initial_value - 1, add_sign_padding=False) - self.value = array.array('c', zero_byte * (self.blocksize - len(x)) + x) - - def __call__(self): - """Increament the counter and return the new value""" - i = self.blocksize - 1 - while i > -1: - c = self.value[i] = byte_chr((byte_ord(self.value[i]) + 1) % 256) - if c != zero_byte: - return self.value.tostring() - i -= 1 - # counter reset - x = deflate_long(self.overflow, add_sign_padding=False) - self.value = array.array('c', zero_byte * (self.blocksize - len(x)) + x) - return self.value.tostring() - - @classmethod - def new(cls, nbits, initial_value=long(1), overflow=long(0)): - return cls(nbits, initial_value=initial_value, overflow=overflow) - - def constant_time_bytes_eq(a, b): if len(a) != len(b): return False res = 0 # noinspection PyUnresolvedReferences - for i in (xrange if PY2 else range)(len(a)): + for i in (xrange if PY2 else range)(len(a)): # noqa: F821 res |= byte_ord(a[i]) ^ byte_ord(b[i]) return res == 0 diff --git a/paramiko/win_pageant.py b/paramiko/win_pageant.py index 4b482bee..c8c2c7bc 100644 --- a/paramiko/win_pageant.py +++ b/paramiko/win_pageant.py @@ -25,13 +25,13 @@ import array import ctypes.wintypes import platform import struct -from paramiko.util import * +from paramiko.util import * # noqa from paramiko.py3compat import b try: - import _thread as thread # Python 3.x + import _thread as thread # Python 3.x except ImportError: - import thread # Python 2.5-2.7 + import thread # Python 2.5-2.7 from . 
import _winapi @@ -57,7 +57,10 @@ def can_talk_to_agent(): return bool(_get_pageant_window_object()) -ULONG_PTR = ctypes.c_uint64 if platform.architecture()[0] == '64bit' else ctypes.c_uint32 +if platform.architecture()[0] == '64bit': + ULONG_PTR = ctypes.c_uint64 +else: + ULONG_PTR = ctypes.c_uint32 class COPYDATASTRUCT(ctypes.Structure): @@ -91,7 +94,7 @@ def _query_pageant(msg): with pymap: pymap.write(msg) # Create an array buffer containing the mapped filename - char_buffer = array.array("b", b(map_name) + zero_byte) + char_buffer = array.array("b", b(map_name) + zero_byte) # noqa char_buffer_address, char_buffer_size = char_buffer.buffer_info() # Create a string to use for the SendMessage function call cds = COPYDATASTRUCT(_AGENT_COPYDATA_ID, char_buffer_size, @@ -1,2 +1,10 @@ [wheel] universal = 1 + +[coverage:run] +omit = paramiko/_winapi.py + +[flake8] +exclude = sites,.git,build,dist,demos,tests +ignore = E124,E125,E128,E261,E301,E302,E303,E402 +max-line-length = 79 @@ -16,6 +16,13 @@ # along with Paramiko; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA. +import sys +from setuptools import setup + +if sys.platform == 'darwin': + import setup_helper + + setup_helper.install_custom_make_tarball() longdesc = ''' This is a library for making SSH2 connections (client or server). @@ -24,35 +31,12 @@ connections between python scripts. All major ciphers and hash methods are supported. SFTP client and server mode are both supported too. Required packages: - pyCrypto + Cryptography -To install the `in-development version -<https://github.com/paramiko/paramiko/tarball/master#egg=paramiko-dev>`_, use -`pip install paramiko==dev`. +To install the development version, ``pip install -e +git+https://github.com/paramiko/paramiko/#egg=paramiko``. 
''' -# if someday we want to *require* setuptools, uncomment this: -# (it will cause setuptools to be automatically downloaded) -#import ez_setup -#ez_setup.use_setuptools() - -import sys -try: - from setuptools import setup - kw = { - 'install_requires': [ - 'pycrypto >= 2.1, != 2.4', - 'ecdsa >= 0.11', - ], - } -except ImportError: - from distutils.core import setup - kw = {} - -if sys.platform == 'darwin': - import setup_helper - setup_helper.install_custom_make_tarball() - # Version info -- read without importing _locals = {} @@ -60,22 +44,22 @@ with open('paramiko/_version.py') as fp: exec(fp.read(), None, _locals) version = _locals['__version__'] - setup( - name = "paramiko", - version = version, - description = "SSH2 protocol library", - long_description = longdesc, - author = "Jeff Forcier", - author_email = "jeff@bitprophet.org", - url = "https://github.com/paramiko/paramiko/", - packages = [ 'paramiko' ], - license = 'LGPL', - platforms = 'Posix; MacOS X; Windows', - classifiers = [ + name="paramiko", + version=version, + description="SSH2 protocol library", + long_description=longdesc, + author="Jeff Forcier", + author_email="jeff@bitprophet.org", + url="https://github.com/paramiko/paramiko/", + packages=['paramiko'], + license='LGPL', + platforms='Posix; MacOS X; Windows', + classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', - 'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)', + 'License :: OSI Approved :: ' + 'GNU Library or Lesser General Public License (LGPL)', 'Operating System :: OS Independent', 'Topic :: Internet', 'Topic :: Security :: Cryptography', @@ -87,6 +71,12 @@ setup( 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', + ], + install_requires=[ + 'bcrypt>=3.0.0', + 'cryptography>=1.1', + 'pynacl>=1.0.1', + 'pyasn1>=0.1.7', ], - **kw ) diff --git a/setup_helper.py b/setup_helper.py index ff6b0e16..c359a16c 100644 --- a/setup_helper.py +++ b/setup_helper.py @@ -30,9 +30,42 @@ import distutils.archive_util from distutils.dir_util import mkpath from distutils.spawn import spawn - -def make_tarball(base_name, base_dir, compress='gzip', - verbose=False, dry_run=False): +try: + from pwd import getpwnam +except ImportError: + getpwnam = None + +try: + from grp import getgrnam +except ImportError: + getgrnam = None + +def _get_gid(name): + """Returns a gid, given a group name.""" + if getgrnam is None or name is None: + return None + try: + result = getgrnam(name) + except KeyError: + result = None + if result is not None: + return result[2] + return None + +def _get_uid(name): + """Returns an uid, given a user name.""" + if getpwnam is None or name is None: + return None + try: + result = getpwnam(name) + except KeyError: + result = None + if result is not None: + return result[2] + return None + +def make_tarball(base_name, base_dir, compress='gzip', verbose=0, dry_run=0, + owner=None, group=None): """Create a tar file from all the files under 'base_dir'. This file may be compressed. @@ -44,7 +77,7 @@ def make_tarball(base_name, base_dir, compress='gzip', For 'gzip' and 'bzip2' the internal tarfile module will be used. For 'compress' the .tar will be created using tarfile, and then we will spawn 'compress' afterwards. 
- The output tar file will be named 'base_name' + ".tar", + The output tar file will be named 'base_name' + ".tar", possibly plus the appropriate compression extension (".gz", ".bz2" or ".Z"). Return the output filename. """ @@ -54,12 +87,14 @@ def make_tarball(base_name, base_dir, compress='gzip', # "create a tree of hardlinks" step! (Would also be nice to # detect GNU tar to use its 'z' option and save a step.) - compress_ext = { 'gzip': ".gz", - 'bzip2': '.bz2', - 'compress': ".Z" } + compress_ext = { + 'gzip': ".gz", + 'bzip2': '.bz2', + 'compress': ".Z", + } # flags for compression program, each element of list will be an argument - tarfile_compress_flag = {'gzip':'gz', 'bzip2':'bz2'} + tarfile_compress_flag = {'gzip': 'gz', 'bzip2': 'bz2'} compress_flags = {'compress': ["-f"]} if compress is not None and compress not in compress_ext.keys(): @@ -75,11 +110,30 @@ def make_tarball(base_name, base_dir, compress='gzip', mkpath(os.path.dirname(archive_name), dry_run=dry_run) log.info('Creating tar file %s with mode %s' % (archive_name, mode)) + uid = _get_uid(owner) + gid = _get_gid(group) + + def _set_uid_gid(tarinfo): + if gid is not None: + tarinfo.gid = gid + tarinfo.gname = group + if uid is not None: + tarinfo.uid = uid + tarinfo.uname = owner + return tarinfo + if not dry_run: tar = tarfile.open(archive_name, mode=mode) # This recursively adds everything underneath base_dir - tar.add(base_dir) - tar.close() + try: + try: + # Support for the `filter' parameter was added in Python 2.7, + # earlier versions will raise TypeError. + tar.add(base_dir, filter=_set_uid_gid) + except TypeError: + tar.add(base_dir) + finally: + tar.close() if compress and compress not in tarfile_compress_flag: spawn([compress] + compress_flags[compress] + [archive_name], @@ -92,11 +146,10 @@ def make_tarball(base_name, base_dir, compress='gzip', _custom_formats = { 'gztar': (make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"), 'bztar': (make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"), - 'ztar': (make_tarball, [('compress', 'compress')], "compressed tar file"), - 'tar': (make_tarball, [('compress', None)], "uncompressed tar file"), + 'ztar': (make_tarball, [('compress', 'compress')], "compressed tar file"), + 'tar': (make_tarball, [('compress', None)], "uncompressed tar file"), } # Hack in and insert ourselves into the distutils code base def install_custom_make_tarball(): distutils.archive_util.ARCHIVE_FORMATS.update(_custom_formats) - diff --git a/sites/shared_conf.py b/sites/shared_conf.py index 4a6a5c4e..99fab315 100644 --- a/sites/shared_conf.py +++ b/sites/shared_conf.py @@ -12,7 +12,6 @@ html_theme_options = { 'description': "A Python implementation of SSHv2.", 'github_user': 'paramiko', 'github_repo': 'paramiko', - 'gratipay_user': 'bitprophet', 'analytics_id': 'UA-18486793-2', 'travis_button': True, } diff --git a/sites/www/changelog.rst b/sites/www/changelog.rst index bb93f885..57bc306d 100644 --- a/sites/www/changelog.rst +++ b/sites/www/changelog.rst @@ -2,6 +2,361 @@ Changelog ========= +* :feature:`869` Add an ``auth_timeout`` kwarg to `SSHClient.connect + <paramiko.client.SSHClient.connect>` (default: 30s) to avoid hangs when the + remote end becomes unresponsive during the authentication step. Credit to + ``@timsavage``. +* :support:`921` Tighten up the ``__hash__`` implementation for various key + classes; less code is good code. Thanks to Francisco Couzo for the patch. 
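To make the ``auth_timeout`` addition (:feature:`869`) concrete, a minimal sketch; the hostname, credentials, and timeout values below are illustrative placeholders, not anything taken from the changelog::

    import paramiko

    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # 'timeout' bounds the TCP connect; 'auth_timeout' (default: 30s) bounds
    # the authentication step, so an unresponsive server cannot hang us there.
    client.connect(
        'ssh.example.com',   # placeholder host
        username='user',     # placeholder credentials
        password='secret',
        timeout=10,
        auth_timeout=30,
    )
    client.close()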
+* :bug:`983` Move ``sha1`` above the now-arguably-broken ``md5`` in the list of + preferred MAC algorithms, as an incremental security improvement for users + whose target systems offer both. Credit: Pierce Lopez. +* :bug:`667` The RC4/arcfour family of ciphers has been broken since version + 2.0; but since the algorithm is now known to be completely insecure, we are + opting to remove support outright instead of fixing it. Thanks to Alex Gaynor + for catch & patch. +* :feature:`857` Allow `SSHClient.set_missing_host_key_policy + <paramiko.client.SSHClient.set_missing_host_key_policy>` to accept policy + classes _or_ instances, instead of only instances, thus fixing a + long-standing gotcha for unaware users. +* :feature:`951` Add support for ECDH key exchange (kex), specifically the + algorithms ``ecdh-sha2-nistp256``, ``ecdh-sha2-nistp384``, and + ``ecdh-sha2-nistp521``. They now come before the older ``diffie-hellman-*`` + family of kex algorithms in the preferred-kex list. Thanks to Shashank + Veerapaneni for the patch & Pierce Lopez for a follow-up. +* :support:`- backported` A big formatting pass to clean up an enormous number + of invalid Sphinx reference links, discovered by switching to a modern, + rigorous nitpicking doc-building mode. +* :bug:`900` (via :issue:`911`) Prefer newer ``ecdsa-sha2-nistp`` keys over RSA + and DSA keys during host key selection. This improves compatibility with + OpenSSH, both in terms of general behavior, and also re: ability to properly + leverage OpenSSH-modified ``known_hosts`` files. Credit: ``@kasdoe`` for + original report/PR and Pierce Lopez for the second draft. +* :bug:`794` (via :issue:`981`) Prior support for ``ecdsa-sha2-nistp(384|521)`` + algorithms didn't fully extend to covering host keys, preventing connection + to hosts which only offer these key types and no others. This is now fixed. + Thanks to ``@ncoult`` and ``@kasdoe`` for reports and Pierce Lopez for the + patch. +* :feature:`325` (via :issue:`972`) Add Ed25519 support, for both host keys + and user authentication. Big thanks to Alex Gaynor for the patch. + + .. note:: + This change adds the ``bcrypt`` and ``pynacl`` Python libraries as + dependencies. No C-level dependencies beyond those previously required (for + Cryptography) have been added. + +* :support:`974 backported` Overhaul the codebase to be PEP-8, etc, compliant + (i.e. passes the maintainer's preferred `flake8 <http://flake8.pycqa.org/>`_ + configuration) and add a ``flake8`` step to the Travis config. Big thanks to + Dorian Pula! +* :bug:`683` Make ``util.log_to_file`` append instead of replace. Thanks + to ``@vlcinsky`` for the report. +* :release:`2.1.2 <2017-02-20>` +* :release:`2.0.5 <2017-02-20>` +* :release:`1.18.2 <2017-02-20>` +* :release:`1.17.4 <2017-02-20>` +* :bug:`853 (1.17+)` Tweak how `RSAKey.__str__ <paramiko.rsakey.RSAKey>` + behaves so it doesn't cause ``TypeError`` under Python 3. Thanks to Francisco + Couzo for the report. +* :bug:`862 (1.17+)` (via :issue:`863`) Avoid test suite exceptions on + platforms lacking ``errno.ETIME`` (which seems to be some FreeBSD and some + Windows environments.) Thanks to Sofian Brabez. +* :bug:`44 (1.17+)` (via :issue:`891`) `SSHClient <paramiko.client.SSHClient>` + now gives its internal `Transport <paramiko.transport.Transport>` a handle on + itself, preventing garbage collection of the client until the session is + closed. 
Without this, some code which returns stream or transport objects + without the client that generated them, would result in premature session + closure when the client was GCd. Credit: ``@w31rd0`` for original report, + Omer Anson for the patch. +* :bug:`713 (<2.0)` (via :issue:`714` and :issue:`889`) Don't pass + initialization vectors to PyCrypto when dealing with counter-mode ciphers; + newer PyCrypto versions throw an exception otherwise (older ones simply + ignored this parameter altogether). Thanks to ``@jmh045000`` for report & + patches. +* :bug:`895 (1.17+)` Fix a bug in server-mode concerning multiple interactive + auth steps (which were incorrectly responded to). Thanks to Dennis + Kaarsemaker for catch & patch. +* :support:`866 backported (1.17+)` (also :issue:`838`) Remove an old + test-related file we don't support, and add PyPy to Travis-CI config. Thanks + to Pierce Lopez for the final patch and Pedro Rodrigues for an earlier + edition. +* :release:`2.1.1 <2016-12-12>` +* :release:`2.0.4 <2016-12-12>` +* :release:`1.18.1 <2016-12-12>` +* :bug:`859 (1.18+)` (via :issue:`860`) A tweak to the original patch + implementing :issue:`398` was not fully applied, causing calls to + `~paramiko.client.SSHClient.invoke_shell` to fail with ``AttributeError``. + This has been fixed. Patch credit: Kirk Byers. +* :bug:`-` Accidentally merged the new features from 1.18.0 into the + 2.0.x bugfix-only branch. This included merging a bug in one of those new + features (breaking `~paramiko.client.SSHClient.invoke_shell` with an + ``AttributeError``.) The offending code has been stripped out of the 2.0.x + line (but of course, remains in 2.1.x and above.) +* :bug:`859` (via :issue:`860`) A tweak to the original patch implementing + :issue:`398` was not fully applied, causing calls to + `~paramiko.client.SSHClient.invoke_shell` to fail with ``AttributeError``. + This has been fixed. Patch credit: Kirk Byers. +* :release:`2.1.0 <2016-12-09>` +* :release:`2.0.3 <2016-12-09>` +* :release:`1.18.0 <2016-12-09>` +* :release:`1.17.3 <2016-12-09>` +* :bug:`802 (1.17+)` (via :issue:`804`) Update our vendored Windows API module + to address errors of the form ``AttributeError: 'module' object has no + attribute 'c_ssize_t'``. Credit to Jason R. Coombs. +* :bug:`824 (1.17+)` Fix the implementation of ``PKey.write_private_key_file`` + (this method is only publicly defined on subclasses; the fix was in the + private real implementation) so it passes the correct params to ``open()``. + This bug apparently went unnoticed and unfixed for 12 entire years. Congrats + to John Villalovos for noticing & submitting the patch! +* :support:`801 backported (1.17+)` Skip a Unix-only test when on Windows; + thanks to Gabi Davar. +* :support:`792 backported (1.17+)` Minor updates to the README and demos; + thanks to Alan Yee. +* :feature:`780 (1.18+)` (also :issue:`779`, and may help users affected by + :issue:`520`) Add an optional ``timeout`` parameter to + `Transport.start_client <paramiko.transport.Transport.start_client>` (and + feed it the value of the configured connection timeout when used within + `SSHClient <paramiko.client.SSHClient>`.) This helps prevent situations where + network connectivity isn't timing out, but the remote server is otherwise + unable to service the connection in a timely manner. Credit to + ``@sanseihappa``. +* :bug:`742` (also re: :issue:`559`) Catch ``AssertionError`` thrown by + Cryptography when attempting to load bad ECDSA keys, turning it into an + ``SSHException``. 
This moves the behavior in line with other "bad keys" + situations, re: Paramiko's main auth loop. Thanks to MengHuan Yu for the + patch. +* :bug:`789 (1.17+)` Add a missing ``.closed`` attribute (plus ``._closed`` + because reasons) to `ProxyCommand <paramiko.proxy.ProxyCommand>` so the + earlier partial fix for :issue:`520` works in situations where one is + gatewaying via ``ProxyCommand``. +* :bug:`334 (1.17+)` Make the ``subprocess`` import in ``proxy.py`` lazy so + users on platforms without it (such as Google App Engine) can import Paramiko + successfully. (Relatedly, make it easier to tweak an active socket check + timeout [in `Transport <paramiko.transport.Transport>`] which was previously + hardcoded.) Credit: Shinya Okano. +* :support:`854 backported (1.17+)` Fix incorrect docstring/param-list for + `Transport.auth_gssapi_keyex + <paramiko.transport.Transport.auth_gssapi_keyex>` so it matches the real + signature. Caught by ``@Score_Under``. +* :bug:`681 (1.17+)` Fix a Python3-specific bug re: the handling of read + buffers when using ``ProxyCommand``. Thanks to Paul Kapp for catch & patch. +* :feature:`398 (1.18+)` Add an ``environment`` dict argument to + `Client.exec_command <paramiko.client.SSHClient.exec_command>` (plus the + lower level `Channel.update_environment + <paramiko.channel.Channel.update_environment>` and + `Channel.set_environment_variable + <paramiko.channel.Channel.set_environment_variable>` methods) which + implements the ``env`` SSH message type. This means the remote shell + environment can be set without the use of ``VARNAME=value`` shell tricks, + provided the server's ``AcceptEnv`` lists the variables you need to set. + Thanks to Philip Lorenz for the pull request. +* :support:`819 backported (>=1.15,<2.0)` Document how lacking ``gmp`` headers + at install time can cause a significant performance hit if you build PyCrypto + from source. (Most system-distributed packages already have this enabled.) +* :release:`2.0.2 <2016-07-25>` +* :release:`1.17.2 <2016-07-25>` +* :release:`1.16.3 <2016-07-25>` +* :bug:`673 (1.16+)` (via :issue:`681`) Fix protocol banner read errors + (``SSHException``) which would occasionally pop up when using + ``ProxyCommand`` gatewaying. Thanks to ``@Depado`` for the initial report and + Paul Kapp for the fix. +* :bug:`774 (1.16+)` Add a ``_closed`` private attribute to + `~paramiko.channel.Channel` objects so that they continue functioning when + used as proxy sockets under Python 3 (e.g. as ``direct-tcpip`` gateways for + other Paramiko connections.) +* :bug:`758 (1.16+)` Apply type definitions to ``_winapi`` module from + `jaraco.windows <https://github.com/jaraco/jaraco.windows>`_ 3.6.1. This + should address issues on Windows platforms that often result in errors like + ``ArgumentError: [...] int too long to convert``. Thanks to ``@swohlerLL`` + for the report and Jason R. Coombs for the patch. +* :release:`2.0.1 <2016-06-21>` +* :release:`1.17.1 <2016-06-21>` +* :release:`1.16.2 <2016-06-21>` +* :bug:`520 (1.16+)` (Partial fix) Fix at least one instance of race condition + driven threading hangs at end of the Python interpreter session. (Includes a + docs update as well - always make sure to ``.close()`` your clients!) 
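As a usage sketch for the ``environment`` argument described in :feature:`398`, assuming a server whose ``AcceptEnv`` permits the variable being set (host, credentials, command, and variable name are illustrative)::

    import paramiko

    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect('ssh.example.com', username='user', password='secret')
    # Each key/value pair is sent as an SSH 'env' request before the command
    # runs; servers ignore or reject names not listed in their AcceptEnv.
    stdin, stdout, stderr = client.exec_command(
        'echo "$BUILD_ID"',
        environment={'BUILD_ID': '42'},
    )
    print(stdout.read().decode())
    client.close()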
+* :bug:`537 (1.16+)` Fix a bug in `BufferedPipe.set_event + <paramiko.buffered_pipe.BufferedPipe.set_event>` which could cause + deadlocks/hangs when one uses `select.select` against + `~paramiko.channel.Channel` objects (or otherwise calls `Channel.fileno + <paramiko.channel.Channel.fileno>` after the channel has closed). Thanks to + Przemysław Strzelczak for the report & reproduction case, and to Krzysztof + Rusek for the fix. +* :release:`2.0.0 <2016-04-28>` +* :release:`1.17.0 <2016-04-28>` +* :release:`1.16.1 <2016-04-28>` +* :release:`1.15.5 <2016-04-28>` +* :feature:`731` (working off the earlier :issue:`611`) Add support for 384- + and 512-bit elliptic curve groups in ECDSA key types (aka + ``ecdsa-sha2-nistp384`` / ``ecdsa-sha2-nistp521``). Thanks to Michiel Tiller + and ``@CrazyCasta`` for the patches. +* :bug:`670` Due to an earlier bugfix, less-specific ``Host`` blocks' + ``ProxyCommand`` values were overriding ``ProxyCommand none`` in + more-specific ``Host`` blocks. This has been fixed in a backwards compatible + manner (i.e. ``ProxyCommand none`` continues to appear as a total lack of any + ``proxycommand`` key in parsed config structures). Thanks to Pat Brisbin for + the catch. +* :bug:`676` (via :issue:`677`) Fix a backwards incompatibility issue that + cropped up in `SFTPFile.prefetch <paramiko.sftp_file.SFTPFile.prefetch>` re: + the erroneously non-optional ``file_size`` parameter. Should only affect + users who manually call ``prefetch``. Thanks to ``@stevevanhooser`` for catch + & patch. +* :feature:`394` Replace PyCrypto with the Python Cryptographic Authority + (PyCA) 'Cryptography' library suite. This improves security, installability, + and performance; adds PyPy support; and much more. + + There aren't enough ways to thank Alex Gaynor for all of his work on this, + and then his patience while the maintainer let his PR grow moss for a year + and change. Paul Kehrer came in with an assist, and I think I saw Olle + Lundberg, ``@techtonik`` and ``@johnthagen`` supplying backup as well. Thanks + to all! + + .. warning:: + **This is a backwards incompatible change.** + + However, **it should only affect installation** requirements; **no API + changes are intended or expected**. Please report any such breakages as + bugs. + + See our updated :doc:`installation docs <installing>` for details on what + is now required to install Paramiko; many/most users should be able to + simply ``pip install -U paramiko`` (especially if you **upgrade to pip + 8**). + +* :bug:`577` (via :issue:`578`; should also fix :issue:`718`, :issue:`560`) Fix + stalled/hung SFTP downloads by cleaning up some threading lock issues. Thanks + to Stephen C. Pope for the patch. +* :bug:`716` Fix a Python 3 compatibility issue when handling two-factor + authentication. Thanks to Mateusz Kowalski for the catch & original patch. +* :support:`729 backported (>=1.15,<2.0)` Clean up ``setup.py`` to always use + ``setuptools``, not doing so was a historical artifact from bygone days. + Thanks to Alex Gaynor. +* :bug:`649 major (==1.17)` Update the module in charge of handling SSH moduli + so it's consistent with OpenSSH behavior re: prime number selection. Thanks + to Damien Tournoud for catch & patch. +* :bug:`617` (aka `fabric/fabric#1429 + <https://github.com/fabric/fabric/issues/1429>`_; via :issue:`679`; related: + :issue:`678`, :issue:`685`, :issue:`615` & :issue:`616`) Fix up + `~paramiko.ssh_exception.NoValidConnectionsError` so it pickles correctly, + and fix a related Python 3 compatibility issue. 
Thanks to Rebecca Schlussel + for the report & Marius Gedminas for the patch. +* :bug:`613` (via :issue:`619`) Update to ``jaraco.windows`` 3.4.1 to fix some + errors related to ``ctypes`` on Windows platforms. Credit to Jason R. Coombs. +* :support:`621 backported (>=1.15,<2.0)` Annotate some public attributes on + `~paramiko.channel.Channel` such as ``.closed``. Thanks to Sergey Vasilyev + for the report. +* :bug:`632` Fix logic bug in the SFTP client's callback-calling functionality; + previously there was a chance the given callback would fire twice at the end + of a transfer. Thanks to ``@ab9-er`` for catch & original patch. +* :support:`612 backported (>=1.15,<2.0)` Identify & work around a race + condition in the test for handshake timeouts, which was causing frequent test + failures for a subset of contributors as well as Travis-CI (usually, but not + always, limited to Python 3.5). Props to Ed Kellett for assistance during + some of the troubleshooting. +* :support:`697 backported (>=1.15,<2.0)` Remove whitespace in our + ``setup.py``'s ``install_requires`` as it triggers occasional bugs in some + versions of ``setuptools``. Thanks to Justin Lecher for catch & original + patch. +* :bug:`499` Strip trailing/leading whitespace from lines when parsing SSH + config files - this brings things in line with OpenSSH behavior. Thanks to + Alfredo Esteban for the original report and Nick Pillitteri for the patch. +* :bug:`652` Fix behavior of ``gssapi-with-mic`` auth requests so they fail + gracefully (allowing followup via other auth methods) instead of raising an + exception. Patch courtesy of ``@jamercee``. +* :feature:`588 (==1.17)` Add missing file-like object methods for + `~paramiko.file.BufferedFile` and `~paramiko.sftp_file.SFTPFile`. Thanks to + Adam Meily for the patch. +* :support:`636 backported (>=1.15,<2.0)` Clean up and enhance the README (and + rename it to ``README.rst`` from just ``README``). Thanks to ``@LucasRMehl``. +* :release:`1.16.0 <2015-11-04>` +* :bug:`194 major` (also :issue:`562`, :issue:`530`, :issue:`576`) Streamline + use of ``stat`` when downloading SFTP files via `SFTPClient.get + <paramiko.sftp_client.SFTPClient.get>`; this avoids triggering bugs in some + off-spec SFTP servers such as IBM Sterling. Thanks to ``@muraleee`` for the + initial report and to Torkil Gustavsen for the patch. +* :feature:`467` (also :issue:`139`, :issue:`412`) Fully enable two-factor + authentication (e.g. when a server requires ``AuthenticationMethods + pubkey,keyboard-interactive``). Thanks to ``@perryjrandall`` for the patch + and to ``@nevins-b`` and Matt Robenolt for additional support. +* :bug:`502 major` Fix 'exec' requests in server mode to use ``get_string`` + instead of ``get_text`` to avoid ``UnicodeDecodeError`` on non-UTF-8 input. + Thanks to Anselm Kruis for the patch & discussion. +* :bug:`401` Fix line number reporting in log output regarding invalid + ``known_hosts`` line entries. Thanks to Dylan Thacker-Smith for catch & + patch. +* :support:`525 backported` Update the vendored Windows API addon to a more + recent edition. Also fixes :issue:`193`, :issue:`488`, :issue:`498`. Thanks + to Jason Coombs. +* :release:`1.15.4 <2015-11-02>` +* :release:`1.14.3 <2015-11-02>` +* :release:`1.13.4 <2015-11-02>` +* :bug:`366` Fix `~paramiko.sftp_attr.SFTPAttributes` so its string + representation doesn't raise exceptions on empty/initialized instances. Patch + by Ulrich Petri. 
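One hedged illustration of the two-factor flow referenced in :feature:`467` — this assumes a server configured with ``AuthenticationMethods publickey,keyboard-interactive`` whose interactive step accepts the account password; all names below are placeholders and this is a sketch of one way to drive it, not the only one::

    import paramiko

    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # Passing both a private key and a password lets the client attempt the
    # publickey step and then answer the keyboard-interactive prompt with the
    # password, rather than stopping after the first (partial) success.
    client.connect(
        'ssh.example.com',
        username='user',
        key_filename='/path/to/id_rsa',
        password='second-factor-secret',
    )
    client.close()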
+* :bug:`359` Use correct attribute name when trying to use Python 3's + ``int.bit_length`` method; prior to fix, the Python 2 custom fallback + implementation was always used, even on Python 3. Thanks to Alex Gaynor. +* :support:`594 backported` Correct some post-Python3-port docstrings to + specify ``bytes`` type instead of ``str``. Credit to ``@redixin``. +* :bug:`565` Don't explode with ``IndexError`` when reading private key files + lacking an ``-----END <type> PRIVATE KEY-----`` footer. Patch courtesy of + Prasanna Santhanam. +* :feature:`604` Add support for the ``aes192-ctr`` and ``aes192-cbc`` ciphers. + Thanks to Michiel Tiller for noticing it was as easy as tweaking some key + sizes :D +* :feature:`356` (also :issue:`596`, :issue:`365`, :issue:`341`, :issue:`164`, + :issue:`581`, and a bunch of other duplicates besides) Add support for SHA-2 + based key exchange (kex) algorithm ``diffie-hellman-group-exchange-sha256`` + and (H)MAC algorithms ``hmac-sha2-256`` and ``hmac-sha2-512``. + + This change includes tweaks to debug-level logging regarding + algorithm-selection handshakes; the old all-in-one log line is now multiple + easier-to-read, printed-at-handshake-time log lines. + + Thanks to the many people who submitted patches for this functionality and/or + assisted in testing those patches. That list includes but is not limited to, + and in no particular order: Matthias Witte, Dag Wieers, Ash Berlin, Etienne + Perot, Gert van Dijk, ``@GuyShaanan``, Aaron Bieber, ``@cyphase``, and Eric + Brown. +* :release:`1.15.3 <2015-10-02>` +* :support:`554 backported` Fix inaccuracies in the docstring for the ECDSA key + class. Thanks to Jared Hance for the patch. +* :support:`516 backported` Document `~paramiko.agent.AgentRequestHandler`. + Thanks to ``@toejough`` for report & suggestions. +* :bug:`496 (1.15+)` Fix a handful of small but critical bugs in Paramiko's + GSSAPI support (note: this includes switching from PyCrypo's Random to + `os.urandom`). Thanks to Anselm Kruis for catch & patch. +* :bug:`491` (combines :issue:`62` and :issue:`439`) Implement timeout + functionality to address hangs from dropped network connections and/or failed + handshakes. Credit to ``@vazir`` and ``@dacut`` for the original patches and + to Olle Lundberg for reimplementation. +* :bug:`490` Skip invalid/unparseable lines in ``known_hosts`` files, instead + of raising `~paramiko.ssh_exception.SSHException`. This brings Paramiko's + behavior more in line with OpenSSH, which silently ignores such input. Catch + & patch courtesy of Martin Topholm. +* :bug:`404` Print details when displaying + `~paramiko.ssh_exception.BadHostKeyException` objects (expected vs received + data) instead of just "hey shit broke". Patch credit: Loic Dachary. +* :bug:`469` (also :issue:`488`, :issue:`461` and like a dozen others) Fix a + typo introduced in the 1.15 release which broke WinPageant support. Thanks to + everyone who submitted patches, and to Steve Cohen who was the lucky winner + of the cherry-pick lottery. +* :bug:`353` (via :issue:`482`) Fix a bug introduced in the Python 3 port + which caused ``OverFlowError`` (and other symptoms) in SFTP functionality. + Thanks to ``@dboreham`` for leading the troubleshooting charge, and to + Scott Maxwell for the final patch. +* :support:`582` Fix some old ``setup.py`` related helper code which was + breaking ``bdist_dumb`` on Mac OS X. Thanks to Peter Odding for the patch. +* :bug:`22 major` Try harder to connect to multiple network families (e.g. 
IPv4 + vs IPv6) in case of connection issues; this helps with problems such as hosts + which resolve both IPv4 and IPv6 addresses but are only listening on IPv4. + Thanks to Dries Desmet for original report and Torsten Landschoff for the + foundational patchset. +* :bug:`402` Check to see if an SSH agent is actually present before trying to + forward it to the remote end. This replaces what was usually a useless + ``TypeError`` with a human-readable + `~paramiko.ssh_exception.AuthenticationException`. Credit to Ken Jordan for + the fix and Yvan Marques for original report. * :release:`1.15.2 <2014-12-19>` * :release:`1.14.2 <2014-12-19>` * :release:`1.13.3 <2014-12-19>` @@ -25,7 +380,7 @@ Changelog use of the ``shlex`` module. Thanks to Yan Kalchevskiy. * :support:`422 backported` Clean up some unused imports. Courtesy of Olle Lundberg. -* :support:`421 backported` Modernize threading calls to user newer API. Thanks +* :support:`421 backported` Modernize threading calls to use newer API. Thanks to Olle Lundberg. * :support:`419 backported` Modernize a bunch of the codebase internals to leverage decorators. Props to ``@beckjake`` for realizing we're no longer on @@ -46,10 +401,12 @@ Changelog * :release:`1.15.1 <2014-09-22>` * :bug:`399` SSH agent forwarding (potentially other functionality as well) would hang due to incorrect values passed into the new window size - arguments for `.Transport` (thanks to a botched merge). This has been - corrected. Thanks to Dylan Thacker-Smith for the report & patch. -* :feature:`167` Add `.SSHConfig.get_hostnames` for easier introspection of a - loaded SSH config file or object. Courtesy of Søren Løvborg. + arguments for `~paramiko.transport.Transport` (thanks to a botched merge). + This has been corrected. Thanks to Dylan Thacker-Smith for the report & + patch. +* :feature:`167` Add `~paramiko.config.SSHConfig.get_hostnames` for easier + introspection of a loaded SSH config file or object. Courtesy of Søren + Løvborg. * :release:`1.15.0 <2014-09-18>` * :support:`393` Replace internal use of PyCrypto's ``SHA.new`` with the stdlib's ``hashlib.sha1``. Thanks to Alex Gaynor. @@ -58,10 +415,10 @@ Changelog (:ref:`installation docs here <gssapi>`). Mega thanks to Sebastian Deiß, with assist by Torsten Landschoff. - .. note:: - Unix users should be aware that the ``python-gssapi`` library (a - requirement for using this functionality) only appears to support - Python 2.7 and up at this time. + .. note:: + Unix users should be aware that the ``python-gssapi`` library (a + requirement for using this functionality) only appears to support + Python 2.7 and up at this time. * :bug:`346 major` Fix an issue in private key files' encryption salts that could cause tracebacks and file corruption if keys were re-encrypted. Credit @@ -157,7 +514,7 @@ Changelog Plugaru. * :bug:`-` Fix logging error in sftp_client for filenames containing the '%' character. Thanks to Antoine Brenner. -* :bug:`308` Fix regression in dsskey.py that caused sporadic signature +* :bug:`308` Fix regression in dsskey.py that caused sporadic signature verification failures. Thanks to Chris Rose. * :support:`299` Use deterministic signatures for ECDSA keys for improved security. Thanks to Alex Gaynor. @@ -180,7 +537,7 @@ Changelog * :feature:`16` **Python 3 support!** Our test suite passes under Python 3, and it (& Fabric's test suite) continues to pass under Python 2. 
**Python 2.5 is no longer supported with this change!** - + The merged code was built on many contributors' efforts, both code & feedback. In no particular order, we thank Daniel Goertzen, Ivan Kolodyazhny, Tomi Pieviläinen, Jason R. Coombs, Jan N. Schulze, ``@Lazik``, Dorian Pula, diff --git a/sites/www/conf.py b/sites/www/conf.py index 0b0fb85c..c7ba0a86 100644 --- a/sites/www/conf.py +++ b/sites/www/conf.py @@ -8,8 +8,7 @@ from shared_conf import * # Releases changelog extension extensions.append('releases') -# Paramiko 1.x tags start with 'v'. Meh. -releases_release_uri = "https://github.com/paramiko/paramiko/tree/v%s" +releases_release_uri = "https://github.com/paramiko/paramiko/tree/%s" releases_issue_uri = "https://github.com/paramiko/paramiko/issues/%s" # Default is 'local' building, but reference the public docs site when building diff --git a/sites/www/faq.rst b/sites/www/faq.rst index a5d9b383..74b7501e 100644 --- a/sites/www/faq.rst +++ b/sites/www/faq.rst @@ -24,3 +24,13 @@ However, **closed does not imply locked** - affected users can still post comments on such tickets - and **we will always consider actual patch submissions for these issues**, provided they can get +1s from similarly affected users and are proven to not break existing functionality. + +I'm having strange issues with my code hanging at shutdown! +=========================================================== + +Make sure you explicitly ``.close()`` your connection objects (usually +``SSHClient``) if you're having any sort of hang/freeze at shutdown time! + +Doing so isn't strictly necessary 100% of the time, but it is almost always the +right solution if you run into the various corner cases that cause race +conditions, etc. diff --git a/sites/www/index.rst b/sites/www/index.rst index 1b609709..f0a5db8a 100644 --- a/sites/www/index.rst +++ b/sites/www/index.rst @@ -3,8 +3,9 @@ Welcome to Paramiko! Paramiko is a Python (2.6+, 3.3+) implementation of the SSHv2 protocol [#]_, providing both client and server functionality. While it leverages a Python C -extension for low level cryptography (`PyCrypto <http://pycrypto.org>`_), -Paramiko itself is a pure Python interface around SSH networking concepts. +extension for low level cryptography +(`Cryptography <https://cryptography.io>`_), Paramiko itself is a pure Python +interface around SSH networking concepts. This website covers project information for Paramiko such as the changelog, contribution guidelines, development roadmap, news/blog, and so forth. Detailed @@ -19,6 +20,7 @@ Please see the sidebar to the left to begin. changelog FAQs <faq> installing + installing-1.x contributing contact @@ -26,11 +28,7 @@ Please see the sidebar to the left to begin. .. rubric:: Footnotes .. [#] - SSH is defined in RFCs - `4251 <http://www.rfc-editor.org/rfc/rfc4251.txt>`_, - `4252 <http://www.rfc-editor.org/rfc/rfc4252.txt>`_, - `4253 <http://www.rfc-editor.org/rfc/rfc4253.txt>`_, and - `4254 <http://www.rfc-editor.org/rfc/rfc4254.txt>`_; - the primary working implementation of the protocol is the `OpenSSH project + SSH is defined in :rfc:`4251`, :rfc:`4252`, :rfc:`4253` and :rfc:`4254`. The + primary working implementation of the protocol is the `OpenSSH project <http://openssh.org>`_. Paramiko implements a large portion of the SSH feature set, but there are occasional gaps. 
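The FAQ entry added above recommends explicitly closing connection objects to avoid shutdown-time hangs. A minimal sketch of what that looks like in practice; the host name and credentials are purely hypothetical, and ``AutoAddPolicy`` is used only to keep the example short (real code should verify host keys)::

    import paramiko

    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # Hypothetical host and credentials, for illustration only.
    client.connect('ssh.example.com', username='alice', password='sekrit')
    try:
        stdin, stdout, stderr = client.exec_command('uptime')
        print(stdout.read())
    finally:
        # Explicit close releases the underlying Transport thread and avoids
        # the interpreter-shutdown hangs described in the FAQ above.
        client.close()

    # SSHClient also works as a context manager, which closes it for you:
    with paramiko.SSHClient() as client:
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect('ssh.example.com', username='alice', password='sekrit')
        print(client.exec_command('uptime')[1].read())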
diff --git a/sites/www/installing-1.x.rst b/sites/www/installing-1.x.rst new file mode 100644 index 00000000..8ede40d5 --- /dev/null +++ b/sites/www/installing-1.x.rst @@ -0,0 +1,121 @@ +================ +Installing (1.x) +================ + +.. note:: Installing Paramiko 2.0 or above? See :doc:`installing` instead. + +This document includes legacy notes on installing Paramiko 1.x (specifically, +1.13 and up). Users are strongly encouraged to upgrade to 2.0 when possible; +PyCrypto (the dependency covered below) is no longer maintained and contains +security vulnerabilities. + +General install notes +===================== + +* Python 2.6+ and 3.3+ are supported; Python <=2.5 and 3.0-3.2 are **not + supported**. +* See the note in the main install doc about :ref:`release-lines` for details + on specific versions you may want to install. + + .. note:: 1.x will eventually be entirely end-of-lifed. +* Paramiko 1.7-1.14 have only one dependency: :ref:`pycrypto`. +* Paramiko 1.15+ (not including 2.x and above) add a second, pure-Python + dependency: the ``ecdsa`` module, trivially installable via PyPI. +* Paramiko 1.15+ (again, not including 2.x and up) also allows you to + optionally install a few more dependencies to gain support for + :ref:`GSS-API/Kerberos <gssapi-on-1x>`. +* Users on Windows may want to opt for the :ref:`pypm` approach. + + +.. _pycrypto: + +PyCrypto +======== + +`PyCrypto <https://www.dlitz.net/software/pycrypto/>`__ provides the low-level +(C-based) encryption algorithms we need to implement the SSH protocol. There +are a couple gotchas associated with installing PyCrypto: its compatibility +with Python's package tools, and the fact that it is a C-based extension. + +C extension +----------- + +Unless you are installing from a precompiled source such as a Debian apt +repository or RedHat RPM, or using :ref:`pypm <pypm>`, you will also need the +ability to build Python C-based modules from source in order to install +PyCrypto. Users on **Unix-based platforms** such as Ubuntu or Mac OS X will +need the traditional C build toolchain installed (e.g. Developer Tools / XCode +Tools on the Mac, or the ``build-essential`` package on Ubuntu or Debian Linux +-- basically, anything with ``gcc``, ``make`` and so forth) as well as the +Python development libraries, often named ``python-dev`` or similar. + +Slow vs fast crypto math +~~~~~~~~~~~~~~~~~~~~~~~~ + +PyCrypto attempts to use the ``gmp`` C math library if it is present on your +system, which enables what it internally calls "fastmath" (``_fastmath.so``). +When those headers are not available, it falls back to "slowmath" +(``_slowmath.py``) which is a pure-Python implementation. + +Real-world tests have shown significant benefits to using the C version of this +code; thus we strongly recommend you install the ``gmp`` development headers +**before** installing Paramiko/PyCrypto. E.g.:: + + $ apt-get install libgmp-dev # or just apt + $ yum install gmp-devel # or dnf + $ brew install gmp + +If you're unsure which version of math you've ended up with, a quick way to +check is to examine whether ``_fastmath.so`` or ``_slowmath.py`` appears in the +output of:: + + from Crypto.PublicKey import RSA + print(RSA._impl._math) + +Windows +~~~~~~~ + +For **Windows** users we recommend using :ref:`pypm`, installing a C +development environment such as `Cygwin <http://cygwin.com>`_ or obtaining a +precompiled Win32 PyCrypto package from `voidspace's Python modules page +<http://www.voidspace.org.uk/python/modules.shtml#pycrypto>`_. + +.. 
note:: + Some Windows users whose Python is 64-bit have found that the PyCrypto + dependency ``winrandom`` may not install properly, leading to ImportErrors. + In this scenario, you'll probably need to compile ``winrandom`` yourself + via e.g. MS Visual Studio. See `Fabric #194 + <https://github.com/fabric/fabric/issues/194>`_ for info. + + +.. _pypm: + +ActivePython and PyPM +===================== + +Windows users who already have ActiveState's `ActivePython +<http://www.activestate.com/activepython/downloads>`_ distribution installed +may find Paramiko is best installed with `its package manager, PyPM +<http://code.activestate.com/pypm/>`_. Below is example output from an +installation of Paramiko via ``pypm``:: + + C:\> pypm install paramiko + The following packages will be installed into "%APPDATA%\Python" (2.7): + paramiko-1.7.8 pycrypto-2.4 + Get: [pypm-free.activestate.com] paramiko 1.7.8 + Get: [pypm-free.activestate.com] pycrypto 2.4 + Installing paramiko-1.7.8 + Installing pycrypto-2.4 + C:\> + + +.. _gssapi-on-1x: + +Optional dependencies for GSS-API / SSPI / Kerberos +=================================================== + +First, see the main install doc's notes: :ref:`gssapi` - everything there is +required for Paramiko 1.x as well. + +Additionally, users of Paramiko 1.x, on all platforms, need a final dependency: +`pyasn1 <https://pypi.python.org/pypi/pyasn1>`_ ``0.1.7`` or better. diff --git a/sites/www/installing.rst b/sites/www/installing.rst index a657c3fc..f335a9e7 100644 --- a/sites/www/installing.rst +++ b/sites/www/installing.rst @@ -2,6 +2,13 @@ Installing ========== + +.. note:: + These instructions cover Paramiko 2.0 and above. If you're looking to + install Paramiko 1.x, see :doc:`installing-1.x`. However, **the 1.x line + relies on insecure dependencies** so upgrading is strongly encouraged. + + .. _paramiko-itself: Paramiko itself @@ -12,21 +19,15 @@ via `pip <http://pip-installer.org>`_:: $ pip install paramiko -.. note:: - Users who want the bleeding edge can install the development version via - ``pip install paramiko==dev``. - -We currently support **Python 2.6, 2.7 and 3.3+** (Python **3.2** should also -work but has a less-strong compatibility guarantee from us.) Users on Python -2.5 or older are urged to upgrade. +We currently support **Python 2.6, 2.7, 3.3+, and PyPy**. Users on Python 2.5 +or older (or 3.2 or older) are urged to upgrade. -Paramiko has two hard dependencies: the pure-Python ECDSA module ``ecdsa``, and the -PyCrypto C extension. ``ecdsa`` is easily installable from wherever you -obtained Paramiko's package; PyCrypto may require more work. Read on for -details. +Paramiko has only one direct hard dependency: the Cryptography library. See +:ref:`cryptography`. If you need GSS-API / SSPI support, see :ref:`the below subsection on it -<gssapi>` for details on additional dependencies. +<gssapi>` for details on its optional dependencies. + .. _release-lines: @@ -37,71 +38,52 @@ Users desiring stability may wish to pin themselves to a specific release line once they first start using Paramiko; to assist in this, we guarantee bugfixes for the last 2-3 releases including the latest stable one. -If you're unsure which version to install, we have suggestions: +This typically spans major & minor versions, so even if e.g. 3.1 is the latest +stable release, it's likely that bugfixes will occasionally come out for the +latest 2.x and perhaps even 1.x releases, as well as for 3.0. 
New feature +releases for previous major-version lines are less likely but not unheard of. + +If you're unsure which version to install: * **Completely new users** should always default to the **latest stable release** (as above, whatever is newest / whatever shows up with ``pip install paramiko``.) -* **Users upgrading from a much older version** (e.g. the 1.7.x line) should - probably get the **oldest actively supported line** (see the paragraph above - this list for what that currently is.) +* **Users upgrading from a much older version** (e.g. 1.7.x through 1.10.x) + should probably get the **oldest actively supported line** (check the + :doc:`changelog` for recent releases). * **Everybody else** is hopefully already "on" a given version and can carefully upgrade to whichever version they care to, when their release line stops being supported. -PyCrypto -======== - -`PyCrypto <https://www.dlitz.net/software/pycrypto/>`_ provides the low-level -(C-based) encryption algorithms we need to implement the SSH protocol. There -are a couple gotchas associated with installing PyCrypto: its compatibility -with Python's package tools, and the fact that it is a C-based extension. - -C extension ------------ +.. _cryptography: -Unless you are installing from a precompiled source such as a Debian apt -repository or RedHat RPM, or using :ref:`pypm <pypm>`, you will also need the -ability to build Python C-based modules from source in order to install -PyCrypto. Users on **Unix-based platforms** such as Ubuntu or Mac OS X will -need the traditional C build toolchain installed (e.g. Developer Tools / XCode -Tools on the Mac, or the ``build-essential`` package on Ubuntu or Debian Linux --- basically, anything with ``gcc``, ``make`` and so forth) as well as the -Python development libraries, often named ``python-dev`` or similar. - -For **Windows** users we recommend using :ref:`pypm`, installing a C -development environment such as `Cygwin <http://cygwin.com>`_ or obtaining a -precompiled Win32 PyCrypto package from `voidspace's Python modules page -<http://www.voidspace.org.uk/python/modules.shtml#pycrypto>`_. - -.. note:: - Some Windows users whose Python is 64-bit have found that the PyCrypto - dependency ``winrandom`` may not install properly, leading to ImportErrors. - In this scenario, you'll probably need to compile ``winrandom`` yourself - via e.g. MS Visual Studio. See `Fabric #194 - <https://github.com/fabric/fabric/issues/194>`_ for info. +Cryptography +============ +`Cryptography <https://cryptography.io>`__ provides the low-level (C-based) +encryption algorithms we need to implement the SSH protocol. It has detailed +`installation instructions`_ (and an `FAQ +<https://cryptography.io/en/latest/faq/>`_) which you should read carefully. -.. _pypm: +In general, you'll need one of the following setups: -ActivePython and PyPM -===================== +* On Windows or Mac OS X, provided your ``pip`` is modern (8.x+): nothing else + is required. ``pip`` will install statically compiled binary archives of + Cryptography & its dependencies. +* On Linux, or on other platforms with older versions of ``pip``: you'll need a + C build toolchain, plus development headers for Python, OpenSSL and + ``libffi``. Again, see `Cryptography's install docs`_; these requirements may + occasionally change. 
-Windows users who already have ActiveState's `ActivePython -<http://www.activestate.com/activepython/downloads>`_ distribution installed -may find Paramiko is best installed with `its package manager, PyPM -<http://code.activestate.com/pypm/>`_. Below is example output from an -installation of Paramiko via ``pypm``:: + .. warning:: + If you go this route, note that **OpenSSL 1.0.1 or newer is effectively + required**. Cryptography 1.3 and older technically allow OpenSSL 0.9.8, but + 1.4 and newer - which Paramiko will gladly install or upgrade, if you e.g. + ``pip install -U`` - drop that support. - C:\> pypm install paramiko - The following packages will be installed into "%APPDATA%\Python" (2.7): - paramiko-1.7.8 pycrypto-2.4 - Get: [pypm-free.activestate.com] paramiko 1.7.8 - Get: [pypm-free.activestate.com] pycrypto 2.4 - Installing paramiko-1.7.8 - Installing pycrypto-2.4 - C:\> +.. _installation instructions: +.. _Cryptography's install docs: https://cryptography.io/en/latest/installation/ .. _gssapi: @@ -115,8 +97,6 @@ due to their infrequent utility & non-platform-agnostic requirements): * It hopefully goes without saying but **all platforms** need **a working installation of GSS-API itself**, e.g. Heimdal. -* **All platforms** need `pyasn1 <https://pypi.python.org/pypi/pyasn1>`_ - ``0.1.7`` or better. * **Unix** needs `python-gssapi <https://pypi.python.org/pypi/python-gssapi/>`_ ``0.6.1`` or better. @@ -1,53 +1,65 @@ -from os import mkdir from os.path import join from shutil import rmtree, copytree -from invoke import Collection, ctask as task -from invocations import docs as _docs -from invocations.packaging import publish - - -d = 'sites' - -# Usage doc/API site (published as docs.paramiko.org) -docs_path = join(d, 'docs') -docs_build = join(docs_path, '_build') -docs = Collection.from_module(_docs, name='docs', config={ - 'sphinx.source': docs_path, - 'sphinx.target': docs_build, -}) - -# Main/about/changelog site ((www.)?paramiko.org) -www_path = join(d, 'www') -www = Collection.from_module(_docs, name='www', config={ - 'sphinx.source': www_path, - 'sphinx.target': join(www_path, '_build'), -}) +from invoke import Collection, task +from invocations.docs import docs, www, sites +from invocations.packaging.release import ns as release_coll, publish # Until we move to spec-based testing @task -def test(ctx, coverage=False): +def test(ctx, coverage=False, flags=""): + if "--verbose" not in flags.split(): + flags += " --verbose" runner = "python" if coverage: runner = "coverage run --source=paramiko" - flags = "--verbose" ctx.run("{0} test.py {1}".format(runner, flags), pty=True) +@task +def coverage(ctx): + ctx.run("coverage run --source=paramiko test.py --verbose") + + # Until we stop bundling docs w/ releases. Need to discover use cases first. +# TODO: would be nice to tie this into our own version of build() too, but +# still have publish() use that build()...really need to try out classes! @task -def release(ctx): +def release(ctx, sdist=True, wheel=True, sign=True, dry_run=False): + """ + Wraps invocations.packaging.publish to add baked-in docs folder. + """ # Build docs first. 
Use terribad workaround pending invoke #146 - ctx.run("inv docs") + ctx.run("inv docs", pty=True, hide=False) # Move the built docs into where Epydocs used to live target = 'docs' rmtree(target, ignore_errors=True) - copytree(docs_build, target) + # TODO: make it easier to yank out this config val from the docs coll + copytree('sites/docs/_build', target) # Publish - publish(ctx, wheel=True) + publish(ctx, sdist=sdist, wheel=wheel, sign=sign, dry_run=dry_run) # Remind - print("\n\nDon't forget to update RTD's versions page for new minor releases!") + print("\n\nDon't forget to update RTD's versions page for new minor " + "releases!") -ns = Collection(test, release, docs=docs, www=www) +# TODO: "replace one task with another" needs a better public API, this is +# using unpublished internals & skips all the stuff add_task() does re: +# aliasing, defaults etc. +release_coll.tasks['publish'] = release + +ns = Collection(test, coverage, release_coll, docs, www, sites) +ns.configure({ + 'packaging': { + # NOTE: many of these are also set in kwarg defaults above; but having + # them here too means once we get rid of our custom release(), the + # behavior stays. + 'sign': True, + 'wheel': True, + 'changelog_file': join( + www.configuration()['sphinx']['source'], + 'changelog.rst', + ), + }, +}) @@ -22,6 +22,7 @@ do the unit tests! """ +# flake8: noqa import os import re import sys @@ -43,8 +44,9 @@ from tests.test_kex import KexTest from tests.test_packetizer import PacketizerTest from tests.test_auth import AuthTest from tests.test_transport import TransportTest +from tests.test_ssh_exception import NoValidConnectionsErrorTest from tests.test_client import SSHClientTest -from test_client import SSHClientTest +from test_client import SSHClientTest # XXX why shadow the above import? from test_gssapi import GSSAPITest from test_ssh_gss import GSSAuthTest from test_kex_gss import GSSKexTest @@ -156,6 +158,7 @@ def main(): if options.use_transport: suite.addTest(unittest.makeSuite(AuthTest)) suite.addTest(unittest.makeSuite(TransportTest)) + suite.addTest(unittest.makeSuite(NoValidConnectionsErrorTest)) suite.addTest(unittest.makeSuite(SSHClientTest)) if options.use_sftp: suite.addTest(unittest.makeSuite(SFTPTest)) diff --git a/tests/loop.py b/tests/loop.py index 4f5dc163..e805ad96 100644 --- a/tests/loop.py +++ b/tests/loop.py @@ -37,9 +37,11 @@ class LoopSocket (object): self.__cv = threading.Condition(self.__lock) self.__timeout = None self.__mate = None + self._closed = False def close(self): self.__unlink() + self._closed = True try: self.__lock.acquire() self.__in_buffer = bytes() diff --git a/tests/stub_sftp.py b/tests/stub_sftp.py index a894c2ba..0d673091 100644 --- a/tests/stub_sftp.py +++ b/tests/stub_sftp.py @@ -22,8 +22,10 @@ A stub SFTP server for loopback SFTP testing. 
import os import sys -from paramiko import ServerInterface, SFTPServerInterface, SFTPServer, SFTPAttributes, \ - SFTPHandle, SFTP_OK, SFTP_FAILURE, AUTH_SUCCESSFUL, OPEN_SUCCEEDED +from paramiko import ( + ServerInterface, SFTPServerInterface, SFTPServer, SFTPAttributes, + SFTPHandle, SFTP_OK, SFTP_FAILURE, AUTH_SUCCESSFUL, OPEN_SUCCEEDED, +) from paramiko.common import o666 @@ -55,7 +57,7 @@ class StubSFTPHandle (SFTPHandle): class StubSFTPServer (SFTPServerInterface): # assume current folder is a fine root - # (the tests always create and eventualy delete a subfolder, so there shouldn't be any mess) + # (the tests always create and eventually delete a subfolder, so there shouldn't be any mess) ROOT = os.getcwd() def _realpath(self, path): diff --git a/tests/test_auth.py b/tests/test_auth.py index ec78e3ce..58b2f44f 100644 --- a/tests/test_auth.py +++ b/tests/test_auth.py @@ -23,10 +23,12 @@ Some unit tests for authenticating over a Transport. import sys import threading import unittest +from time import sleep -from paramiko import Transport, ServerInterface, RSAKey, DSSKey, \ - BadAuthenticationType, InteractiveQuery, \ - AuthenticationException +from paramiko import ( + Transport, ServerInterface, RSAKey, DSSKey, BadAuthenticationType, + InteractiveQuery, AuthenticationException, +) from paramiko import AUTH_FAILED, AUTH_PARTIALLY_SUCCESSFUL, AUTH_SUCCESSFUL from paramiko.py3compat import u from tests.loop import LoopSocket @@ -73,6 +75,9 @@ class NullServer (ServerInterface): return AUTH_SUCCESSFUL if username == 'bad-server': raise Exception("Ack!") + if username == 'unresponsive-server': + sleep(5) + return AUTH_SUCCESSFUL return AUTH_FAILED def check_auth_publickey(self, username, key): @@ -83,13 +88,13 @@ class NullServer (ServerInterface): return AUTH_SUCCESSFUL return AUTH_PARTIALLY_SUCCESSFUL return AUTH_FAILED - + def check_auth_interactive(self, username, submethods): if username == 'commie': self.username = username return InteractiveQuery('password', 'Please enter a password.', ('Password', False)) return AUTH_FAILED - + def check_auth_interactive_response(self, responses): if self.username == 'commie': if (len(responses) == 1) and (responses[0] == 'cat'): @@ -111,7 +116,7 @@ class AuthTest (unittest.TestCase): self.ts.close() self.socks.close() self.sockc.close() - + def start_server(self): host_key = RSAKey.from_private_key_file(test_path('test_rsa.key')) self.public_host_key = RSAKey(data=host_key.asbytes()) @@ -120,7 +125,7 @@ class AuthTest (unittest.TestCase): self.server = NullServer() self.assertTrue(not self.event.is_set()) self.ts.start_server(self.event, self.server) - + def verify_finished(self): self.event.wait(1.0) self.assertTrue(self.event.is_set()) @@ -156,7 +161,7 @@ class AuthTest (unittest.TestCase): self.assertTrue(issubclass(etype, AuthenticationException)) self.tc.auth_password(username='slowdive', password='pygmalion') self.verify_finished() - + def test_3_multipart_auth(self): """ verify that multipart auth works. 
@@ -187,7 +192,7 @@ class AuthTest (unittest.TestCase): self.assertEqual(self.got_prompts, [('Password', False)]) self.assertEqual([], remain) self.verify_finished() - + def test_5_interactive_auth_fallback(self): """ verify that a password auth attempt will fallback to "interactive" @@ -232,3 +237,24 @@ class AuthTest (unittest.TestCase): except: etype, evalue, etb = sys.exc_info() self.assertTrue(issubclass(etype, AuthenticationException)) + + def test_9_auth_non_responsive(self): + """ + verify that authentication times out if server takes to long to + respond (or never responds). + """ + auth_timeout = self.tc.auth_timeout + self.tc.auth_timeout = 2 # Reduce to 2 seconds to speed up test + + try: + self.start_server() + self.tc.connect() + try: + remain = self.tc.auth_password('unresponsive-server', 'hello') + except: + etype, evalue, etb = sys.exc_info() + self.assertTrue(issubclass(etype, AuthenticationException)) + self.assertTrue('Authentication timeout' in str(evalue)) + finally: + # Restore value + self.tc.auth_timeout = auth_timeout diff --git a/tests/test_client.py b/tests/test_client.py index 3d2e75c9..aa3ff59b 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -22,6 +22,8 @@ Some unit tests for SSHClient. from __future__ import with_statement +import gc +import platform import socket from tempfile import mkstemp import threading @@ -31,8 +33,9 @@ import warnings import os import time from tests.util import test_path + import paramiko -from paramiko.common import PY2, b +from paramiko.common import PY2 from paramiko.ssh_exception import SSHException @@ -40,6 +43,7 @@ FINGERPRINTS = { 'ssh-dss': b'\x44\x78\xf0\xb9\xa2\x3c\xc5\x18\x20\x09\xff\x75\x5b\xc1\xd2\x6c', 'ssh-rsa': b'\x60\x73\x38\x44\xcb\x51\x86\x65\x7f\xde\xda\xa2\x2b\x5a\x57\xd5', 'ecdsa-sha2-nistp256': b'\x25\x19\xeb\x55\xe6\xa1\x47\xff\x4f\x38\xd2\x75\x6f\xa5\xd5\x60', + 'ssh-ed25519': b'\xb3\xd5"\xaa\xf9u^\xe8\xcd\x0e\xea\x02\xb9)\xa2\x80', } @@ -57,6 +61,9 @@ class NullServer (paramiko.ServerInterface): def check_auth_password(self, username, password): if (username == 'slowdive') and (password == 'pygmalion'): return paramiko.AUTH_SUCCESSFUL + if (username == 'slowdive') and (password == 'unresponsive-server'): + time.sleep(5) + return paramiko.AUTH_SUCCESSFUL return paramiko.AUTH_FAILED def check_auth_publickey(self, username, key): @@ -75,10 +82,20 @@ class NullServer (paramiko.ServerInterface): return paramiko.OPEN_SUCCEEDED def check_channel_exec_request(self, channel, command): - if command != 'yes': + if command != b'yes': return False return True + def check_channel_env_request(self, channel, name, value): + if name == 'INVALID_ENV': + return False + + if not hasattr(channel, 'env'): + setattr(channel, 'env', {}) + + channel.env[name] = value + return True + class SSHClientTest (unittest.TestCase): @@ -87,6 +104,12 @@ class SSHClientTest (unittest.TestCase): self.sockl.bind(('localhost', 0)) self.sockl.listen(1) self.addr, self.port = self.sockl.getsockname() + self.connect_kwargs = dict( + hostname=self.addr, + port=self.port, + username='slowdive', + look_for_keys=False, + ) self.event = threading.Event() def tearDown(self): @@ -124,7 +147,7 @@ class SSHClientTest (unittest.TestCase): self.tc.get_host_keys().add('[%s]:%d' % (self.addr, self.port), 'ssh-rsa', public_host_key) # Actual connection - self.tc.connect(self.addr, self.port, username='slowdive', **kwargs) + self.tc.connect(**dict(self.connect_kwargs, **kwargs)) # Authentication successful? 
self.event.wait(1.0) @@ -173,7 +196,10 @@ class SSHClientTest (unittest.TestCase): """ verify that SSHClient works with an ECDSA key. """ - self._test_connection(key_filename=test_path('test_ecdsa.key')) + self._test_connection(key_filename=test_path('test_ecdsa_256.key')) + + def test_client_ed25519(self): + self._test_connection(key_filename=test_path('test_ed25519.key')) def test_3_multiple_key_files(self): """ @@ -190,15 +216,21 @@ class SSHClientTest (unittest.TestCase): for attempt, accept in ( (['rsa', 'dss'], ['dss']), # Original test #3 (['dss', 'rsa'], ['dss']), # Ordering matters sometimes, sadly - (['dss', 'rsa', 'ecdsa'], ['dss']), # Try ECDSA but fail - (['rsa', 'ecdsa'], ['ecdsa']), # ECDSA success + (['dss', 'rsa', 'ecdsa_256'], ['dss']), # Try ECDSA but fail + (['rsa', 'ecdsa_256'], ['ecdsa']), # ECDSA success ): - self._test_connection( - key_filename=[ - test_path('test_{0}.key'.format(x)) for x in attempt - ], - allowed_keys=[types_[x] for x in accept], - ) + try: + self._test_connection( + key_filename=[ + test_path('test_{0}.key'.format(x)) for x in attempt + ], + allowed_keys=[types_[x] for x in accept], + ) + finally: + # Clean up to avoid occasional gc-related deadlocks. + # TODO: use nose test generators after nose port + self.tearDown() + self.setUp() def test_multiple_key_files_failure(self): """ @@ -223,7 +255,7 @@ class SSHClientTest (unittest.TestCase): self.tc = paramiko.SSHClient() self.tc.set_missing_host_key_policy(paramiko.AutoAddPolicy()) self.assertEqual(0, len(self.tc.get_host_keys())) - self.tc.connect(self.addr, self.port, username='slowdive', password='pygmalion') + self.tc.connect(password='pygmalion', **self.connect_kwargs) self.event.wait(1.0) self.assertTrue(self.event.is_set()) @@ -266,19 +298,18 @@ class SSHClientTest (unittest.TestCase): transport's packetizer) is closed. """ # Unclear why this is borked on Py3, but it is, and does not seem worth - # pursuing at the moment. + # pursuing at the moment. Skipped on PyPy because it fails on travis + # for unknown reasons, works fine locally. # XXX: It's the release of the references to e.g packetizer that fails # in py3... - if not PY2: + if not PY2 or platform.python_implementation() == "PyPy": return threading.Thread(target=self._run).start() - host_key = paramiko.RSAKey.from_private_key_file(test_path('test_rsa.key')) - public_host_key = paramiko.RSAKey(data=host_key.asbytes()) self.tc = paramiko.SSHClient() self.tc.set_missing_host_key_policy(paramiko.AutoAddPolicy()) self.assertEqual(0, len(self.tc.get_host_keys())) - self.tc.connect(self.addr, self.port, username='slowdive', password='pygmalion') + self.tc.connect(**dict(self.connect_kwargs, password='pygmalion')) self.event.wait(1.0) self.assertTrue(self.event.is_set()) @@ -289,14 +320,10 @@ class SSHClientTest (unittest.TestCase): self.tc.close() del self.tc - # hrm, sometimes p isn't cleared right away. why is that? - #st = time.time() - #while (time.time() - st < 5.0) and (p() is not None): - # time.sleep(0.1) - - # instead of dumbly waiting for the GC to collect, force a collection - # to see whether the SSHClient object is deallocated correctly - import gc + # force a collection to see whether the SSHClient object is deallocated + # correctly. 
2 GCs are needed to make sure it's really collected on + # PyPy + gc.collect() gc.collect() self.assertTrue(p() is None) @@ -306,14 +333,12 @@ class SSHClientTest (unittest.TestCase): verify that an SSHClient can be used a context manager """ threading.Thread(target=self._run).start() - host_key = paramiko.RSAKey.from_private_key_file(test_path('test_rsa.key')) - public_host_key = paramiko.RSAKey(data=host_key.asbytes()) with paramiko.SSHClient() as tc: self.tc = tc self.tc.set_missing_host_key_policy(paramiko.AutoAddPolicy()) self.assertEquals(0, len(self.tc.get_host_keys())) - self.tc.connect(self.addr, self.port, username='slowdive', password='pygmalion') + self.tc.connect(**dict(self.connect_kwargs, password='pygmalion')) self.event.wait(1.0) self.assertTrue(self.event.is_set()) @@ -335,12 +360,99 @@ class SSHClientTest (unittest.TestCase): self.tc = paramiko.SSHClient() self.tc.get_host_keys().add('[%s]:%d' % (self.addr, self.port), 'ssh-rsa', public_host_key) # Connect with a half second banner timeout. + kwargs = dict(self.connect_kwargs, banner_timeout=0.5) self.assertRaises( paramiko.SSHException, self.tc.connect, - self.addr, - self.port, - username='slowdive', + **kwargs + ) + + def test_8_auth_trickledown(self): + """ + Failed key auth doesn't prevent subsequent pw auth from succeeding + """ + # NOTE: re #387, re #394 + # If pkey module used within Client._auth isn't correctly handling auth + # errors (e.g. if it allows things like ValueError to bubble up as per + # midway through #394) client.connect() will fail (at key load step) + # instead of succeeding (at password step) + kwargs = dict( + # Password-protected key whose passphrase is not 'pygmalion' (it's + # 'television' as per tests/test_pkey.py). NOTE: must use + # key_filename, loading the actual key here with PKey will except + # immediately; we're testing the try/except crap within Client. + key_filename=[test_path('test_rsa_password.key')], + # Actual password for default 'slowdive' user password='pygmalion', - banner_timeout=0.5 ) + self._test_connection(**kwargs) + + def test_9_auth_timeout(self): + """ + verify that the SSHClient has a configurable auth timeout + """ + threading.Thread(target=self._run).start() + host_key = paramiko.RSAKey.from_private_key_file(test_path('test_rsa.key')) + public_host_key = paramiko.RSAKey(data=host_key.asbytes()) + + self.tc = paramiko.SSHClient() + self.tc.get_host_keys().add('[%s]:%d' % (self.addr, self.port), 'ssh-rsa', public_host_key) + # Connect with a half second auth timeout + kwargs = dict(self.connect_kwargs, password='unresponsive-server', auth_timeout=0.5) + self.assertRaises( + paramiko.AuthenticationException, + self.tc.connect, + **kwargs + ) + + def test_update_environment(self): + """ + Verify that environment variables can be set by the client. + """ + threading.Thread(target=self._run).start() + + self.tc = paramiko.SSHClient() + self.tc.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + self.assertEqual(0, len(self.tc.get_host_keys())) + self.tc.connect(self.addr, self.port, username='slowdive', password='pygmalion') + + self.event.wait(1.0) + self.assertTrue(self.event.isSet()) + self.assertTrue(self.ts.is_active()) + + target_env = {b'A': b'B', b'C': b'd'} + + self.tc.exec_command('yes', environment=target_env) + schan = self.ts.accept(1.0) + self.assertEqual(target_env, getattr(schan, 'env', {})) + schan.close() + + # Cannot use assertRaises in context manager mode as it is not supported + # in Python 2.6. 
+ try: + # Verify that a rejection by the server can be detected + self.tc.exec_command('yes', environment={b'INVALID_ENV': b''}) + except SSHException as e: + self.assertTrue('INVALID_ENV' in str(e), + 'Expected variable name in error message') + self.assertTrue(isinstance(e.args[1], SSHException), + 'Expected original SSHException in exception') + else: + self.assertFalse(False, 'SSHException was not thrown.') + + + def test_missing_key_policy_accepts_classes_or_instances(self): + """ + Client.missing_host_key_policy() can take classes or instances. + """ + # AN ACTUAL UNIT TEST?! GOOD LORD + # (But then we have to test a private API...meh.) + client = paramiko.SSHClient() + # Default + assert isinstance(client._policy, paramiko.RejectPolicy) + # Hand in an instance (classic behavior) + client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + assert isinstance(client._policy, paramiko.AutoAddPolicy) + # Hand in just the class (new behavior) + client.set_missing_host_key_policy(paramiko.AutoAddPolicy) + assert isinstance(client._policy, paramiko.AutoAddPolicy) diff --git a/tests/test_ecdsa.key b/tests/test_ecdsa_256.key index 42d44734..42d44734 100644 --- a/tests/test_ecdsa.key +++ b/tests/test_ecdsa_256.key diff --git a/tests/test_ecdsa_384.key b/tests/test_ecdsa_384.key new file mode 100644 index 00000000..796bf417 --- /dev/null +++ b/tests/test_ecdsa_384.key @@ -0,0 +1,6 @@ +-----BEGIN EC PRIVATE KEY----- +MIGkAgEBBDBDdO8IXvlLJgM7+sNtPl7tI7FM5kzuEUEEPRjXIPQM7mISciwJPBt+ +y43EuG8nL4mgBwYFK4EEACKhZANiAAQWxom0C1vQAGYhjdoREMVmGKBWlisDdzyk +mgyUjKpiJ9WfbIEVLsPGP8OdNjhr1y/8BZNIts+dJd6VmYw+4HzB+4F+U1Igs8K0 +JEvh59VNkvWheViadDXCM2MV8Nq+DNg= +-----END EC PRIVATE KEY----- diff --git a/tests/test_ecdsa_521.key b/tests/test_ecdsa_521.key new file mode 100644 index 00000000..b87dc90f --- /dev/null +++ b/tests/test_ecdsa_521.key @@ -0,0 +1,7 @@ +-----BEGIN EC PRIVATE KEY----- +MIHcAgEBBEIAprQtAS3OF6iVUkT8IowTHWicHzShGgk86EtuEXvfQnhZFKsWm6Jo +iqAr1yEaiuI9LfB3Xs8cjuhgEEfbduYr/f6gBwYFK4EEACOhgYkDgYYABACaOaFL +ZGuxa5AW16qj6VLypFbLrEWrt9AZUloCMefxO8bNLjK/O5g0rAVasar1TnyHE9qj +4NwzANZASWjQNbc4MAG8vzqezFwLIn/kNyNTsXNfqEko9OgHZknlj2Z79dwTJcRA +L4QLcT5aND0EHZLB2fAUDXiWIb2j4rg1mwPlBMiBXA== +-----END EC PRIVATE KEY----- diff --git a/tests/test_ecdsa_password.key b/tests/test_ecdsa_password_256.key index eb7910ed..eb7910ed 100644 --- a/tests/test_ecdsa_password.key +++ b/tests/test_ecdsa_password_256.key diff --git a/tests/test_ecdsa_password_384.key b/tests/test_ecdsa_password_384.key new file mode 100644 index 00000000..eba33c14 --- /dev/null +++ b/tests/test_ecdsa_password_384.key @@ -0,0 +1,9 @@ +-----BEGIN EC PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: AES-128-CBC,7F7B5DBE4CE040D822441AFE7A023A1D + +y/d6tGonAXYgJniQoFCdto+CuT1y1s41qzwNLN9YdNq/+R/dtQvZAaOuGtHJRFE6 +wWabhY1bSjavVPT2z1Zw1jhDJX5HGrf9LDoyORKtUWtUJoUvGdYLHbcg8Q+//WRf +R0A01YuSw1SJX0a225S1aRcsDAk1k5F8EMb8QzSSDgjAOI8ldQF35JI+ofNSGjgS +BPOlorQXTJxDOGmokw/Wql6MbhajXKPO39H2Z53W88U= +-----END EC PRIVATE KEY----- diff --git a/tests/test_ecdsa_password_521.key b/tests/test_ecdsa_password_521.key new file mode 100644 index 00000000..5986b930 --- /dev/null +++ b/tests/test_ecdsa_password_521.key @@ -0,0 +1,10 @@ +-----BEGIN EC PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: AES-128-CBC,AEB2DE62C65D1A88C4940A3476B2F10A + +5kNk/FFPbHa0402QTrgpIT28uirJ4Amvb2/ryOEyOCe0NPbTLCqlQekj2RFYH2Un +pgCLUDkelKQv4pyuK8qWS7R+cFjE/gHHCPUWkK3djZUC8DKuA9lUKeQIE+V1vBHc +L5G+MpoYrPgaydcGx/Uqnc/kVuZx1DXLwrGGtgwNROVBtmjXC9EdfeXHLL1y0wvH 
+paNgacJpUtgqJEmiehf7eL/eiReegG553rZK3jjfboGkREUaKR5XOgamiKUtgKoc +sMpImVYCsRKd/9RI+VOqErZaEvy/9j0Ye3iH32wGOaA= +-----END EC PRIVATE KEY----- diff --git a/tests/test_ed25519.key b/tests/test_ed25519.key new file mode 100644 index 00000000..eb9f94c2 --- /dev/null +++ b/tests/test_ed25519.key @@ -0,0 +1,8 @@ +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW +QyNTUxOQAAACB69SvZKJh/9VgSL0G27b5xVYa8nethH3IERbi0YqJDXwAAAKhjwAdrY8AH +awAAAAtzc2gtZWQyNTUxOQAAACB69SvZKJh/9VgSL0G27b5xVYa8nethH3IERbi0YqJDXw +AAAEA9tGQi2IrprbOSbDCF+RmAHd6meNSXBUQ2ekKXm4/8xnr1K9komH/1WBIvQbbtvnFV +hryd62EfcgRFuLRiokNfAAAAI2FsZXhfZ2F5bm9yQEFsZXhzLU1hY0Jvb2stQWlyLmxvY2 +FsAQI= +-----END OPENSSH PRIVATE KEY----- diff --git a/tests/test_ed25519_password.key b/tests/test_ed25519_password.key new file mode 100644 index 00000000..d178aaae --- /dev/null +++ b/tests/test_ed25519_password.key @@ -0,0 +1,8 @@ +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAACmFlczI1Ni1jYmMAAAAGYmNyeXB0AAAAGAAAABDaKD4ac7 +kieb+UfXaLaw68AAAAEAAAAAEAAAAzAAAAC3NzaC1lZDI1NTE5AAAAIOQn7fjND5ozMSV3 +CvbEtIdT73hWCMRjzS/lRdUDw50xAAAAsE8kLGyYBnl9ihJNqv378y6mO3SkzrDbWXOnK6 +ij0vnuTAvcqvWHAnyu6qBbplu/W2m55ZFeAItgaEcV2/V76sh/sAKlERqrLFyXylN0xoOW +NU5+zU08aTlbSKGmeNUU2xE/xfJq12U9XClIRuVUkUpYANxNPbmTRpVrbD3fgXMhK97Jrb +DEn8ca1IqMPiYmd/hpe5+tq3OxyRljXjCUFWTnqkp9VvUdzSTdSGZHsW9i +-----END OPENSSH PRIVATE KEY----- diff --git a/tests/test_file.py b/tests/test_file.py index a6ff69e9..7fab6985 100755 --- a/tests/test_file.py +++ b/tests/test_file.py @@ -70,9 +70,9 @@ class BufferedFileTest (unittest.TestCase): def test_2_readline(self): f = LoopbackFile('r+U') - f.write(b'First line.\nSecond line.\r\nThird line.\n' + + f.write(b'First line.\nSecond line.\r\nThird line.\n' + b'Fourth line.\nFinal line non-terminated.') - + self.assertEqual(f.readline(), 'First line.\n') # universal newline mode should convert this linefeed: self.assertEqual(f.readline(), 'Second line.\n') @@ -165,7 +165,28 @@ class BufferedFileTest (unittest.TestCase): f.write(buffer(b'Too small.')) f.close() + def test_9_readable(self): + f = LoopbackFile('r') + self.assertTrue(f.readable()) + self.assertFalse(f.writable()) + self.assertFalse(f.seekable()) + f.close() + + def test_A_writable(self): + f = LoopbackFile('w') + self.assertTrue(f.writable()) + self.assertFalse(f.readable()) + self.assertFalse(f.seekable()) + f.close() + + def test_B_readinto(self): + data = bytearray(5) + f = LoopbackFile('r+') + f._write(b"hello") + f.readinto(data) + self.assertEqual(data, b'hello') + f.close() + if __name__ == '__main__': from unittest import main main() - diff --git a/tests/test_gssapi.py b/tests/test_gssapi.py index 96c268d9..bc220108 100644 --- a/tests/test_gssapi.py +++ b/tests/test_gssapi.py @@ -104,9 +104,11 @@ class GSSAPITest(unittest.TestCase): status = gss_srv_ctxt.verify_mic(mic_msg, mic_token) self.assertEquals(0, status) else: - gss_flags = sspicon.ISC_REQ_INTEGRITY |\ - sspicon.ISC_REQ_MUTUAL_AUTH |\ - sspicon.ISC_REQ_DELEGATE + gss_flags = ( + sspicon.ISC_REQ_INTEGRITY | + sspicon.ISC_REQ_MUTUAL_AUTH | + sspicon.ISC_REQ_DELEGATE + ) # Initialize a GSS-API context. 
target_name = "host/" + socket.getfqdn(targ_name) gss_ctxt = sspi.ClientAuth("Kerberos", diff --git a/tests/test_hostkeys.py b/tests/test_hostkeys.py index 0ee1bbf0..2c7ceeb9 100644 --- a/tests/test_hostkeys.py +++ b/tests/test_hostkeys.py @@ -31,6 +31,7 @@ test_hosts_file = """\ secure.example.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEA1PD6U2/TVxET6lkpKhOk5r\ 9q/kAYG6sP9f5zuUYP8i7FOFp/6ncCEbbtg/lB+A3iidyxoSWl+9jtoyyDOOVX4UIDV9G11Ml8om3\ D+jrpI9cycZHqilK0HmxDeCuxbwyMuaCygU9gS2qoRvNLWZk70OpIKSSpBo0Wl3/XUmz9uhc= +broken.example.com ssh-rsa AAAA happy.example.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEA8bP1ZA7DCZDB9J0s50l31M\ BGQ3GQ/Fc7SX6gkpXkwcZryoi4kNFhHu5LvHcZPdxXV1D+uTMfGS1eyd2Yz/DoNWXNAl8TI0cAsW\ 5ymME3bQ4J/k1IKxCtz/bAlAqFgKoc+EolMziDYqWIATtW0rYTJvzGAzTmMj80/QpsFH+Pc2M= @@ -114,3 +115,15 @@ class HostKeysTest (unittest.TestCase): self.assertEqual(b'7EC91BB336CB6D810B124B1353C32396', fp) fp = hexlify(hostdict['secure.example.com']['ssh-dss'].get_fingerprint()).upper() self.assertEqual(b'4478F0B9A23CC5182009FF755BC1D26C', fp) + + def test_delitem(self): + hostdict = paramiko.HostKeys('hostfile.temp') + target = 'happy.example.com' + entry = hostdict[target] # will KeyError if not present + del hostdict[target] + try: + entry = hostdict[target] + except KeyError: + pass # Good + else: + assert False, "Entry was not deleted from HostKeys on delitem!" diff --git a/tests/test_kex.py b/tests/test_kex.py index 56f1b7c7..b7f588f7 100644 --- a/tests/test_kex.py +++ b/tests/test_kex.py @@ -20,20 +20,33 @@ Some unit tests for the key exchange protocols. """ -from binascii import hexlify +from binascii import hexlify, unhexlify import os import unittest import paramiko.util from paramiko.kex_group1 import KexGroup1 -from paramiko.kex_gex import KexGex +from paramiko.kex_gex import KexGex, KexGexSHA256 from paramiko import Message from paramiko.common import byte_chr +from paramiko.kex_ecdh_nist import KexNistp256 +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives.asymmetric import ec def dummy_urandom(n): return byte_chr(0xcc) * n +def dummy_generate_key_pair(obj): + private_key_value = 94761803665136558137557783047955027733968423115106677159790289642479432803037 + public_key_numbers = "042bdab212fa8ba1b7c843301682a4db424d307246c7e1e6083c41d9ca7b098bf30b3d63e2ec6278488c135360456cc054b3444ecc45998c08894cbc1370f5f989" + public_key_numbers_obj = ec.EllipticCurvePublicNumbers.from_encoded_point(ec.SECP256R1(), unhexlify(public_key_numbers)) + obj.P = ec.EllipticCurvePrivateNumbers(private_value=private_key_value, public_numbers=public_key_numbers_obj).private_key(default_backend()) + if obj.transport.server_mode: + obj.Q_S = ec.EllipticCurvePublicNumbers.from_encoded_point(ec.SECP256R1(), unhexlify(public_key_numbers)).public_key(default_backend()) + return + obj.Q_C = ec.EllipticCurvePublicNumbers.from_encoded_point(ec.SECP256R1(), unhexlify(public_key_numbers)).public_key(default_backend()) + class FakeKey (object): def __str__(self): @@ -93,9 +106,12 @@ class KexTest (unittest.TestCase): def setUp(self): self._original_urandom = os.urandom os.urandom = dummy_urandom + self._original_generate_key_pair = KexNistp256._generate_key_pair + KexNistp256._generate_key_pair = dummy_generate_key_pair def tearDown(self): os.urandom = self._original_urandom + KexNistp256._generate_key_pair = self._original_generate_key_pair def test_1_group1_client(self): transport = FakeTransport() @@ -252,3 +268,160 @@ class KexTest (unittest.TestCase): self.assertEqual(H, 
hexlify(transport._H).upper()) self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) self.assertTrue(transport._activated) + + def test_7_gex_sha256_client(self): + transport = FakeTransport() + transport.server_mode = False + kex = KexGexSHA256(transport) + kex.start_kex() + x = b'22000004000000080000002000' + self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) + self.assertEqual((paramiko.kex_gex._MSG_KEXDH_GEX_GROUP,), transport._expect) + + msg = Message() + msg.add_mpint(FakeModulusPack.P) + msg.add_mpint(FakeModulusPack.G) + msg.rewind() + kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_GROUP, msg) + x = b'20000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D4' + self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) + self.assertEqual((paramiko.kex_gex._MSG_KEXDH_GEX_REPLY,), transport._expect) + + msg = Message() + msg.add_string('fake-host-key') + msg.add_mpint(69) + msg.add_string('fake-sig') + msg.rewind() + kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REPLY, msg) + H = b'AD1A9365A67B4496F05594AD1BF656E3CDA0851289A4C1AFF549FEAE50896DF4' + self.assertEqual(self.K, transport._K) + self.assertEqual(H, hexlify(transport._H).upper()) + self.assertEqual((b'fake-host-key', b'fake-sig'), transport._verify) + self.assertTrue(transport._activated) + + def test_8_gex_sha256_old_client(self): + transport = FakeTransport() + transport.server_mode = False + kex = KexGexSHA256(transport) + kex.start_kex(_test_old_style=True) + x = b'1E00000800' + self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) + self.assertEqual((paramiko.kex_gex._MSG_KEXDH_GEX_GROUP,), transport._expect) + + msg = Message() + msg.add_mpint(FakeModulusPack.P) + msg.add_mpint(FakeModulusPack.G) + msg.rewind() + kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_GROUP, msg) + x = b'20000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D4' + self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) + self.assertEqual((paramiko.kex_gex._MSG_KEXDH_GEX_REPLY,), transport._expect) + + msg = Message() + msg.add_string('fake-host-key') + msg.add_mpint(69) + msg.add_string('fake-sig') + msg.rewind() + kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REPLY, msg) + H = b'518386608B15891AE5237DEE08DCADDE76A0BCEFCE7F6DB3AD66BC41D256DFE5' + self.assertEqual(self.K, transport._K) + self.assertEqual(H, hexlify(transport._H).upper()) + self.assertEqual((b'fake-host-key', b'fake-sig'), transport._verify) + self.assertTrue(transport._activated) + + def test_9_gex_sha256_server(self): + transport = FakeTransport() + transport.server_mode = True + kex = KexGexSHA256(transport) + kex.start_kex() + self.assertEqual((paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST, paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST_OLD), transport._expect) + + msg = Message() + msg.add_int(1024) + msg.add_int(2048) + msg.add_int(4096) + msg.rewind() + kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST, msg) + x = 
b'1F0000008100FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF0000000102' + self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) + self.assertEqual((paramiko.kex_gex._MSG_KEXDH_GEX_INIT,), transport._expect) + + msg = Message() + msg.add_mpint(12345) + msg.rewind() + kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_INIT, msg) + K = 67592995013596137876033460028393339951879041140378510871612128162185209509220726296697886624612526735888348020498716482757677848959420073720160491114319163078862905400020959196386947926388406687288901564192071077389283980347784184487280885335302632305026248574716290537036069329724382811853044654824945750581 + H = b'CCAC0497CF0ABA1DBF55E1A3995D17F4CC31824B0E8D95CDF8A06F169D050D80' + x = b'210000000866616B652D6B6579000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D40000000866616B652D736967' + self.assertEqual(K, transport._K) + self.assertEqual(H, hexlify(transport._H).upper()) + self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) + self.assertTrue(transport._activated) + + def test_10_gex_sha256_server_with_old_client(self): + transport = FakeTransport() + transport.server_mode = True + kex = KexGexSHA256(transport) + kex.start_kex() + self.assertEqual((paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST, paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST_OLD), transport._expect) + + msg = Message() + msg.add_int(2048) + msg.rewind() + kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_REQUEST_OLD, msg) + x = b'1F0000008100FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF0000000102' + self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) + self.assertEqual((paramiko.kex_gex._MSG_KEXDH_GEX_INIT,), transport._expect) + + msg = Message() + msg.add_mpint(12345) + msg.rewind() + kex.parse_next(paramiko.kex_gex._MSG_KEXDH_GEX_INIT, msg) + K = 67592995013596137876033460028393339951879041140378510871612128162185209509220726296697886624612526735888348020498716482757677848959420073720160491114319163078862905400020959196386947926388406687288901564192071077389283980347784184487280885335302632305026248574716290537036069329724382811853044654824945750581 + H = b'3DDD2AD840AD095E397BA4D0573972DC60F6461FD38A187CACA6615A5BC8ADBB' + x = b'210000000866616B652D6B6579000000807E2DDB1743F3487D6545F04F1C8476092FB912B013626AB5BCEB764257D88BBA64243B9F348DF7B41B8C814A995E00299913503456983FFB9178D3CD79EB6D55522418A8ABF65375872E55938AB99A84A0B5FC8A1ECC66A7C3766E7E0F80B7CE2C9225FC2DD683F4764244B72963BBB383F529DCF0C5D17740B8A2ADBE9208D40000000866616B652D736967' + self.assertEqual(K, transport._K) + self.assertEqual(H, hexlify(transport._H).upper()) + self.assertEqual(x, hexlify(transport._message.asbytes()).upper()) + self.assertTrue(transport._activated) + + def test_11_kex_nistp256_client(self): + K = 91610929826364598472338906427792435253694642563583721654249504912114314269754 + transport = FakeTransport() + transport.server_mode = False + kex = 
KexNistp256(transport) + kex.start_kex() + self.assertEqual((paramiko.kex_ecdh_nist._MSG_KEXECDH_REPLY,), transport._expect) + + #fake reply + msg = Message() + msg.add_string('fake-host-key') + Q_S = unhexlify("043ae159594ba062efa121480e9ef136203fa9ec6b6e1f8723a321c16e62b945f573f3b822258cbcd094b9fa1c125cbfe5f043280893e66863cc0cb4dccbe70210") + msg.add_string(Q_S) + msg.add_string('fake-sig') + msg.rewind() + kex.parse_next(paramiko.kex_ecdh_nist._MSG_KEXECDH_REPLY, msg) + H = b'BAF7CE243A836037EB5D2221420F35C02B9AB6C957FE3BDE3369307B9612570A' + self.assertEqual(K, kex.transport._K) + self.assertEqual(H, hexlify(transport._H).upper()) + self.assertEqual((b'fake-host-key', b'fake-sig'), transport._verify) + self.assertTrue(transport._activated) + + def test_12_kex_nistp256_server(self): + K = 91610929826364598472338906427792435253694642563583721654249504912114314269754 + transport = FakeTransport() + transport.server_mode = True + kex = KexNistp256(transport) + kex.start_kex() + self.assertEqual((paramiko.kex_ecdh_nist._MSG_KEXECDH_INIT,), transport._expect) + + #fake init + msg=Message() + Q_C = unhexlify("043ae159594ba062efa121480e9ef136203fa9ec6b6e1f8723a321c16e62b945f573f3b822258cbcd094b9fa1c125cbfe5f043280893e66863cc0cb4dccbe70210") + H = b'2EF4957AFD530DD3F05DBEABF68D724FACC060974DA9704F2AEE4C3DE861E7CA' + msg.add_string(Q_C) + msg.rewind() + kex.parse_next(paramiko.kex_ecdh_nist._MSG_KEXECDH_INIT, msg) + self.assertEqual(K, transport._K) + self.assertTrue(transport._activated) + self.assertEqual(H, hexlify(transport._H).upper()) diff --git a/tests/test_message.py b/tests/test_message.py index f308c037..f18cae90 100644 --- a/tests/test_message.py +++ b/tests/test_message.py @@ -92,12 +92,12 @@ class MessageTest (unittest.TestCase): def test_4_misc(self): msg = Message(self.__d) - self.assertEqual(msg.get_int(), 5) - self.assertEqual(msg.get_int(), 0x1122334455) - self.assertEqual(msg.get_int(), 0xf00000000000000000) + self.assertEqual(msg.get_adaptive_int(), 5) + self.assertEqual(msg.get_adaptive_int(), 0x1122334455) + self.assertEqual(msg.get_adaptive_int(), 0xf00000000000000000) self.assertEqual(msg.get_so_far(), self.__d[:29]) self.assertEqual(msg.get_remainder(), self.__d[29:]) msg.rewind() - self.assertEqual(msg.get_int(), 5) + self.assertEqual(msg.get_adaptive_int(), 5) self.assertEqual(msg.get_so_far(), self.__d[:4]) self.assertEqual(msg.get_remainder(), self.__d[4:]) diff --git a/tests/test_packetizer.py b/tests/test_packetizer.py index 8faec03c..02173292 100644 --- a/tests/test_packetizer.py +++ b/tests/test_packetizer.py @@ -20,12 +20,14 @@ Some unit tests for the ssh2 protocol in Transport. 
""" +import sys import unittest from hashlib import sha1 -from tests.loop import LoopSocket +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives.ciphers import algorithms, Cipher, modes -from Crypto.Cipher import AES +from tests.loop import LoopSocket from paramiko import Message, Packetizer, util from paramiko.common import byte_chr, zero_byte @@ -33,7 +35,6 @@ from paramiko.common import byte_chr, zero_byte x55 = byte_chr(0x55) x1f = byte_chr(0x1f) - class PacketizerTest (unittest.TestCase): def test_1_write(self): @@ -43,8 +44,12 @@ class PacketizerTest (unittest.TestCase): p = Packetizer(wsock) p.set_log(util.get_logger('paramiko.transport')) p.set_hexdump(True) - cipher = AES.new(zero_byte * 16, AES.MODE_CBC, x55 * 16) - p.set_outbound_cipher(cipher, 16, sha1, 12, x1f * 20) + encryptor = Cipher( + algorithms.AES(zero_byte * 16), + modes.CBC(x55 * 16), + backend=default_backend() + ).encryptor() + p.set_outbound_cipher(encryptor, 16, sha1, 12, x1f * 20) # message has to be at least 16 bytes long, so we'll have at least one # block of data encrypted that contains zero random padding bytes @@ -66,8 +71,12 @@ class PacketizerTest (unittest.TestCase): p = Packetizer(rsock) p.set_log(util.get_logger('paramiko.transport')) p.set_hexdump(True) - cipher = AES.new(zero_byte * 16, AES.MODE_CBC, x55 * 16) - p.set_inbound_cipher(cipher, 16, sha1, 12, x1f * 20) + decryptor = Cipher( + algorithms.AES(zero_byte * 16), + modes.CBC(x55 * 16), + backend=default_backend() + ).decryptor() + p.set_inbound_cipher(decryptor, 16, sha1, 12, x1f * 20) wsock.send(b'\x43\x91\x97\xbd\x5b\x50\xac\x25\x87\xc2\xc4\x6b\xc7\xe9\x38\xc0\x90\xd2\x16\x56\x0d\x71\x73\x61\x38\x7c\x4c\x3d\xfb\x97\x7d\xe2\x6e\x03\xb1\xa0\xc2\x1c\xd6\x41\x41\x4c\xb4\x59') cmd, m = p.read_message() self.assertEqual(100, cmd) @@ -76,14 +85,20 @@ class PacketizerTest (unittest.TestCase): self.assertEqual(900, m.get_int()) def test_3_closed(self): + if sys.platform.startswith("win"): # no SIGALRM on windows + return rsock = LoopSocket() wsock = LoopSocket() rsock.link(wsock) p = Packetizer(wsock) p.set_log(util.get_logger('paramiko.transport')) p.set_hexdump(True) - cipher = AES.new(zero_byte * 16, AES.MODE_CBC, x55 * 16) - p.set_outbound_cipher(cipher, 16, sha1, 12, x1f * 20) + encryptor = Cipher( + algorithms.AES(zero_byte * 16), + modes.CBC(x55 * 16), + backend=default_backend() + ).encryptor() + p.set_outbound_cipher(encryptor, 16, sha1, 12, x1f * 20) # message has to be at least 16 bytes long, so we'll have at least one # block of data encrypted that contains zero random padding bytes @@ -99,9 +114,13 @@ class PacketizerTest (unittest.TestCase): import signal class TimeoutError(Exception): - pass + def __init__(self, error_message): + if hasattr(errno, 'ETIME'): + self.message = os.sterror(errno.ETIME) + else: + self.messaage = error_message - def timeout(seconds=1, error_message=os.strerror(errno.ETIME)): + def timeout(seconds=1, error_message='Timer expired'): def decorator(func): def _handle_timeout(signum, frame): raise TimeoutError(error_message) diff --git a/tests/test_pkey.py b/tests/test_pkey.py index f673254f..a26ff170 100644 --- a/tests/test_pkey.py +++ b/tests/test_pkey.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Copyright (C) 2003-2009 Robey Pointer <robeypointer@gmail.com> # # This file is part of paramiko. 
@@ -24,56 +25,61 @@ import unittest import os from binascii import hexlify from hashlib import md5 +import base64 -from paramiko import RSAKey, DSSKey, ECDSAKey, Message, util -from paramiko.py3compat import StringIO, byte_chr, b, bytes +from paramiko import RSAKey, DSSKey, ECDSAKey, Ed25519Key, Message, util +from paramiko.py3compat import StringIO, byte_chr, b, bytes, PY2 from tests.util import test_path # from openssh's ssh-keygen PUB_RSA = 'ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEA049W6geFpmsljTwfvI1UmKWWJPNFI74+vNKTk4dmzkQY2yAMs6FhlvhlI8ysU4oj71ZsRYMecHbBbxdN79+JRFVYTKaLqjwGENeTd+yv4q+V2PvZv3fLnzApI3l7EJCqhWwJUHJ1jAkZzqDx0tyOL4uoZpww3nmE0kb3y21tH4c=' PUB_DSS = 'ssh-dss AAAAB3NzaC1kc3MAAACBAOeBpgNnfRzr/twmAQRu2XwWAp3CFtrVnug6s6fgwj/oLjYbVtjAy6pl/h0EKCWx2rf1IetyNsTxWrniA9I6HeDj65X1FyDkg6g8tvCnaNB8Xp/UUhuzHuGsMIipRxBxw9LF608EqZcj1E3ytktoW5B5OcjrkEoz3xG7C+rpIjYvAAAAFQDwz4UnmsGiSNu5iqjn3uTzwUpshwAAAIEAkxfFeY8P2wZpDjX0MimZl5wkoFQDL25cPzGBuB4OnB8NoUk/yjAHIIpEShw8V+LzouMK5CTJQo5+Ngw3qIch/WgRmMHy4kBq1SsXMjQCte1So6HBMvBPIW5SiMTmjCfZZiw4AYHK+B/JaOwaG9yRg2Ejg4Ok10+XFDxlqZo8Y+wAAACARmR7CCPjodxASvRbIyzaVpZoJ/Z6x7dAumV+ysrV1BVYd0lYukmnjO1kKBWApqpH1ve9XDQYN8zgxM4b16L21kpoWQnZtXrY3GZ4/it9kUgyB7+NwacIBlXa8cMDL7Q/69o0d54U0X/NeX5QxuYR6OMJlrkQB7oiW/P/1mwjQgE=' -PUB_ECDSA = 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJSPZm3ZWkvk/Zx8WP+fZRZ5/NBBHnGQwR6uIC6XHGPDIHuWUzIjAwA0bzqkOUffEsbLe+uQgKl5kbc/L8KA/eo=' +PUB_ECDSA_256 = 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJSPZm3ZWkvk/Zx8WP+fZRZ5/NBBHnGQwR6uIC6XHGPDIHuWUzIjAwA0bzqkOUffEsbLe+uQgKl5kbc/L8KA/eo=' +PUB_ECDSA_384 = 'ecdsa-sha2-nistp384 AAAAE2VjZHNhLXNoYTItbmlzdHAzODQAAAAIbmlzdHAzODQAAABhBBbGibQLW9AAZiGN2hEQxWYYoFaWKwN3PKSaDJSMqmIn1Z9sgRUuw8Y/w502OGvXL/wFk0i2z50l3pWZjD7gfMH7gX5TUiCzwrQkS+Hn1U2S9aF5WJp0NcIzYxXw2r4M2A==' +PUB_ECDSA_521 = 'ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBACaOaFLZGuxa5AW16qj6VLypFbLrEWrt9AZUloCMefxO8bNLjK/O5g0rAVasar1TnyHE9qj4NwzANZASWjQNbc4MAG8vzqezFwLIn/kNyNTsXNfqEko9OgHZknlj2Z79dwTJcRAL4QLcT5aND0EHZLB2fAUDXiWIb2j4rg1mwPlBMiBXA==' FINGER_RSA = '1024 60:73:38:44:cb:51:86:65:7f:de:da:a2:2b:5a:57:d5' FINGER_DSS = '1024 44:78:f0:b9:a2:3c:c5:18:20:09:ff:75:5b:c1:d2:6c' -FINGER_ECDSA = '256 25:19:eb:55:e6:a1:47:ff:4f:38:d2:75:6f:a5:d5:60' +FINGER_ECDSA_256 = '256 25:19:eb:55:e6:a1:47:ff:4f:38:d2:75:6f:a5:d5:60' +FINGER_ECDSA_384 = '384 c1:8d:a0:59:09:47:41:8e:a8:a6:07:01:29:23:b4:65' +FINGER_ECDSA_521 = '521 44:58:22:52:12:33:16:0e:ce:0e:be:2c:7c:7e:cc:1e' SIGNED_RSA = '20:d7:8a:31:21:cb:f7:92:12:f2:a4:89:37:f5:78:af:e6:16:b6:25:b9:97:3d:a2:cd:5f:ca:20:21:73:4c:ad:34:73:8f:20:77:28:e2:94:15:08:d8:91:40:7a:85:83:bf:18:37:95:dc:54:1a:9b:88:29:6c:73:ca:38:b4:04:f1:56:b9:f2:42:9d:52:1b:29:29:b4:4f:fd:c9:2d:af:47:d2:40:76:30:f3:63:45:0c:d9:1d:43:86:0f:1c:70:e2:93:12:34:f3:ac:c5:0a:2f:14:50:66:59:f1:88:ee:c1:4a:e9:d1:9c:4e:46:f0:0e:47:6f:38:74:f1:44:a8' RSA_PRIVATE_OUT = """\ -----BEGIN RSA PRIVATE KEY----- -MIICXAIBAAKCAIEA049W6geFpmsljTwfvI1UmKWWJPNFI74+vNKTk4dmzkQY2yAM -s6FhlvhlI8ysU4oj71ZsRYMecHbBbxdN79+JRFVYTKaLqjwGENeTd+yv4q+V2PvZ -v3fLnzApI3l7EJCqhWwJUHJ1jAkZzqDx0tyOL4uoZpww3nmE0kb3y21tH4cCASMC -ggCAEiI6plhqipt4P05L3PYr0pHZq2VPEbE4k9eI/gRKo/c1VJxY3DJnc1cenKsk -trQRtW3OxCEufqsX5PNec6VyKkW+Ox6beJjMKm4KF8ZDpKi9Nw6MdX3P6Gele9D9 -+ieyhVFljrnAqcXsgChTBOYlL2imqCs3qRGAJ3cMBIAx3VsCQQD3pIFVYW398kE0 -n0e1icEpkbDRV4c5iZVhu8xKy2yyfy6f6lClSb2+Ub9uns7F3+b5v0pYSHbE9+/r -OpRq83AfAkEA2rMZlr8SnMXgnyka2LuggA9QgMYy18hyao1dUxySubNDa9N+q2QR 
-mwDisTUgRFHKIlDHoQmzPbXAmYZX1YlDmQJBAPCRLS5epV0XOAc7pL762OaNhzHC -veAfQKgVhKBt105PqaKpGyQ5AXcNlWQlPeTK4GBTbMrKDPna6RBkyrEJvV8CQBK+ -5O+p+kfztCrmRCE0p1tvBuZ3Y3GU1ptrM+KNa6mEZN1bRV8l1Z+SXJLYqv6Kquz/ -nBUeFq2Em3rfoSDugiMCQDyG3cxD5dKX3IgkhLyBWls/FLDk4x/DQ+NUTu0F1Cu6 -JJye+5ARLkL0EweMXf0tmIYfWItDLsWB0fKg/56h0js= +MIICWgIBAAKBgQDTj1bqB4WmayWNPB+8jVSYpZYk80Ujvj680pOTh2bORBjbIAyz +oWGW+GUjzKxTiiPvVmxFgx5wdsFvF03v34lEVVhMpouqPAYQ15N37K/ir5XY+9m/ +d8ufMCkjeXsQkKqFbAlQcnWMCRnOoPHS3I4vi6hmnDDeeYTSRvfLbW0fhwIBIwKB +gBIiOqZYaoqbeD9OS9z2K9KR2atlTxGxOJPXiP4ESqP3NVScWNwyZ3NXHpyrJLa0 +EbVtzsQhLn6rF+TzXnOlcipFvjsem3iYzCpuChfGQ6SovTcOjHV9z+hnpXvQ/fon +soVRZY65wKnF7IAoUwTmJS9opqgrN6kRgCd3DASAMd1bAkEA96SBVWFt/fJBNJ9H +tYnBKZGw0VeHOYmVYbvMSstssn8un+pQpUm9vlG/bp7Oxd/m+b9KWEh2xPfv6zqU +avNwHwJBANqzGZa/EpzF4J8pGti7oIAPUIDGMtfIcmqNXVMckrmzQ2vTfqtkEZsA +4rE1IERRyiJQx6EJsz21wJmGV9WJQ5kCQQDwkS0uXqVdFzgHO6S++tjmjYcxwr3g +H0CoFYSgbddOT6miqRskOQF3DZVkJT3kyuBgU2zKygz52ukQZMqxCb1fAkASvuTv +qfpH87Qq5kQhNKdbbwbmd2NxlNabazPijWuphGTdW0VfJdWfklyS2Kr+iqrs/5wV +HhathJt636Eg7oIjAkA8ht3MQ+XSl9yIJIS8gVpbPxSw5OMfw0PjVE7tBdQruiSc +nvuQES5C9BMHjF39LZiGH1iLQy7FgdHyoP+eodI7 -----END RSA PRIVATE KEY----- """ DSS_PRIVATE_OUT = """\ -----BEGIN DSA PRIVATE KEY----- -MIIBvgIBAAKCAIEA54GmA2d9HOv+3CYBBG7ZfBYCncIW2tWe6Dqzp+DCP+guNhtW -2MDLqmX+HQQoJbHat/Uh63I2xPFaueID0jod4OPrlfUXIOSDqDy28Kdo0Hxen9RS -G7Me4awwiKlHEHHD0sXrTwSplyPUTfK2S2hbkHk5yOuQSjPfEbsL6ukiNi8CFQDw -z4UnmsGiSNu5iqjn3uTzwUpshwKCAIEAkxfFeY8P2wZpDjX0MimZl5wkoFQDL25c -PzGBuB4OnB8NoUk/yjAHIIpEShw8V+LzouMK5CTJQo5+Ngw3qIch/WgRmMHy4kBq -1SsXMjQCte1So6HBMvBPIW5SiMTmjCfZZiw4AYHK+B/JaOwaG9yRg2Ejg4Ok10+X -FDxlqZo8Y+wCggCARmR7CCPjodxASvRbIyzaVpZoJ/Z6x7dAumV+ysrV1BVYd0lY -ukmnjO1kKBWApqpH1ve9XDQYN8zgxM4b16L21kpoWQnZtXrY3GZ4/it9kUgyB7+N -wacIBlXa8cMDL7Q/69o0d54U0X/NeX5QxuYR6OMJlrkQB7oiW/P/1mwjQgECFGI9 -QPSch9pT9XHqn+1rZ4bK+QGA +MIIBuwIBAAKBgQDngaYDZ30c6/7cJgEEbtl8FgKdwhba1Z7oOrOn4MI/6C42G1bY +wMuqZf4dBCglsdq39SHrcjbE8Vq54gPSOh3g4+uV9Rcg5IOoPLbwp2jQfF6f1FIb +sx7hrDCIqUcQccPSxetPBKmXI9RN8rZLaFuQeTnI65BKM98Ruwvq6SI2LwIVAPDP +hSeawaJI27mKqOfe5PPBSmyHAoGBAJMXxXmPD9sGaQ419DIpmZecJKBUAy9uXD8x +gbgeDpwfDaFJP8owByCKREocPFfi86LjCuQkyUKOfjYMN6iHIf1oEZjB8uJAatUr +FzI0ArXtUqOhwTLwTyFuUojE5own2WYsOAGByvgfyWjsGhvckYNhI4ODpNdPlxQ8 +ZamaPGPsAoGARmR7CCPjodxASvRbIyzaVpZoJ/Z6x7dAumV+ysrV1BVYd0lYukmn +jO1kKBWApqpH1ve9XDQYN8zgxM4b16L21kpoWQnZtXrY3GZ4/it9kUgyB7+NwacI +BlXa8cMDL7Q/69o0d54U0X/NeX5QxuYR6OMJlrkQB7oiW/P/1mwjQgECFGI9QPSc +h9pT9XHqn+1rZ4bK+QGA -----END DSA PRIVATE KEY----- """ -ECDSA_PRIVATE_OUT = """\ +ECDSA_PRIVATE_OUT_256 = """\ -----BEGIN EC PRIVATE KEY----- MHcCAQEEIKB6ty3yVyKEnfF/zprx0qwC76MsMlHY4HXCnqho2eKioAoGCCqGSM49 AwEHoUQDQgAElI9mbdlaS+T9nHxY/59lFnn80EEecZDBHq4gLpccY8Mge5ZTMiMD @@ -81,17 +87,32 @@ ADRvOqQ5R98Sxst765CAqXmRtz8vwoD96g== -----END EC PRIVATE KEY----- """ -x1234 = b'\x01\x02\x03\x04' +ECDSA_PRIVATE_OUT_384 = """\ +-----BEGIN EC PRIVATE KEY----- +MIGkAgEBBDBDdO8IXvlLJgM7+sNtPl7tI7FM5kzuEUEEPRjXIPQM7mISciwJPBt+ +y43EuG8nL4mgBwYFK4EEACKhZANiAAQWxom0C1vQAGYhjdoREMVmGKBWlisDdzyk +mgyUjKpiJ9WfbIEVLsPGP8OdNjhr1y/8BZNIts+dJd6VmYw+4HzB+4F+U1Igs8K0 +JEvh59VNkvWheViadDXCM2MV8Nq+DNg= +-----END EC PRIVATE KEY----- +""" +ECDSA_PRIVATE_OUT_521 = """\ +-----BEGIN EC PRIVATE KEY----- +MIHcAgEBBEIAprQtAS3OF6iVUkT8IowTHWicHzShGgk86EtuEXvfQnhZFKsWm6Jo +iqAr1yEaiuI9LfB3Xs8cjuhgEEfbduYr/f6gBwYFK4EEACOhgYkDgYYABACaOaFL +ZGuxa5AW16qj6VLypFbLrEWrt9AZUloCMefxO8bNLjK/O5g0rAVasar1TnyHE9qj +4NwzANZASWjQNbc4MAG8vzqezFwLIn/kNyNTsXNfqEko9OgHZknlj2Z79dwTJcRA +L4QLcT5aND0EHZLB2fAUDXiWIb2j4rg1mwPlBMiBXA== +-----END EC 
PRIVATE KEY----- +""" -class KeyTest (unittest.TestCase): +x1234 = b'\x01\x02\x03\x04' - def setUp(self): - pass +TEST_KEY_BYTESTR_2 = '\x00\x00\x00\x07ssh-rsa\x00\x00\x00\x01#\x00\x00\x00\x81\x00\xd3\x8fV\xea\x07\x85\xa6k%\x8d<\x1f\xbc\x8dT\x98\xa5\x96$\xf3E#\xbe>\xbc\xd2\x93\x93\x87f\xceD\x18\xdb \x0c\xb3\xa1a\x96\xf8e#\xcc\xacS\x8a#\xefVlE\x83\x1epv\xc1o\x17M\xef\xdf\x89DUXL\xa6\x8b\xaa<\x06\x10\xd7\x93w\xec\xaf\xe2\xaf\x95\xd8\xfb\xd9\xbfw\xcb\x9f0)#y{\x10\x90\xaa\x85l\tPru\x8c\t\x19\xce\xa0\xf1\xd2\xdc\x8e/\x8b\xa8f\x9c0\xdey\x84\xd2F\xf7\xcbmm\x1f\x87' +TEST_KEY_BYTESTR_3 = '\x00\x00\x00\x07ssh-rsa\x00\x00\x00\x01#\x00\x00\x00\x00ӏV\x07k%<\x1fT$E#>ғfD\x18 \x0cae#̬S#VlE\x1epvo\x17M߉DUXL<\x06\x10דw\u2bd5ٿw˟0)#y{\x10l\tPru\t\x19Π\u070e/f0yFmm\x1f' - def tearDown(self): - pass +class KeyTest(unittest.TestCase): def test_1_generate_key_bytes(self): key = util.generate_key_bytes(md5, x1234, 'happy birthday', 30) exp = b'\x61\xE1\xF2\x72\xF4\xC1\xC4\x56\x15\x86\xBD\x32\x24\x98\xC0\xE9\x24\x67\x27\x80\xF4\x7B\xB3\x7D\xDA\x7D\x54\x01\x9E\x64' @@ -121,7 +142,7 @@ class KeyTest (unittest.TestCase): self.assertEqual(exp_rsa, my_rsa) self.assertEqual(PUB_RSA.split()[1], key.get_base64()) self.assertEqual(1024, key.get_bits()) - + def test_4_load_dss(self): key = DSSKey.from_private_key_file(test_path('test_dss.key')) self.assertEqual('ssh-dss', key.get_name()) @@ -205,43 +226,72 @@ class KeyTest (unittest.TestCase): msg.rewind() self.assertTrue(key.verify_ssh_sig(b'jerri blank', msg)) - def test_10_load_ecdsa(self): - key = ECDSAKey.from_private_key_file(test_path('test_ecdsa.key')) + def test_C_generate_ecdsa(self): + key = ECDSAKey.generate() + msg = key.sign_ssh_data(b'jerri blank') + msg.rewind() + self.assertTrue(key.verify_ssh_sig(b'jerri blank', msg)) + self.assertEqual(key.get_bits(), 256) + self.assertEqual(key.get_name(), 'ecdsa-sha2-nistp256') + + key = ECDSAKey.generate(bits=256) + msg = key.sign_ssh_data(b'jerri blank') + msg.rewind() + self.assertTrue(key.verify_ssh_sig(b'jerri blank', msg)) + self.assertEqual(key.get_bits(), 256) + self.assertEqual(key.get_name(), 'ecdsa-sha2-nistp256') + + key = ECDSAKey.generate(bits=384) + msg = key.sign_ssh_data(b'jerri blank') + msg.rewind() + self.assertTrue(key.verify_ssh_sig(b'jerri blank', msg)) + self.assertEqual(key.get_bits(), 384) + self.assertEqual(key.get_name(), 'ecdsa-sha2-nistp384') + + key = ECDSAKey.generate(bits=521) + msg = key.sign_ssh_data(b'jerri blank') + msg.rewind() + self.assertTrue(key.verify_ssh_sig(b'jerri blank', msg)) + self.assertEqual(key.get_bits(), 521) + self.assertEqual(key.get_name(), 'ecdsa-sha2-nistp521') + + def test_10_load_ecdsa_256(self): + key = ECDSAKey.from_private_key_file(test_path('test_ecdsa_256.key')) self.assertEqual('ecdsa-sha2-nistp256', key.get_name()) - exp_ecdsa = b(FINGER_ECDSA.split()[1].replace(':', '')) + exp_ecdsa = b(FINGER_ECDSA_256.split()[1].replace(':', '')) my_ecdsa = hexlify(key.get_fingerprint()) self.assertEqual(exp_ecdsa, my_ecdsa) - self.assertEqual(PUB_ECDSA.split()[1], key.get_base64()) + self.assertEqual(PUB_ECDSA_256.split()[1], key.get_base64()) self.assertEqual(256, key.get_bits()) s = StringIO() key.write_private_key(s) - self.assertEqual(ECDSA_PRIVATE_OUT, s.getvalue()) + self.assertEqual(ECDSA_PRIVATE_OUT_256, s.getvalue()) s.seek(0) key2 = ECDSAKey.from_private_key(s) self.assertEqual(key, key2) - def test_11_load_ecdsa_password(self): - key = ECDSAKey.from_private_key_file(test_path('test_ecdsa_password.key'), b'television') + def 
test_11_load_ecdsa_password_256(self): + key = ECDSAKey.from_private_key_file(test_path('test_ecdsa_password_256.key'), b'television') self.assertEqual('ecdsa-sha2-nistp256', key.get_name()) - exp_ecdsa = b(FINGER_ECDSA.split()[1].replace(':', '')) + exp_ecdsa = b(FINGER_ECDSA_256.split()[1].replace(':', '')) my_ecdsa = hexlify(key.get_fingerprint()) self.assertEqual(exp_ecdsa, my_ecdsa) - self.assertEqual(PUB_ECDSA.split()[1], key.get_base64()) + self.assertEqual(PUB_ECDSA_256.split()[1], key.get_base64()) self.assertEqual(256, key.get_bits()) - def test_12_compare_ecdsa(self): + def test_12_compare_ecdsa_256(self): # verify that the private & public keys compare equal - key = ECDSAKey.from_private_key_file(test_path('test_ecdsa.key')) + key = ECDSAKey.from_private_key_file(test_path('test_ecdsa_256.key')) self.assertEqual(key, key) pub = ECDSAKey(data=key.asbytes()) self.assertTrue(key.can_sign()) self.assertTrue(not pub.can_sign()) self.assertEqual(key, pub) - def test_13_sign_ecdsa(self): + def test_13_sign_ecdsa_256(self): # verify that the rsa private key can sign and verify - key = ECDSAKey.from_private_key_file(test_path('test_ecdsa.key')) + key = ECDSAKey.from_private_key_file(test_path('test_ecdsa_256.key')) msg = key.sign_ssh_data(b'ice weasels') self.assertTrue(type(msg) is Message) msg.rewind() @@ -255,6 +305,109 @@ class KeyTest (unittest.TestCase): pub = ECDSAKey(data=key.asbytes()) self.assertTrue(pub.verify_ssh_sig(b'ice weasels', msg)) + def test_14_load_ecdsa_384(self): + key = ECDSAKey.from_private_key_file(test_path('test_ecdsa_384.key')) + self.assertEqual('ecdsa-sha2-nistp384', key.get_name()) + exp_ecdsa = b(FINGER_ECDSA_384.split()[1].replace(':', '')) + my_ecdsa = hexlify(key.get_fingerprint()) + self.assertEqual(exp_ecdsa, my_ecdsa) + self.assertEqual(PUB_ECDSA_384.split()[1], key.get_base64()) + self.assertEqual(384, key.get_bits()) + + s = StringIO() + key.write_private_key(s) + self.assertEqual(ECDSA_PRIVATE_OUT_384, s.getvalue()) + s.seek(0) + key2 = ECDSAKey.from_private_key(s) + self.assertEqual(key, key2) + + def test_15_load_ecdsa_password_384(self): + key = ECDSAKey.from_private_key_file(test_path('test_ecdsa_password_384.key'), b'television') + self.assertEqual('ecdsa-sha2-nistp384', key.get_name()) + exp_ecdsa = b(FINGER_ECDSA_384.split()[1].replace(':', '')) + my_ecdsa = hexlify(key.get_fingerprint()) + self.assertEqual(exp_ecdsa, my_ecdsa) + self.assertEqual(PUB_ECDSA_384.split()[1], key.get_base64()) + self.assertEqual(384, key.get_bits()) + + def test_16_compare_ecdsa_384(self): + # verify that the private & public keys compare equal + key = ECDSAKey.from_private_key_file(test_path('test_ecdsa_384.key')) + self.assertEqual(key, key) + pub = ECDSAKey(data=key.asbytes()) + self.assertTrue(key.can_sign()) + self.assertTrue(not pub.can_sign()) + self.assertEqual(key, pub) + + def test_17_sign_ecdsa_384(self): + # verify that the rsa private key can sign and verify + key = ECDSAKey.from_private_key_file(test_path('test_ecdsa_384.key')) + msg = key.sign_ssh_data(b'ice weasels') + self.assertTrue(type(msg) is Message) + msg.rewind() + self.assertEqual('ecdsa-sha2-nistp384', msg.get_text()) + # ECDSA signatures, like DSS signatures, tend to be different + # each time, so we can't compare against a "known correct" + # signature. + # Even the length of the signature can change. 
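
The new test_C_generate_ecdsa and the per-curve load/compare/sign tests around it all follow the same generate/sign/verify round-trip. A minimal sketch of that flow outside unittest, with the curve size and message chosen arbitrarily:

from paramiko import ECDSAKey

key = ECDSAKey.generate(bits=384)        # 256, 384 and 521 are the sizes exercised above
msg = key.sign_ssh_data(b"ice weasels")  # returns a paramiko.Message holding the signature
msg.rewind()
assert msg.get_text() == "ecdsa-sha2-nistp384"

msg.rewind()
pub = ECDSAKey(data=key.asbytes())       # rebuild just the public half
assert pub.verify_ssh_sig(b"ice weasels", msg)
assert key.can_sign() and not pub.can_sign()
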
+ + msg.rewind() + pub = ECDSAKey(data=key.asbytes()) + self.assertTrue(pub.verify_ssh_sig(b'ice weasels', msg)) + + def test_18_load_ecdsa_521(self): + key = ECDSAKey.from_private_key_file(test_path('test_ecdsa_521.key')) + self.assertEqual('ecdsa-sha2-nistp521', key.get_name()) + exp_ecdsa = b(FINGER_ECDSA_521.split()[1].replace(':', '')) + my_ecdsa = hexlify(key.get_fingerprint()) + self.assertEqual(exp_ecdsa, my_ecdsa) + self.assertEqual(PUB_ECDSA_521.split()[1], key.get_base64()) + self.assertEqual(521, key.get_bits()) + + s = StringIO() + key.write_private_key(s) + # Different versions of OpenSSL (SSLeay versions 0x1000100f and + # 0x1000207f for instance) use different apparently valid (as far as + # ssh-keygen is concerned) padding. So we can't check the actual value + # of the pem encoded key. + s.seek(0) + key2 = ECDSAKey.from_private_key(s) + self.assertEqual(key, key2) + + def test_19_load_ecdsa_password_521(self): + key = ECDSAKey.from_private_key_file(test_path('test_ecdsa_password_521.key'), b'television') + self.assertEqual('ecdsa-sha2-nistp521', key.get_name()) + exp_ecdsa = b(FINGER_ECDSA_521.split()[1].replace(':', '')) + my_ecdsa = hexlify(key.get_fingerprint()) + self.assertEqual(exp_ecdsa, my_ecdsa) + self.assertEqual(PUB_ECDSA_521.split()[1], key.get_base64()) + self.assertEqual(521, key.get_bits()) + + def test_20_compare_ecdsa_521(self): + # verify that the private & public keys compare equal + key = ECDSAKey.from_private_key_file(test_path('test_ecdsa_521.key')) + self.assertEqual(key, key) + pub = ECDSAKey(data=key.asbytes()) + self.assertTrue(key.can_sign()) + self.assertTrue(not pub.can_sign()) + self.assertEqual(key, pub) + + def test_21_sign_ecdsa_521(self): + # verify that the rsa private key can sign and verify + key = ECDSAKey.from_private_key_file(test_path('test_ecdsa_521.key')) + msg = key.sign_ssh_data(b'ice weasels') + self.assertTrue(type(msg) is Message) + msg.rewind() + self.assertEqual('ecdsa-sha2-nistp521', msg.get_text()) + # ECDSA signatures, like DSS signatures, tend to be different + # each time, so we can't compare against a "known correct" + # signature. + # Even the length of the signature can change. + + msg.rewind() + pub = ECDSAKey(data=key.asbytes()) + self.assertTrue(pub.verify_ssh_sig(b'ice weasels', msg)) + def test_salt_size(self): # Read an existing encrypted private key file_ = test_path('test_rsa_password.key') @@ -271,3 +424,16 @@ class KeyTest (unittest.TestCase): self.assertEqual(key, key2) finally: os.remove(newfile) + + def test_stringification(self): + key = RSAKey.from_private_key_file(test_path('test_rsa.key')) + comparable = TEST_KEY_BYTESTR_2 if PY2 else TEST_KEY_BYTESTR_3 + self.assertEqual(str(key), comparable) + + def test_ed25519(self): + key1 = Ed25519Key.from_private_key_file(test_path('test_ed25519.key')) + key2 = Ed25519Key.from_private_key_file( + test_path('test_ed25519_password.key'), b'abc123' + ) + + self.assertNotEqual(key1.asbytes(), key2.asbytes()) diff --git a/tests/test_sftp.py b/tests/test_sftp.py index b7ace8e2..8f1b7d2e 100755 --- a/tests/test_sftp.py +++ b/tests/test_sftp.py @@ -446,7 +446,7 @@ class SFTPTest (unittest.TestCase): def test_A_readline_seek(self): """ create a text file and write a bunch of text into it. then count the lines - in the file, and seek around to retreive particular lines. this should + in the file, and seek around to retrieve particular lines. this should verify that read buffering and 'tell' work well together, and that read buffering is reset on 'seek'. 
""" @@ -462,6 +462,7 @@ class SFTPTest (unittest.TestCase): line_number += 1 pos_list.append(loc) loc = f.tell() + self.assertTrue(f.seekable()) f.seek(pos_list[6], f.SEEK_SET) self.assertEqual(f.readline(), 'Nouzilly, France.\n') f.seek(pos_list[17], f.SEEK_SET) @@ -643,7 +644,7 @@ class SFTPTest (unittest.TestCase): with sftp.open(FOLDER + '/bunny.txt', 'rb') as f: self.assertEqual(text, f.read(128)) - self.assertEqual((41, 41), saved_progress[-1]) + self.assertEqual([(41, 41)], saved_progress) os.unlink(localname) fd, localname = mkstemp() @@ -653,7 +654,7 @@ class SFTPTest (unittest.TestCase): with open(localname, 'rb') as f: self.assertEqual(text, f.read(128)) - self.assertEqual((41, 41), saved_progress[-1]) + self.assertEqual([(41, 41)], saved_progress) os.unlink(localname) sftp.unlink(FOLDER + '/bunny.txt') @@ -729,7 +730,8 @@ class SFTPTest (unittest.TestCase): f.readv([(0, 12)]) with sftp.open(FOLDER + '/zero', 'r') as f: - f.prefetch() + file_size = f.stat().st_size + f.prefetch(file_size) f.read(100) finally: sftp.unlink(FOLDER + '/zero') @@ -844,6 +846,11 @@ class SFTPTest (unittest.TestCase): sftp.remove('%s/nonutf8data' % FOLDER) + def test_sftp_attributes_empty_str(self): + sftp_attributes = SFTPAttributes() + self.assertEqual(str(sftp_attributes), "?--------- 1 0 0 0 (unknown date) ?") + + if __name__ == '__main__': SFTPTest.init_loopback() # logging is required by test_N_file_with_percent diff --git a/tests/test_sftp_big.py b/tests/test_sftp_big.py index abed27b8..cfad5682 100644 --- a/tests/test_sftp_big.py +++ b/tests/test_sftp_big.py @@ -132,7 +132,8 @@ class BigSFTPTest (unittest.TestCase): start = time.time() with sftp.open('%s/hongry.txt' % FOLDER, 'rb') as f: - f.prefetch() + file_size = f.stat().st_size + f.prefetch(file_size) # read on odd boundaries to make sure the bytes aren't getting scrambled n = 0 @@ -171,7 +172,8 @@ class BigSFTPTest (unittest.TestCase): chunk = 793 for i in range(10): with sftp.open('%s/hongry.txt' % FOLDER, 'rb') as f: - f.prefetch() + file_size = f.stat().st_size + f.prefetch(file_size) base_offset = (512 * 1024) + 17 * random.randint(1000, 2000) offsets = [base_offset + j * chunk for j in range(100)] # randomly seek around and read them out @@ -245,9 +247,11 @@ class BigSFTPTest (unittest.TestCase): for i in range(10): with sftp.open('%s/hongry.txt' % FOLDER, 'r') as f: - f.prefetch() + file_size = f.stat().st_size + f.prefetch(file_size) with sftp.open('%s/hongry.txt' % FOLDER, 'r') as f: - f.prefetch() + file_size = f.stat().st_size + f.prefetch(file_size) for n in range(1024): data = f.read(1024) self.assertEqual(data, kblob) @@ -275,7 +279,8 @@ class BigSFTPTest (unittest.TestCase): self.assertEqual(sftp.stat('%s/hongry.txt' % FOLDER).st_size, 1024 * 1024) with sftp.open('%s/hongry.txt' % FOLDER, 'rb') as f: - f.prefetch() + file_size = f.stat().st_size + f.prefetch(file_size) data = f.read(1024) self.assertEqual(data, kblob) @@ -353,7 +358,8 @@ class BigSFTPTest (unittest.TestCase): # try to read it too. 
with sftp.open('%s/hongry.txt' % FOLDER, 'r', 128 * 1024) as f: - f.prefetch() + file_size = f.stat().st_size + f.prefetch(file_size) total = 0 while total < 1024 * 1024: total += len(f.read(32 * 1024)) diff --git a/tests/test_ssh_exception.py b/tests/test_ssh_exception.py new file mode 100644 index 00000000..18f2a97d --- /dev/null +++ b/tests/test_ssh_exception.py @@ -0,0 +1,31 @@ +import pickle +import unittest + +from paramiko.ssh_exception import NoValidConnectionsError + + +class NoValidConnectionsErrorTest (unittest.TestCase): + + def test_pickling(self): + # Regression test for https://github.com/paramiko/paramiko/issues/617 + exc = NoValidConnectionsError({('127.0.0.1', '22'): Exception()}) + new_exc = pickle.loads(pickle.dumps(exc)) + self.assertEqual(type(exc), type(new_exc)) + self.assertEqual(str(exc), str(new_exc)) + self.assertEqual(exc.args, new_exc.args) + + def test_error_message_for_single_host(self): + exc = NoValidConnectionsError({('127.0.0.1', '22'): Exception()}) + assert "Unable to connect to port 22 on 127.0.0.1" in str(exc) + + def test_error_message_for_two_hosts(self): + exc = NoValidConnectionsError({('127.0.0.1', '22'): Exception(), + ('::1', '22'): Exception()}) + assert "Unable to connect to port 22 on 127.0.0.1 or ::1" in str(exc) + + def test_error_message_for_multiple_hosts(self): + exc = NoValidConnectionsError({('127.0.0.1', '22'): Exception(), + ('::1', '22'): Exception(), + ('10.0.0.42', '22'): Exception()}) + exp = "Unable to connect to port 22 on 10.0.0.42, 127.0.0.1 or ::1" + assert exp in str(exc) diff --git a/tests/test_ssh_gss.py b/tests/test_ssh_gss.py index e20d348f..967b3b81 100644 --- a/tests/test_ssh_gss.py +++ b/tests/test_ssh_gss.py @@ -43,9 +43,7 @@ class NullServer (paramiko.ServerInterface): return paramiko.AUTH_FAILED def enable_auth_gssapi(self): - UseGSSAPI = True - GSSAPICleanupCredentials = True - return UseGSSAPI + return True def check_channel_request(self, kind, chanid): return paramiko.OPEN_SUCCEEDED diff --git a/tests/test_transport.py b/tests/test_transport.py index 5cf9a867..c426cef1 100644 --- a/tests/test_transport.py +++ b/tests/test_transport.py @@ -28,15 +28,19 @@ import socket import time import threading import random +from hashlib import sha1 import unittest -from paramiko import Transport, SecurityOptions, ServerInterface, RSAKey, DSSKey, \ - SSHException, ChannelException +from paramiko import ( + Transport, SecurityOptions, ServerInterface, RSAKey, DSSKey, SSHException, + ChannelException, Packetizer, +) from paramiko import AUTH_FAILED, AUTH_SUCCESSFUL from paramiko import OPEN_SUCCEEDED, OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED -from paramiko.common import MSG_KEXINIT, cMSG_CHANNEL_WINDOW_ADJUST, \ - MIN_PACKET_SIZE, MIN_WINDOW_SIZE, MAX_WINDOW_SIZE, \ - DEFAULT_WINDOW_SIZE, DEFAULT_MAX_PACKET_SIZE +from paramiko.common import ( + MSG_KEXINIT, cMSG_CHANNEL_WINDOW_ADJUST, MIN_PACKET_SIZE, MIN_WINDOW_SIZE, + MAX_WINDOW_SIZE, DEFAULT_WINDOW_SIZE, DEFAULT_MAX_PACKET_SIZE, +) from paramiko.py3compat import bytes from paramiko.message import Message from tests.loop import LoopSocket @@ -77,7 +81,7 @@ class NullServer (ServerInterface): return OPEN_SUCCEEDED def check_channel_exec_request(self, channel, command): - if command != 'yes': + if command != b'yes': return False return True @@ -161,6 +165,15 @@ class TransportTest(unittest.TestCase): except TypeError: pass + def test_1b_security_options_reset(self): + o = self.tc.get_security_options() + # should not throw any exceptions + o.ciphers = o.ciphers + o.digests = 
o.digests + o.key_types = o.key_types + o.kex = o.kex + o.compression = o.compression + def test_2_compute_key(self): self.tc.K = 123281095979686581523377256114209720774539068973101330872763622971399429481072519713536292772709507296759612401802191955568143056534122385270077606457721553469730659233569339356140085284052436697480759510519672848743794433460113118986816826624865291116513647975790797391795651716378444844877749505443714557929 self.tc.H = b'\x0C\x83\x07\xCD\xE6\x85\x6F\xF3\x0B\xA9\x36\x84\xEB\x0F\x04\xC2\x52\x0E\x9E\xD3' @@ -251,7 +264,7 @@ class TransportTest(unittest.TestCase): chan = self.tc.open_session() schan = self.ts.accept(1.0) try: - chan.exec_command('no') + chan.exec_command(b'command contains \xfc and is not a valid UTF-8 string') self.assertTrue(False) except SSHException: pass @@ -447,9 +460,11 @@ class TransportTest(unittest.TestCase): bytes = self.tc.packetizer._Packetizer__sent_bytes chan.send('x' * 1024) bytes2 = self.tc.packetizer._Packetizer__sent_bytes + block_size = self.tc._cipher_info[self.tc.local_cipher]['block-size'] + mac_size = self.tc._mac_info[self.tc.local_mac]['size'] # tests show this is actually compressed to *52 bytes*! including packet overhead! nice!! :) self.assertTrue(bytes2 - bytes < 1024) - self.assertEqual(52, bytes2 - bytes) + self.assertEqual(16 + block_size + mac_size, bytes2 - bytes) chan.close() schan.close() @@ -792,3 +807,54 @@ class TransportTest(unittest.TestCase): (None, DEFAULT_WINDOW_SIZE), (2**32, MAX_WINDOW_SIZE)]: self.assertEqual(self.tc._sanitize_window_size(val), correct) + + def test_L_handshake_timeout(self): + """ + verify that we can get a hanshake timeout. + """ + # Tweak client Transport instance's Packetizer instance so + # its read_message() sleeps a bit. This helps prevent race conditions + # where the client Transport's timeout timer thread doesn't even have + # time to get scheduled before the main client thread finishes + # handshaking with the server. + # (Doing this on the server's transport *sounds* more 'correct' but + # actually doesn't work nearly as well for whatever reason.) + class SlowPacketizer(Packetizer): + def read_message(self): + time.sleep(1) + return super(SlowPacketizer, self).read_message() + # NOTE: prettttty sure since the replaced .packetizer Packetizer is now + # no longer doing anything with its copy of the socket...everything'll + # be fine. Even tho it's a bit squicky. + self.tc.packetizer = SlowPacketizer(self.tc.sock) + # Continue with regular test red tape. + host_key = RSAKey.from_private_key_file(test_path('test_rsa.key')) + public_host_key = RSAKey(data=host_key.asbytes()) + self.ts.add_server_key(host_key) + event = threading.Event() + server = NullServer() + self.assertTrue(not event.is_set()) + self.tc.handshake_timeout = 0.000000000001 + self.ts.start_server(event, server) + self.assertRaises(EOFError, self.tc.connect, + hostkey=public_host_key, + username='slowdive', + password='pygmalion') + + def test_M_select_after_close(self): + """ + verify that select works when a channel is already closed. 
+ """ + self.setup_test_server() + chan = self.tc.open_session() + chan.invoke_shell() + schan = self.ts.accept(1.0) + schan.close() + + # give client a moment to receive close notification + time.sleep(0.1) + + r, w, e = select.select([chan], [], [], 0.1) + self.assertEqual([chan], r) + self.assertEqual([], w) + self.assertEqual([], e) diff --git a/tests/test_util.py b/tests/test_util.py index bfdc525e..7880e156 100644 --- a/tests/test_util.py +++ b/tests/test_util.py @@ -30,6 +30,7 @@ import paramiko.util from paramiko.util import lookup_ssh_host_config as host_config, safe_string from paramiko.py3compat import StringIO, byte_ord, b +# Note some lines in this configuration have trailing spaces on purpose test_config_file = """\ Host * User robey @@ -65,7 +66,7 @@ from paramiko import * class UtilTest(unittest.TestCase): - def test_1_import(self): + def test_import(self): """ verify that all the classes can be imported from paramiko. """ @@ -103,30 +104,30 @@ class UtilTest(unittest.TestCase): self.assertTrue('SSHConfig' in symbols) self.assertTrue('util' in symbols) - def test_2_parse_config(self): + def test_parse_config(self): global test_config_file f = StringIO(test_config_file) config = paramiko.util.parse_ssh_config(f) self.assertEqual(config._config, [{'host': ['*'], 'config': {}}, {'host': ['*'], 'config': {'identityfile': ['~/.ssh/id_rsa'], 'user': 'robey'}}, {'host': ['*.example.com'], 'config': {'user': 'bjork', 'port': '3333'}}, - {'host': ['*'], 'config': {'crazy': 'something dumb '}}, + {'host': ['*'], 'config': {'crazy': 'something dumb'}}, {'host': ['spoo.example.com'], 'config': {'crazy': 'something else'}}]) - def test_3_host_config(self): + def test_host_config(self): global test_config_file f = StringIO(test_config_file) config = paramiko.util.parse_ssh_config(f) for host, values in { - 'irc.danger.com': {'crazy': 'something dumb ', + 'irc.danger.com': {'crazy': 'something dumb', 'hostname': 'irc.danger.com', 'user': 'robey'}, - 'irc.example.com': {'crazy': 'something dumb ', + 'irc.example.com': {'crazy': 'something dumb', 'hostname': 'irc.example.com', 'user': 'robey', 'port': '3333'}, - 'spoo.example.com': {'crazy': 'something dumb ', + 'spoo.example.com': {'crazy': 'something dumb', 'hostname': 'spoo.example.com', 'user': 'robey', 'port': '3333'} @@ -140,12 +141,12 @@ class UtilTest(unittest.TestCase): values ) - def test_4_generate_key_bytes(self): + def test_generate_key_bytes(self): x = paramiko.util.generate_key_bytes(sha1, b'ABCDEFGH', 'This is my secret passphrase.', 64) hex = ''.join(['%02x' % byte_ord(c) for c in x]) self.assertEqual(hex, '9110e2f6793b69363e58173e9436b13a5a4b339005741d5c680e505f57d871347b4239f14fb5c46e857d5e100424873ba849ac699cea98d729e57b3e84378e8b') - def test_5_host_keys(self): + def test_host_keys(self): with open('hostfile.temp', 'w') as f: f.write(test_hosts_file) try: @@ -158,7 +159,7 @@ class UtilTest(unittest.TestCase): finally: os.unlink('hostfile.temp') - def test_7_host_config_expose_issue_33(self): + def test_host_config_expose_issue_33(self): test_config_file = """ Host www13.* Port 22 @@ -177,7 +178,7 @@ Host * {'hostname': host, 'port': '22'} ) - def test_8_eintr_retry(self): + def test_eintr_retry(self): self.assertEqual('foo', paramiko.util.retry_on_signal(lambda: 'foo')) # Variables that are set by raises_intr @@ -202,7 +203,7 @@ Host * self.assertRaises(AssertionError, lambda: paramiko.util.retry_on_signal(raises_other_exception)) - def test_9_proxycommand_config_equals_parsing(self): + def 
test_proxycommand_config_equals_parsing(self): """ ProxyCommand should not split on equals signs within the value. """ @@ -221,7 +222,7 @@ Host equals-delimited 'foo bar=biz baz' ) - def test_10_proxycommand_interpolation(self): + def test_proxycommand_interpolation(self): """ ProxyCommand should perform interpolation on the value """ @@ -247,7 +248,20 @@ Host * val ) - def test_11_host_config_test_negation(self): + def test_proxycommand_tilde_expansion(self): + """ + Tilde (~) should be expanded inside ProxyCommand + """ + config = paramiko.util.parse_ssh_config(StringIO(""" +Host test + ProxyCommand ssh -F ~/.ssh/test_config bastion nc %h %p +""")) + self.assertEqual( + 'ssh -F %s/.ssh/test_config bastion nc test 22' % os.path.expanduser('~'), + host_config('test', config)['proxycommand'] + ) + + def test_host_config_test_negation(self): test_config_file = """ Host www13.* !*.example.com Port 22 @@ -269,7 +283,7 @@ Host * {'hostname': host, 'port': '8080'} ) - def test_12_host_config_test_proxycommand(self): + def test_host_config_test_proxycommand(self): test_config_file = """ Host proxy-with-equal-divisor-and-space ProxyCommand = foo=bar @@ -297,7 +311,7 @@ ProxyCommand foo=bar:%h-%p values ) - def test_11_host_config_test_identityfile(self): + def test_host_config_test_identityfile(self): test_config_file = """ IdentityFile id_dsa0 @@ -327,7 +341,7 @@ IdentityFile id_dsa22 values ) - def test_12_config_addressfamily_and_lazy_fqdn(self): + def test_config_addressfamily_and_lazy_fqdn(self): """ Ensure the code path honoring non-'all' AddressFamily doesn't asplode """ @@ -343,13 +357,13 @@ IdentityFile something_%l_using_fqdn self.assertEqual(32767, paramiko.util.clamp_value(32767, 32765, 32769)) self.assertEqual(32769, paramiko.util.clamp_value(32767, 32770, 32769)) - def test_13_config_dos_crlf_succeeds(self): + def test_config_dos_crlf_succeeds(self): config_file = StringIO("host abcqwerty\r\nHostName 127.0.0.1\r\n") config = paramiko.SSHConfig() config.parse(config_file) self.assertEqual(config.lookup("abcqwerty")["hostname"], "127.0.0.1") - def test_14_get_hostnames(self): + def test_get_hostnames(self): f = StringIO(test_config_file) config = paramiko.util.parse_ssh_config(f) self.assertEqual(config.get_hostnames(), set(['*', '*.example.com', 'spoo.example.com'])) @@ -461,9 +475,10 @@ Host param3 parara safe_has_bytes = safe_string(has_bytes) expected_bytes = b("has %07%03 bytes") err = "{0!r} != {1!r}" - assert safe_vanilla == vanilla, err.format(safe_vanilla, vanilla) - assert safe_has_bytes == expected_bytes, \ - err.format(safe_has_bytes, expected_bytes) + msg = err.format(safe_vanilla, vanilla) + assert safe_vanilla == vanilla, msg + msg = err.format(safe_has_bytes, expected_bytes) + assert safe_has_bytes == expected_bytes, msg def test_proxycommand_none_issue_418(self): test_config_file = """ @@ -484,3 +499,33 @@ Host proxycommand-with-equals-none paramiko.util.lookup_ssh_host_config(host, config), values ) + + def test_proxycommand_none_masking(self): + # Re: https://github.com/paramiko/paramiko/issues/670 + source_config = """ +Host specific-host + ProxyCommand none + +Host other-host + ProxyCommand other-proxy + +Host * + ProxyCommand default-proxy +""" + config = paramiko.SSHConfig() + config.parse(StringIO(source_config)) + # When bug is present, the full stripping-out of specific-host's + # ProxyCommand means it actually appears to pick up the default + # ProxyCommand value instead, due to cascading. 
It should (for + # backwards compatibility reasons in 1.x/2.x) appear completely blank, + # as if the host had no ProxyCommand whatsoever. + # Threw another unrelated host in there just for sanity reasons. + self.assertFalse('proxycommand' in config.lookup('specific-host')) + self.assertEqual( + config.lookup('other-host')['proxycommand'], + 'other-proxy' + ) + self.assertEqual( + config.lookup('some-random-host')['proxycommand'], + 'default-proxy' + ) diff --git a/tox-requirements.txt b/tox-requirements.txt deleted file mode 100644 index 26224ce6..00000000 --- a/tox-requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -# Not sure why tox can't just read setup.py? -pycrypto diff --git a/tox.ini b/tox.ini deleted file mode 100644 index 7d4fcf8a..00000000 --- a/tox.ini +++ /dev/null @@ -1,6 +0,0 @@ -[tox] -envlist = py26,py27,py32,py33,py34 - -[testenv] -commands = pip install -q -r tox-requirements.txt - python test.py |
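
The new tests/test_ssh_exception.py above pins down how NoValidConnectionsError formats its message and that it survives pickling (issue 617). A small sketch of the same behaviour, with made-up addresses:

import pickle
from paramiko.ssh_exception import NoValidConnectionsError

# Map of (address, port) pairs to the underlying per-address errors.
errors = {
    ("127.0.0.1", "22"): Exception("connection refused"),
    ("::1", "22"): Exception("connection refused"),
}
exc = NoValidConnectionsError(errors)
print(str(exc))                          # -> "Unable to connect to port 22 on 127.0.0.1 or ::1"
clone = pickle.loads(pickle.dumps(exc))  # round-trips cleanly after the fix
assert str(clone) == str(exc) and clone.args == exc.args
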
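
test_L_handshake_timeout in tests/test_transport.py above exercises Transport.handshake_timeout. A hedged sketch of setting that knob on a plain client-side Transport; the host and port are placeholders and a reachable server is assumed:

import socket
import paramiko

sock = socket.create_connection(("ssh.example.com", 22))
t = paramiko.Transport(sock)
t.handshake_timeout = 15   # seconds to allow for banner exchange and key negotiation
t.start_client()           # raises if the handshake does not complete in time
t.close()
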
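
Finally, test_proxycommand_none_masking in tests/test_util.py above documents that a host-specific "ProxyCommand none" must hide any wildcard ProxyCommand rather than fall through to it. A minimal sketch of the expected lookups:

import paramiko
from paramiko.py3compat import StringIO

config = paramiko.SSHConfig()
config.parse(StringIO("""
Host specific-host
    ProxyCommand none

Host *
    ProxyCommand default-proxy
"""))

# 'none' strips the option entirely instead of inheriting the wildcard value.
assert "proxycommand" not in config.lookup("specific-host")
assert config.lookup("some-other-host")["proxycommand"] == "default-proxy"
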