Mirror of https://github.com/HuFlungDu/pylibmeshctrl.git (synced 2026-02-20 13:42:11 +00:00)
Compare commits: 0.0.7...fix/device (68 commits)
Commits in this comparison:
5947e48c5b, 31a8f00cd0, 871d36b334, 59fb1f104e, 9bd3e10ed7, 28e1d94ab9, 51325a89d3, 97dff80222, 8da445348b, ab1fba5cc1, 34a80cdda7, fcf523dd62, 9a1311167d, c2319fcf29, 4d1c25a35c, e226fff8dd, a07b0f129a, 64dc5eccdf, 1a7714663a, 0a59edd19a, f8600b09fe, 351f425ce5, 77e76aeb7c, 5393321f7b, 79554ebad6, 1dbcd012ec, ace6884991, 61eebf1532, fcfeac21a8, 19d10ee050, 0c9ebf0ff2, 2556e72a73, cda5f610a1, 564d466ff9, 125e6ac6ac, 1b849473bb, df25652ba6, 9668e4d507, fe4c2fe874, bb7cf17cd3, 6919da4a42, ff120490fa, d9991156f6, 4fea858fbc, 3b4a18b379, c072d6012a, 0ee2e2dc94, f2d9fcd295, 7456743709, 07b828a150, cd7a356eb5, 5ee2c8edf3, d3d5b87287, 18eb2de5b6, ec23ba458d, a3c721318d, 4eda4e6c08, ab2a4c40bc, 0a657cee48, 03441161b2, 24adf3baa5, 1adaccabc0, 20843dbea7, af6c020506, b870aa25bd, c63604f624, f0e09c0082, 184ce3ef3e
@@ -19,7 +19,7 @@ formats:
 build:
   os: ubuntu-22.04
   tools:
-    python: "3.11"
+    python: "3.13"
 
 python:
   install:
@@ -2,7 +2,45 @@
 Changelog
 =========
 
-Version 0.1
-===========
+version 1.2.0
+=============
 
-Create
+Bugs:
+* Fixed agent sometimes being None causing an exception
+* Fixed bad code in device_open_url
+
+Features:
+* Changed websockets version to 15. This now uses the proxy implementation from that library, instead of the previous hack.
+* Added lastaddr and lastconnect to the list_devices API
+
+version 1.1.2
+=============
+Bugs:
+* Fixed semver for requirements. New version of websockets broke this library.
+
+Security:
+* Updated cryptography to ~44.0.1 to fix ssl vulnerability.
+
+Version 1.1.1
+=============
+Bugs:
+* Fixed bug when running device_info when user has access to multiple meshes
+
+Version 1.1.0
+=============
+Features:
+* Added overrides for meshcentral files for testing purposes
+* Added `users` field to `device` object
+
+Bugs:
+* Fixed connection errors not raising immediately
+* Fixed run_commands parsing return from multiple devices incorrectly
+* Fixed listening to raw not removing its listener correctly
+* Fixed javascript timecodes not being handled in gnu environments
+* Changed some fstring formatting that locked the library into python >3.13
+
+
+Version 1.0.0
+=============
+
+First release
README.rst (13 changed lines)
@@ -38,14 +38,13 @@ Library for remotely interacting with a
 Installation
 ------------
 
-pip install meshctrl
+pip install libmeshctrl
 
 Usage
 -----
 
 This module is implemented as a primarily asynchronous library
-(asyncio), mostly through the ``Session`` class, which is exported as
-default. Because the library is asynchronous, you must wait for it to be
+(asyncio), mostly through the `Session <https://pylibmeshctrl.readthedocs.io/en/latest/api/meshctrl.html#meshctrl.session.Session>`__ class. Because the library is asynchronous, you must wait for it to be
 initialized before interacting with the server. The preferred way to do
 this is to use the async context manager pattern:
 
@@ -53,20 +52,20 @@ this is to use the async context manager pattern:
 
     import meshctrl
 
-    async with meshctrl.session.Session(url, **options):
+    async with meshctrl.Session(url, **options):
        print(await session.list_users())
        ...
 
 However, if you prefer to instantiate the object yourself, you can
-simply use the ``initialized`` property:
+simply use the `initialized <https://pylibmeshctrl.readthedocs.io/en/latest/api/meshctrl.html#meshctrl.session.Session.initialized>`__ property:
 
 .. code:: python
 
-    session = meshctrl.session.Session(url, **options)
+    session = meshctrl.Session(url, **options)
     await session.initialized.wait()
 
 Note that, in this case, you will be required to clean up the session
-using its ``close`` method.
+using its `close <https://pylibmeshctrl.readthedocs.io/en/latest/api/meshctrl.html#meshctrl.session.Session.close>`__ method.
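A minimal sketch of that manual lifecycle (``url`` and ``options`` are placeholders):

.. code:: python

    import asyncio
    import meshctrl

    async def main():
        session = meshctrl.Session(url, **options)
        await session.initialized.wait()
        try:
            print(await session.list_users())
        finally:
            # Without the context manager, closing the session is your responsibility.
            await session.close()

    asyncio.run(main())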
 
 Session Parameters
 ------------------
@@ -5,8 +5,8 @@ sphinx>=3.2.1
 sphinx-jinja2-compat>=0.1.1
 sphinx-toolbox>=2.16.0
 # sphinx_rtd_theme
-cffi==1.17.1
-cryptography==43.0.3
-pycparser==2.22
-websockets==13.1
+cffi~=1.17.1
+cryptography~=44.0.1
+pycparser~=2.22
+websockets~=15.0.0
 enum_tools
requirements.txt
Binary file not shown.
@@ -24,7 +24,7 @@ platforms = any
 # Add here all kinds of additional classifiers as defined under
 # https://pypi.org/classifiers/
 classifiers =
-    Development Status :: 4 - Beta
+    Development Status :: 5 - Production/Stable
     Programming Language :: Python
 
 
@@ -44,8 +44,9 @@ python_requires = >=3.8
 # For more information, check out https://semver.org/.
 install_requires =
     importlib-metadata
-    cryptography>=43.0.3
-    websockets>=13.1
+    cryptography~=44.0.1
+    websockets~=15.0.0
+    python-socks[asyncio]~=2.5.3
 
 
 [options.packages.find]
@@ -12,6 +12,7 @@ class Device(object):
         name (str|None): Device name as it is shown on the meshcentral server
         description (str|None): Device description as it is shown on the meshcentral server. Also accepted as desc.
         tags (list[str]|None): tags associated with device.
+        users (list[str]|None): latest known usernames which have logged in.
         created_at (datetime.Datetime|int|None): Time at which device was created. Also accepted as agct.
         computer_name (str|None): Device name as reported from the agent. This may be different from name. Also accepted as rname.
         icon (~meshctrl.constants.Icon): Icon displayed on the website
@@ -38,6 +39,7 @@ class Device(object):
         name (str|None): Device name as it is shown on the meshcentral server
         description (str|None): Device description as it is shown on the meshcentral server.
         tags (list[str]): tags associated with device.
+        users (list[str]): latest known usernames which have logged in.
         computer_name (str|None): Device name as reported from the agent. This may be different from name. Also accepted as rname.
         icon (~meshctrl.constants.Icon): Icon displayed on the website
         mesh (~meshctrl.mesh.Mesh|None): Mesh object under which this device exists. Is None for individual device access.
@@ -54,11 +56,11 @@ class Device(object):
         links (dict[str, ~meshctrl.types.UserLink]|None): Collection of links for the device
         details (dict[str, dict]): Extra details about the device. These are not well defined, but are filled by calling :py:meth:`~meshctrl.session.Session.list_devices` with `details=True`.
     '''
-    def __init__(self, nodeid, session, agent=None,
+    def __init__(self, nodeid, session, agent=None,
                        name=None, desc=None, description=None,
-                       tags=None,
-                       agct=None, created_at=None,
-                       rname=None, computer_name=None, icon=constants.Icon.desktop,
+                       tags=None, users=None,
+                       agct=None, created_at=None,
+                       rname=None, computer_name=None, icon=constants.Icon.desktop,
                        mesh=None, mtype=None, meshtype=None, groupname=None, meshname=None,
                        domain=None, host=None, ip=None, conn=None, connected=None,
                        pwr=None, powered_on=None,
@@ -69,7 +71,7 @@ class Device(object):
         if links is None:
             links = {}
         self.links = links
-        if ("ver" in agent):
+        if agent and "ver" in agent:
             agent = {
                 "version": agent["ver"],
                 "id": agent["id"],
@@ -90,13 +92,14 @@ class Device(object):
         self.description = description if description is not None else desc
         self.os_description = os_description if os_description is not None else osdesc
         self.tags = tags if tags is not None else []
+        self.users = users if users is not None else []
         self.details = details if details is not None else {}
 
         created_at = created_at if created_at is not None else agct
         if not isinstance(created_at, datetime.datetime) and created_at is not None:
             try:
                 created_at = datetime.datetime.fromtimestamp(created_at)
-            except OSError:
+            except (OSError, ValueError):
                 # Meshcentral returns in milliseconds, while fromtimestamp, and most of python, expects the argument in seconds. Try seconds first, then translate from ms if it fails.
                 # This doesn't work for really early timestamps, but I don't expect that to be a problem here.
                 created_at = datetime.datetime.fromtimestamp(created_at/1000.0)
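The timestamp fallback above, isolated as a sketch; _parse_mesh_timestamp is a hypothetical helper name, not part of the library:

    import datetime

    def _parse_mesh_timestamp(ts):
        # MeshCentral usually reports epoch milliseconds, but fromtimestamp expects
        # seconds, so the seconds reading is tried first and the value is divided
        # by 1000 only if that interpretation fails.
        try:
            return datetime.datetime.fromtimestamp(ts)
        except (OSError, ValueError):
            return datetime.datetime.fromtimestamp(ts / 1000.0)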
@@ -106,7 +109,7 @@ class Device(object):
         if not isinstance(lastconnect, datetime.datetime) and lastconnect is not None:
             try:
                 lastconnect = datetime.datetime.fromtimestamp(lastconnect)
-            except OSError:
+            except (OSError, ValueError):
                 # Meshcentral returns in milliseconds, while fromtimestamp, and most of python, expects the argument in seconds. Try seconds first, then translate from ms if it fails.
                 # This doesn't work for really early timestamps, but I don't expect that to be a problem here.
                 lastconnect = datetime.datetime.fromtimestamp(lastconnect/1000.0)
@@ -129,7 +132,7 @@ class Device(object):
         Returns:
             bool: True on success, raise otherwise
 
-        Raises:
+        Raises:
             :py:class:`~meshctrl.exceptions.ServerError`: Error text from server if there is a failure
             :py:class:`~meshctrl.exceptions.SocketError`: Info about socket closure
             asyncio.TimeoutError: Command timed out
@@ -147,7 +150,7 @@ class Device(object):
         Returns:
             bool: True on success, raise otherwise
 
-        Raises:
+        Raises:
             :py:class:`~meshctrl.exceptions.ServerError`: Error text from server if there is a failure
             :py:class:`~meshctrl.exceptions.SocketError`: Info about socket closure
             asyncio.TimeoutError: Command timed out
@@ -166,7 +169,7 @@ class Device(object):
         Returns:
             bool: True on success, raise otherwise
 
-        Raises:
+        Raises:
             :py:class:`~meshctrl.exceptions.ServerError`: Error text from server if there is a failure
             :py:class:`~meshctrl.exceptions.SocketError`: Info about socket closure
             asyncio.TimeoutError: Command timed out
@@ -183,7 +186,7 @@ class Device(object):
         Returns:
             ~meshctrl.device.Device: Object representing the state of the device. This will be a new device, it will not update this device.
 
-        Raises:
+        Raises:
             ValueError: `Invalid device id` if device is not found
             :py:class:`~meshctrl.exceptions.SocketError`: Info about socket closure
             asyncio.TimeoutError: Command timed out
@@ -205,7 +208,7 @@ class Device(object):
         Returns:
             bool: True if successful, raise otherwise
 
-        Raises:
+        Raises:
             :py:class:`~meshctrl.exceptions.ServerError`: Error text from server if there is a failure
             :py:class:`~meshctrl.exceptions.SocketError`: Info about socket closure
             asyncio.TimeoutError: Command timed out
@@ -227,7 +230,7 @@ class Device(object):
         Returns:
             ~meshctrl.types.RunCommandResponse: Output of command
 
-        Raises:
+        Raises:
             :py:class:`~meshctrl.exceptions.ServerError`: Error text from server if there is a failure
             :py:class:`~meshctrl.exceptions.SocketError`: Info about socket closure
             ValueError: `Invalid device id` if device is not found
@@ -268,7 +271,7 @@ class Device(object):
         Returns:
             bool: True if successful
 
-        Raises:
+        Raises:
             :py:class:`~meshctrl.exceptions.SocketError`: Info about socket closure
             asyncio.TimeoutError: Command timed out
         '''
@@ -286,7 +289,7 @@ class Device(object):
         Returns:
             bool: True if successful
 
-        Raises:
+        Raises:
             :py:class:`~meshctrl.exceptions.SocketError`: Info about socket closure
             asyncio.TimeoutError: Command timed out
         '''
@@ -302,7 +305,7 @@ class Device(object):
         Returns:
             bool: True if successful
 
-        Raises:
+        Raises:
             :py:class:`~meshctrl.exceptions.SocketError`: Info about socket closure
             asyncio.TimeoutError: Command timed out
         '''
@@ -317,7 +320,7 @@ class Device(object):
         Returns:
             bool: True if successful
 
-        Raises:
+        Raises:
             :py:class:`~meshctrl.exceptions.SocketError`: Info about socket closure
             asyncio.TimeoutError: Command timed out
         '''
@@ -340,10 +343,10 @@ class Device(object):
     def __str__(self):
         return f"<Device: nodeid={self.nodeid} name={self.name} description={self.description} computer_name={self.computer_name} icon={self.icon} "\
                f"mesh={self.mesh} meshtype={self.meshtype} meshname={self.meshname} domain={self.domain} host={self.host} ip={self.ip} "\
-               f"tags={self.tags} details={self.details} created_at={self.created_at} lastaddr={self.lastaddr} lastconnect={self.lastconnect} "\
+               f"tags={self.tags} users={self.users} details={self.details} created_at={self.created_at} lastaddr={self.lastaddr} lastconnect={self.lastconnect} "\
                f"connected={self.connected} powered_on={self.powered_on} os_description={self.os_description} links={self.links} _extra_props={self._extra_props}>"

     def __repr__(self):
         return f"Device(nodeid={repr(self.nodeid)}, session={repr(self._session)}, name={repr(self.name)}, description={repr(self.description)}, computer_name={repr(self.computer_name)}, icon={repr(self.icon)}, "\
                f"mesh={repr(self.mesh)}, meshtype={repr(self.meshtype)}, meshname={repr(self.meshname)}, domain={repr(self.domain)}, host={repr(self.host)}, ip={repr(self.ip)}, "\
-               f"tags={repr(self.tags)}, details={repr(self.details)} created_at={repr(self.created_at)} lastaddr={repr(self.lastaddr)} lastconnect={repr(self.lastconnect)} "\
-               f"connected={repr(self.connected)}, powered_on={repr(self.powered_on)}, os_description={repr(self.os_description)}, links={repr(self.links)}, **{repr(self._extra_props)})"
+               f"tags={repr(self.tags)}, users={repr(self.users)}, details={repr(self.details)} created_at={repr(self.created_at)} lastaddr={repr(self.lastaddr)} lastconnect={repr(self.lastconnect)} "\
+               f"connected={repr(self.connected)}, powered_on={repr(self.powered_on)}, os_description={repr(self.os_description)}, links={repr(self.links)}, **{repr(self._extra_props)})"
@@ -2,7 +2,9 @@ class MeshCtrlError(Exception):
     """
     Base class for Meshctrl errors
     """
-    pass
+    def __init__(self, message, *args, **kwargs):
+        self.message = message
+        super().__init__(message, *args, **kwargs)
 
 class ServerError(MeshCtrlError):
     """
@@ -22,12 +24,10 @@ class FileTransferError(MeshCtrlError):
 
     Attributes:
         stats (dict): {"result" (str): Human readable result, "size" (int): number of bytes successfully transferred}
-        initialized (asyncio.Event): Event marking if the Session initialization has finished. Wait on this to wait for a connection.
-        alive (bool): Whether the session connection is currently alive
-        closed (asyncio.Event): Event that occurs when the session closes permanently
     """
     def __init__(self, message, stats):
        self.stats = stats
        super().__init__(message)
 
 class FileTransferCancelled(FileTransferError):
     """
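A hedged sketch of how the reworked exceptions surface transfer failures; `tunnel` stands for an already-established Files tunnel and `stream` for an open, readable file object, both outside this excerpt:

    from meshctrl import exceptions

    try:
        await tunnel.upload(stream, "/tmp/upload.bin", timeout=60)
    except exceptions.FileTransferCancelled as e:
        print("cancelled:", e.message, e.stats["size"])
    except exceptions.FileTransferError as e:
        # stats carries the result flag and the bytes moved before the failure
        print("failed:", e.message, e.stats)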
@@ -4,11 +4,32 @@ from . import exceptions
 from . import util
 import asyncio
 import json
+import importlib
+import importlib.util
+import shutil
+
+# import urllib
+# import urllib.request
+import urllib.parse
+old_parse = urllib.parse
+# Default proxy handler uses OS defined no_proxy in order to be helpful. This is unhelpful for our usecase. Monkey patch out proxy getting functions, but don't affect the user's urllib instance.
+spec = importlib.util.find_spec('urllib')
+urllib = importlib.util.module_from_spec(spec)
+spec.loader.exec_module(urllib)
+spec = importlib.util.find_spec('urllib.request')
+urllib.request = importlib.util.module_from_spec(spec)
+spec.loader.exec_module(urllib.request)
+urllib.parse = old_parse
+urllib.request.getproxies_environment = lambda: {}
+urllib.request.getproxies_registry = lambda: {}
+urllib.request.getproxies_macosx_sysconf = lambda: {}
+urllib.request.getproxies = lambda: {}
 
 class Files(tunnel.Tunnel):
-    def __init__(self, session, nodeid):
-        super().__init__(session, nodeid, constants.Protocol.FILES)
+    def __init__(self, session, node):
+        super().__init__(session, node.nodeid, constants.Protocol.FILES)
         self.recorded = None
+        self._node = node
         self._request_id = 0
         self._request_queue = asyncio.Queue()
         self._download_finished = asyncio.Event()
@@ -16,6 +37,17 @@ class Files(tunnel.Tunnel):
         self._current_request = None
         self._handle_requests_task = asyncio.create_task(self._handle_requests())
         self._chunk_size = 65564
+        proxies = {}
+        if self._session._proxy is not None:
+            # We don't know which protocol the user is going to use, but we only need support one at a time, so just assume both
+            proxies = {
+                "http": self._session._proxy,
+                "https": self._session._proxy,
+                "no": ""
+            }
+        self._proxy_handler = urllib.request.ProxyHandler(proxies=proxies)
+        self._http_opener = urllib.request.build_opener(self._proxy_handler, urllib.request.HTTPSHandler(context=self._session._ssl_context))
 
 
     def _get_request_id(self):
        self._request_id = (self._request_id+1)%(2**32-1)
@@ -68,6 +100,7 @@ class Files(tunnel.Tunnel):
 
         Args:
             directory (str): Path to the directory you wish to list
+            timeout (int): duration in seconds to wait for a response before throwing an error
 
         Returns:
             list[~meshctrl.types.FilesLSItem]: The directory listing
@@ -75,6 +108,7 @@ class Files(tunnel.Tunnel):
         Raises:
             :py:class:`~meshctrl.exceptions.ServerError`: Error from server
             :py:class:`~meshctrl.exceptions.SocketError`: Info about socket closure
+            asyncio.TimeoutError: Command timed out
         """
         data = await self._send_command({"action": "ls", "path": directory}, "ls", timeout=timeout)
         return data["dir"]
@@ -101,10 +135,12 @@ class Files(tunnel.Tunnel):
 
         Args:
             directory (str): Path of directory to create
+            timeout (int): duration in seconds to wait for a response before throwing an error
 
         Raises:
             :py:class:`~meshctrl.exceptions.ServerError`: Error from server
             :py:class:`~meshctrl.exceptions.SocketError`: Info about socket closure
+            asyncio.TimeoutError: Command timed out
 
         Returns:
             bool: True if directory was created
@@ -127,10 +163,12 @@ class Files(tunnel.Tunnel):
             path (str): Directory from which to delete files
             files (str|list[str]): File or files to remove from the directory
             recursive (bool): Whether to delete the files recursively
+            timeout (int): duration in seconds to wait for a response before throwing an error
 
         Raises:
             :py:class:`~meshctrl.exceptions.ServerError`: Error from server
             :py:class:`~meshctrl.exceptions.SocketError`: Info about socket closure
+            asyncio.TimeoutError: Command timed out
 
         Returns:
             str: Info about the files removed. Something along the lines of Delete: "/path/to/file", or 'Delete recursive: "/path/to/dir", n element(s) removed'.
@@ -155,10 +193,12 @@ class Files(tunnel.Tunnel):
             path (str): Directory from which to rename the file
             name (str): File to rename
             new_name (str): New name to give the file
+            timeout (int): duration in seconds to wait for a response before throwing an error
 
         Raises:
             :py:class:`~meshctrl.exceptions.ServerError`: Error from server
             :py:class:`~meshctrl.exceptions.SocketError`: Info about socket closure
+            asyncio.TimeoutError: Command timed out
 
         Returns:
             str: Info about file renamed. Something along the lines of 'Rename: "/path/to/file" to "newfile"'.
@@ -173,6 +213,7 @@ class Files(tunnel.Tunnel):
 
         return tasks[2].result()
 
+    @util._check_socket
     async def upload(self, source, target, name=None, timeout=None):
         '''
         Upload a stream to a device.
@@ -181,10 +222,12 @@ class Files(tunnel.Tunnel):
             source (io.IOBase): An IO instance from which to read the data. Must be open for reading.
             target (str): Path which to upload stream to on remote device
             name (str): Pass if target points at a directory instead of the file path. In that case, this will be the name of the file.
+            timeout (int): duration in seconds to wait for a response before throwing an error
 
         Raises:
             :py:class:`~meshctrl.exceptions.FileTransferError`: File transfer failed. Info available on the `stats` property
             :py:class:`~meshctrl.exceptions.FileTransferCancelled`: File transfer cancelled. Info available on the `stats` property
+            asyncio.TimeoutError: Command timed out
 
         Returns:
             dict: {result: bool whether upload succeeded, size: number of bytes uploaded}
@@ -198,17 +241,26 @@ class Files(tunnel.Tunnel):
             raise request["error"]
         return request["return"]
 
-    async def download(self, source, target, timeout=None):
+    def _http_download(self, url, target, timeout):
+        response = self._http_opener.open(url, timeout=timeout)
+        shutil.copyfileobj(response, target)
+
+    @util._check_socket
+    async def download(self, source, target, skip_http_attempt=False, skip_ws_attempt=False, timeout=None):
         '''
         Download a file from a device into a writable stream.
 
         Args:
             source (str): Path from which to download from device
             target (io.IOBase): Stream to which to write data. If None, create new BytesIO which is both readable and writable.
+            skip_http_attempt (bool): Meshcentral has a way to download files through http(s) instead of through the websocket. This method tends to be much faster than using the websocket, so we try it first. Setting this to True will skip that attempt and just use the established websocket connection.
+            skip_ws_attempt (bool): Like skip_http_attempt, except just throw an error if the http attempt fails instead of trying with the websocket
+            timeout (int): duration in seconds to wait for a response before throwing an error
 
         Raises:
             :py:class:`~meshctrl.exceptions.FileTransferError`: File transfer failed. Info available on the `stats` property
             :py:class:`~meshctrl.exceptions.FileTransferCancelled`: File transfer cancelled. Info available on the `stats` property
+            asyncio.TimeoutError: Command timed out
 
         Returns:
             dict: {result: bool whether download succeeded, size: number of bytes downloaded}
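A usage sketch of the two download paths documented above. Constructing the tunnel as Files(session, node) mirrors the constructor in this diff; a real application may obtain the tunnel differently, and `session`/`node` are assumed to exist already:

    import io

    tunnel = Files(session, node)   # node: a meshctrl.device.Device

    buf = io.BytesIO()
    result = await tunnel.download("/etc/hostname", buf, timeout=30)
    print(result)                   # {"result": True, "size": <bytes written>}

    # Force the websocket transfer instead of the (usually faster) HTTP attempt:
    await tunnel.download("/etc/hostname", io.BytesIO(), skip_http_attempt=True, timeout=30)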
@@ -216,6 +268,29 @@ class Files(tunnel.Tunnel):
         request_id = f"download_{self._get_request_id()}"
         data = { "action": 'download', "sub": 'start', "id": request_id, "path": source }
         request = {"id": request_id, "data": data, "type": "download", "source": source, "target": target, "size": 0, "finished": asyncio.Event(), "errored": asyncio.Event(), "error": None}
+        if not skip_http_attempt:
+            start_pos = target.tell()
+            try:
+                params = urllib.parse.urlencode({
+                    "c": self._authcookie["cookie"],
+                    "m": self._node.mesh.meshid.split("/")[-1],
+                    "n": self._node.nodeid.split("/")[-1],
+                    "f": source
+                })
+                url = self._session.url.replace('/control.ashx', f"/devicefile.ashx?{params}")
+                url = url.replace("wss://", "https://").replace("ws://", "http://")
+
+                loop = asyncio.get_event_loop()
+                await loop.run_in_executor(None, self._http_download, url, target, timeout)
+                size = target.tell() - start_pos
+                return {"result": True, "size": size}
+            except* Exception as eg:
+                if skip_ws_attempt:
+                    size = target.tell() - start_pos
+                    excs = eg.exceptions + (exceptions.FileTransferError("Errored", {"result": False, "size": size}),)
+                    raise ExceptionGroup("File download failed", excs)
+                target.seek(start_pos)
 
         await self._request_queue.put(request)
         await asyncio.wait_for(request["finished"].wait(), timeout)
         if request["error"] is not None:
@@ -230,7 +305,7 @@ class Files(tunnel.Tunnel):
                 return
             if cmd["reqid"] == self._current_request["id"]:
                 if cmd["action"] == "uploaddone":
-                    self._current_request["return"] = {"result": "success", "size": self._current_request["size"]}
+                    self._current_request["return"] = {"result": True, "size": self._current_request["size"]}
                     self._current_request["finished"].set()
                 elif cmd["action"] == "uploadstart":
                     while True:
@@ -252,7 +327,7 @@ class Files(tunnel.Tunnel):
                     if self._current_request["inflight"] == 0 and self._current_request["complete"]:
                         await self._message_queue.put(json.dumps({ "action": 'uploaddone', "reqid": self._current_request["id"]}))
                 elif cmd["action"] == "uploaderror":
-                    self._current_request["return"] = {"result": "canceled", "size": self._current_request["size"]}
+                    self._current_request["return"] = {"result": False, "size": self._current_request["size"]}
                     self._current_request["error"] = exceptions.FileTransferError("Errored", self._current_request["return"])
                     self._current_request["errored"].set()
                     self._current_request["finished"].set()
@@ -268,7 +343,7 @@ class Files(tunnel.Tunnel):
                 self._current_request["target"].write(data[4:])
                 self._current_request["size"] += len(data)-4
                 if (data[3] & 1) != 0:
-                    self._current_request["return"] = {"result": "success", "size": self._current_request["size"]}
+                    self._current_request["return"] = {"result": True, "size": self._current_request["size"]}
                     self._current_request["finished"].set()
                 else:
                     await self._message_queue.put(json.dumps({ "action": 'download', "sub": 'ack', "id": self._current_request["id"] }))
@@ -279,7 +354,7 @@ class Files(tunnel.Tunnel):
             if cmd["sub"] == "start":
                 await self._message_queue.put(json.dumps({ "action": 'download', "sub": 'startack', "id": self._current_request["id"] }))
             elif cmd["sub"] == "cancel":
-                self._current_request["return"] = {"result": "canceled", "size": self._current_request["size"]}
+                self._current_request["return"] = {"result": False, "size": self._current_request["size"]}
                 self._current_request["error"] = exceptions.FileTransferCancelled("Cancelled", self._current_request["return"])
                 self._current_request["errored"].set()
                 self._current_request["finished"].set()
@@ -31,7 +31,7 @@ class Mesh(object):
         domain (str|None): Domain on server to which device is connected.
         links (dict[str, ~meshctrl.types.UserLink]|None): Collection of links for the device group
     '''
-    def __init__(self, meshid, session, creation=None, created_at=None, name=None,
+    def __init__(self, meshid, session, creation=None, created_at=None, name=None,
                        mtype=None, meshtype=None, creatorid=None, desc=None, description=None,
                        domain=None, creatorname=None, links=None, **kwargs):
         self.meshid = meshid
@@ -46,7 +46,7 @@ class Mesh(object):
         if not isinstance(created_at, datetime.datetime) and created_at is not None:
             try:
                 created_at = datetime.datetime.fromtimestamp(created_at)
-            except OSError:
+            except (OSError, ValueError):
                 # Meshcentral returns in milliseconds, while fromtimestamp, and most of python, expects the argument in seconds. Try seconds first, then translate from ms if it fails.
                 # This doesn't work for really early timestamps, but I don't expect that to be a problem here.
                 created_at = datetime.datetime.fromtimestamp(created_at/1000.0)
@@ -83,7 +83,7 @@ class Mesh(object):
         Returns:
             dict[str, ~meshctrl.types.AddUsersToDeviceGroupResponse]: Object showing which were added correctly and which were not, along with their result messages. str is userid to map response.
 
-        Raises:
+        Raises:
             :py:class:`~meshctrl.exceptions.SocketError`: Info about socket closure
             asyncio.TimeoutError: Command timed out
         '''
@@ -96,4 +96,4 @@ class Mesh(object):
     def __repr__(self):
         return f"Mesh(meshid={repr(self.meshid)}, session={repr(self._session)}, name={repr(self.name)}, description={repr(self.description)}, created_at={repr(self.created_at)}, "\
                f"meshtype={repr(self.meshtype)}, domain={repr(self.domain)}, "\
-               f"created_at={repr(self.created_at)}, creatorid={repr(self.creatorid)}, creatorname={repr(self.creatorname)}, links={repr(self.links)}, **{repr(self._extra_props)})"
+               f"created_at={repr(self.created_at)}, creatorid={repr(self.creatorid)}, creatorname={repr(self.creatorname)}, links={repr(self.links)}, **{repr(self._extra_props)})"
@@ -8,6 +8,8 @@ import json
 import datetime
 import io
 import ssl
+import urllib
+from python_socks.async_.asyncio import Proxy
 from . import constants
 from . import exceptions
 from . import util
@@ -28,9 +30,10 @@ class Session(object):
         domain (str): Domain to connect to
         password (str): Password with which to connect. Can also be password generated from token.
         loginkey (str|bytes): Key from already handled login. Overrides username/password.
-        proxy (str): "url:port" to use for proxy server NOTE: This is currently not implemented due to a limitation of the underlying websocket library. Upvote the issue if you find this important.
+        proxy (str): "url:port" to use for proxy server
         token (str): Login token. This appears to be superfluous
         ignore_ssl (bool): Ignore SSL errors
+        auto_reconnect (bool): In case of server failure, attempt to auto reconnect. All outstanding requests will be killed.
 
     Returns:
         :py:class:`Session`: Session connected to url
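A hedged connection sketch using the parameters documented above; the host, credentials, and proxy address are placeholders, and the context manager is assumed to yield the session:

    import asyncio
    import meshctrl

    async def main():
        async with meshctrl.Session(
            "wss://meshcentral.example.com",
            user="admin",
            password="hunter2",
            ignore_ssl=True,                        # e.g. a self-signed test server
            proxy="http://proxy.example.com:3128",  # optional, "url:port" per the docstring
            auto_reconnect=True,
        ) as session:
            print(await session.list_device_groups())

    asyncio.run(main())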
@@ -43,9 +46,19 @@ class Session(object):
     '''
 
     def __init__(self, url, user=None, domain=None, password=None, loginkey=None, proxy=None, token=None, ignore_ssl=False, auto_reconnect=False):
-        if len(url) < 5 or ((not url.startswith('wss://')) and (not url.startswith('ws://'))):
+        parsed = urllib.parse.urlparse(url)
+
+        if parsed.scheme not in ("wss", "ws"):
             raise ValueError("Invalid URL")
 
+        port = 80
+        if parsed.port is None:
+            if parsed.scheme == "wss":
+                port = 443
+        p = list(parsed)
+        p[1] = f"{parsed.hostname}:{port}"
+        url = urllib.parse.urlunparse(p)
+
         if (not url.endswith('/')):
             url += '/'
 
@@ -92,6 +105,7 @@ class Session(object):
         self._inflight = set()
         self._file_tunnels = {}
         self._ignore_ssl = ignore_ssl
         self.auto_reconnect = auto_reconnect
 
         self._eventer = util.Eventer()
@@ -110,19 +124,17 @@ class Session(object):
         self._message_queue = asyncio.Queue()
         self._send_task = None
         self._listen_task = None
+        self._ssl_context = None
+        if self._ignore_ssl:
+            self._ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
+            self._ssl_context.check_hostname = False
+            self._ssl_context.verify_mode = ssl.CERT_NONE
 
     async def _main_loop(self):
         try:
             options = {}
-            if self._ignore_ssl:
-                ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
-                ssl_context.check_hostname = False
-                ssl_context.verify_mode = ssl.CERT_NONE
-                options = { "ssl": ssl_context }
-
-            # Setup the HTTP proxy if needed
-            # if (self._proxy != None):
-            #     options.agent = new https_proxy_agent(urllib.parse(self._proxy))
+            if self._ssl_context is not None:
+                options["ssl"] = self._ssl_context
 
             headers = websockets.datastructures.Headers()
 
@@ -130,8 +142,9 @@ class Session(object):
                 token = self._token if self._token else b""
                 headers['x-meshauth'] = (base64.b64encode(self._user.encode()) + b',' + base64.b64encode(self._password.encode()) + token).decode()
 
 
             options["additional_headers"] = headers
-            async for websocket in websockets.asyncio.client.connect(self.url, process_exception=util._process_websocket_exception, **options):
+            async for websocket in websockets.asyncio.client.connect(self.url, proxy=self._proxy, process_exception=util._process_websocket_exception, **options):
                 self.alive = True
                 self._socket_open.set()
                 try:
@@ -139,10 +152,10 @@ class Session(object):
                         tg.create_task(self._listen_data_task(websocket))
                         tg.create_task(self._send_data_task(websocket))
                 except* websockets.ConnectionClosed as e:
 
                     self._socket_open.clear()
                     if not self.auto_reconnect:
                         raise
 
         except* Exception as eg:
             self.alive = False
             self._socket_open.clear()
@@ -159,13 +172,20 @@ class Session(object):
     async def _send_data_task(self, websocket):
         while True:
             message = await self._message_queue.get()
-            print(f"{self._user} send: {message}\n")
             await websocket.send(message)
 
     async def _listen_data_task(self, websocket):
         async for message in websocket:
-            print(f"{self._user} recv: {message}\n")
-            data = json.loads(message)
             await self._eventer.emit("raw", message)
+            # Meshcentral does pong wrong and breaks our parsing, so fix it here.
+            if message == '{action:"pong"}':
+                message = '{"action":"pong"}'
+
+            # Can't process non-json data, don't even try
+            try:
+                data = json.loads(message)
+            except SyntaxError:
+                continue
             action = data.get("action", None)
             await self._eventer.emit("server_event", data)
             if action == "close":
@@ -197,12 +217,14 @@ class Session(object):
         return self._command_id
 
     async def close(self):
         # Dunno yet
-        self._main_loop_task.cancel()
-        try:
-            await self._main_loop_task
-        except asyncio.CancelledError:
-            pass
-        await asyncio.gather(*[tunnel.close() for name, tunnel in self._file_tunnels.items()])
+        try:
+            await asyncio.gather(*[tunnel.close() for name, tunnel in self._file_tunnels.items()])
+        finally:
+            self._main_loop_task.cancel()
+            try:
+                await self._main_loop_task
+            except asyncio.CancelledError:
+                pass
 
     @util._check_socket
     async def __aenter__(self):
@@ -234,14 +256,14 @@ class Session(object):
         return response
 
     @util._check_socket
-    async def _send_command_no_response_id(self, data, timeout=None):
+    async def _send_command_no_response_id(self, data, action_override=None, timeout=None):
         responded = asyncio.Event()
         response = None
         async def _(data):
             nonlocal response
             response = data
             responded.set()
-        self._eventer.once(data["action"], _)
+        self._eventer.once(action_override if action_override is not None else data["action"], _)
         await self._message_queue.put(json.dumps(data))
         await asyncio.wait_for(responded.wait(), timeout=timeout)
         if isinstance(response, Exception):
@@ -268,6 +290,23 @@ class Session(object):
         """
         return self._user_info
 
+    async def ping(self, timeout=None):
+        '''
+        Ping the server. WARNING: Non namespaced call. Calling this function again before it returns may cause unintended consequences.
+
+        Args:
+            timeout (int): duration in seconds to wait for a response before throwing an error
+
+        Returns:
+            dict: {"action": "pong"}
+
+        Raises:
+            :py:class:`~meshctrl.exceptions.ServerError`: Error from server
+            :py:class:`~meshctrl.exceptions.SocketError`: Info about socket closure
+            asyncio.TimeoutError: Command timed out
+        '''
+        data = await self._send_command_no_response_id({"action": "ping"}, action_override="pong", timeout=timeout)
+        return data
+
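A usage sketch of the ping call added here (illustrative, not part of the patch); keep calls serialized, per the warning in the docstring:

    pong = await session.ping(timeout=10)
    assert pong == {"action": "pong"}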
     async def list_device_groups(self, timeout=None):
         '''
@@ -284,7 +323,7 @@ class Session(object):
             :py:class:`~meshctrl.exceptions.SocketError`: Info about socket closure
             asyncio.TimeoutError: Command timed out
         '''
-        data = await self._send_command({"action": "meshes"}, "list_device_groups", timeout)
+        data = await self._send_command({"action": "meshes"}, "list_device_groups", timeout=timeout)
         return [mesh.Mesh(m["_id"], self, **m) for m in data["meshes"]]
 
 
@@ -323,7 +362,7 @@ class Session(object):
             op["name"] = name
         if message:
             op["msg"] = message
-        data = await self._send_command(op, "send_invite_email", timeout)
+        data = await self._send_command(op, "send_invite_email", timeout=timeout)
         if ("result" in data and data["result"].lower() != "ok"):
             raise exceptions.ServerError(data["result"])
         return True
@@ -359,7 +398,7 @@ class Session(object):
             op["meshname"] = group
         if flags != None:
             op["flags"] = flags
-        data = await self._send_command(op, "generate_invite_link", timeout)
+        data = await self._send_command(op, "generate_invite_link", timeout=timeout)
         if ("result" in data and data["result"].lower() != "ok"):
             raise exceptions.ServerError(data["result"])
         del data["tag"]
@@ -382,7 +421,7 @@ class Session(object):
             :py:class:`~meshctrl.exceptions.SocketError`: Info about socket closure
             asyncio.TimeoutError: Command timed out
         '''
-        data = await self._send_command({"action": "users"}, "list_users", timeout)
+        data = await self._send_command({"action": "users"}, "list_users", timeout=timeout)
         if ("result" in data and data["result"].lower() != "ok"):
             raise exceptions.ServerError(data["result"])
         return data["users"]
@@ -401,7 +440,7 @@ class Session(object):
             :py:class:`~meshctrl.exceptions.SocketError`: Info about socket closure
             asyncio.TimeoutError: Command timed out
         '''
-        return (await self._send_command({"action": "wssessioncount"}, "list_user_sessions", timeout))["wssessions"]
+        return (await self._send_command({"action": "wssessioncount"}, "list_user_sessions", timeout=timeout))["wssessions"]
 
 
     async def list_devices(self, details=False, group=None, meshid=None, timeout=None):
@@ -426,23 +465,38 @@ class Session(object):
         tasks = []
         async with asyncio.TaskGroup() as tg:
             if details:
-                tasks.append(tg.create_task(self._send_command_no_response_id({"action": "getDeviceDetails", "type":"json"}, timeout)))
+                tasks.append(tg.create_task(self._send_command_no_response_id({"action": "getDeviceDetails", "type":"json"}, timeout=timeout)))
             elif group:
-                tasks.append(tg.create_task(self._send_command({ "action": 'nodes', "meshname": group}, "list_devices", timeout)))
+                tasks.append(tg.create_task(self._send_command({ "action": 'nodes', "meshname": group}, "list_devices", timeout=timeout)))
             elif meshid:
-                tasks.append(tg.create_task(self._send_command({ "action": 'nodes', "meshid": meshid}, "list_devices", timeout)))
+                tasks.append(tg.create_task(self._send_command({ "action": 'nodes', "meshid": meshid}, "list_devices", timeout=timeout)))
             else:
-                tasks.append(tg.create_task(self._send_command({ "action": 'meshes' }, "list_devices", timeout)))
-                tasks.append(tg.create_task(self._send_command({ "action": 'nodes' }, "list_devices", timeout)))
+                tasks.append(tg.create_task(self._send_command({ "action": 'meshes' }, "list_devices", timeout=timeout)))
+                tasks.append(tg.create_task(self._send_command({ "action": 'nodes' }, "list_devices", timeout=timeout)))
 
         res0 = tasks[0].result()
         if "result" in res0:
             raise exceptions.ServerError(res0["result"])
         if details:
-            nodes = json.loads(res0["data"])
+            try:
+                nodes = res0["data"]
+                # Accept any number of nested strings, meshcentral is odd
+                while True:
+                    try:
+                        nodes = json.loads(nodes)
+                    except TypeError:
+                        break
+            except Exception as e:
+                print(f"Failed to parse device data: {e}")
+                return
+
             for node in nodes:
                 if node["node"].get("meshid", None):
                     node["node"]["mesh"] = mesh.Mesh(node["node"].get("meshid"), self)
+                if "lastConnect" in node and isinstance(node["lastConnect"], dict):
+                    node["node"]["lastconnect"] = node["lastConnect"].get("time")
+                    node["node"]["lastaddr"] = node["lastConnect"].get("addr")
+                    del node["lastConnect"]
                 details = {}
                 for key, val in node.items():
                     if key != "node":
@@ -478,6 +532,24 @@ class Session(object):
                 node["mesh"] = mesh.Mesh(node.get("meshid"), self)
         return [device.Device(n["_id"], self, **n) for n in nodes]
 
+    async def raw_messages(self):
+        '''
+        Listen to raw messages from the server. These will be strings that have not been parsed at all. Consider this an emergency fallback if meshcentral sends something odd. You will get every message from the websocket.
+
+        Returns:
+            generator(data): A generator which will generate every message the server sends
+        '''
+        event_queue = asyncio.Queue()
+        async def _(data):
+            await event_queue.put(data)
+        self._eventer.on("raw", _)
+        try:
+            while True:
+                data = await event_queue.get()
+                yield data
+        finally:
+            self._eventer.off("raw", _)
+
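A usage sketch of the raw listener added here (illustrative, not part of the patch); leaving the loop detaches the underlying "raw" handler via the finally block:

    async for message in session.raw_messages():
        print("raw from server:", message)
        if should_stop():   # hypothetical stop condition
            break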
     async def events(self, filter=None):
         '''
         Listen to events from the server
@@ -535,7 +607,7 @@ class Session(object):
         if limit:
             cmd["limit"] = limit
 
-        data = await self._send_command(cmd, "list_events", timeout)
+        data = await self._send_command(cmd, "list_events", timeout=timeout)
         return data["events"]
 
     async def list_login_tokens(self, timeout=None):
@@ -552,7 +624,7 @@ class Session(object):
             :py:class:`~meshctrl.exceptions.SocketError`: Info about socket closure
             asyncio.TimeoutError: Command timed out
         '''
-        return (await self._send_command_no_response_id({"action": "loginTokens"}, timeout))["loginTokens"]
+        return (await self._send_command_no_response_id({"action": "loginTokens"}, timeout=timeout))["loginTokens"]
 
     async def add_login_token(self, name, expire=None, timeout=None):
         '''
@@ -571,7 +643,7 @@ class Session(object):
             asyncio.TimeoutError: Command timed out
         '''
         cmd = { "action": 'createLoginToken', "name": name, "expire": 0 if not expire else expire }
-        data = await self._send_command_no_response_id(cmd, timeout)
+        data = await self._send_command_no_response_id(cmd, timeout=timeout)
         del data["action"]
         return data
 
@@ -603,7 +675,7 @@ class Session(object):
                     name = token["tokenUser"]
                     break
             realnames.append(name)
-        return (await self._send_command_no_response_id({ "action": 'loginTokens', "remove": realnames }, timeout))["loginTokens"]
+        return (await self._send_command_no_response_id({ "action": 'loginTokens', "remove": realnames }, timeout=timeout))["loginTokens"]
 
     async def add_user(self, name, password=None, randompass=False, domain=None, email=None, emailverified=False, resetpass=False, realname=None, phone=None, rights=None, timeout=None):
         '''
@@ -651,7 +723,7 @@ class Session(object):
         if isinstance(realname, str):
             op["realname"] = realname
 
-        data = await self._send_command(op, "add_user", timeout)
+        data = await self._send_command(op, "add_user", timeout=timeout)
         if data.get("result", "ok").lower() != "ok":
             raise exceptions.ServerError(data["result"])
         return True
@@ -706,7 +778,7 @@ class Session(object):
             op["realname"] = realname
         if realname is True:
             op["realname"] = ''
-        data = await self._send_command(op, "edit_user", timeout)
+        data = await self._send_command(op, "edit_user", timeout=timeout)
         if data.get("result", "ok").lower() != "ok":
             raise exceptions.ServerError(data["result"])
         return True
@@ -732,7 +804,7 @@ class Session(object):
         elif (self._domain is not None) and ("/" not in userid):
             userid = f"user/{self._domain}/{userid}"
 
-        data = await self._send_command({ "action": 'deleteuser', "userid": userid }, "remove_user", timeout)
+        data = await self._send_command({ "action": 'deleteuser', "userid": userid }, "remove_user", timeout=timeout)
         if data.get("result", "ok").lower() != "ok":
             raise exceptions.ServerError(data["result"])
         return True
@@ -760,7 +832,7 @@ class Session(object):
             op["domain"] = self._domain
         elif self._domain is not None:
             op["domain"] = self._domain
-        data = await self._send_command(op, "add_user_group", timeout)
+        data = await self._send_command(op, "add_user_group", timeout=timeout)
         if data.get("result", "ok").lower() != "ok":
             raise exceptions.ServerError(data["result"])
 
@@ -795,7 +867,7 @@ class Session(object):
 
         if (not groupid.startswith("ugrp/")):
             groupid = f"ugrp//{groupid}"
-        data = await self._send_command({ "action": 'deleteusergroup', "ugrpid": groupid }, "remove_user_group", timeout)
+        data = await self._send_command({ "action": 'deleteusergroup', "ugrpid": groupid }, "remove_user_group", timeout=timeout)
         if data.get("result", "ok").lower() != "ok":
             raise exceptions.ServerError(data["result"])
         return True
@@ -814,7 +886,7 @@ class Session(object):
             :py:class:`~meshctrl.exceptions.SocketError`: Info about socket closure
             asyncio.TimeoutError: Command timed out
         '''
-        r = await self._send_command({"action": "usergroups"}, "list_user_groups", timeout)
+        r = await self._send_command({"action": "usergroups"}, "list_user_groups", timeout=timeout)
         groups = []
         for key, val in r["ugroups"].items():
             val["_id"] = key
@@ -888,7 +960,7 @@ class Session(object):
         async with asyncio.TaskGroup() as tg:
-            tasks.append(tg.create_task(asyncio.wait_for(_(tg), timeout=timeout)))
-            tasks.append(tg.create_task(self._send_command({ "action": 'addusertousergroup', "ugrpid": groupid, "usernames": usernames}, "add_users_to_user_group", timeout)))
+            tasks.append(tg.create_task(asyncio.wait_for(__(tg), timeout=timeout)))
+            tasks.append(tg.create_task(self._send_command({ "action": 'addusertousergroup', "ugrpid": groupid, "usernames": usernames}, "add_users_to_user_group", timeout=timeout)))
 
 
         res = tasks[2].result()
@@ -922,7 +994,7 @@ class Session(object):
         if (not groupid.startswith("ugrp/")):
             groupid = f"ugrp//{groupid}"
 
-        data = await self._send_command({ "action": 'removeuserfromusergroup', "ugrpid": groupid, "userid": userid }, "remove_from_user_group", timeout)
+        data = await self._send_command({ "action": 'removeuserfromusergroup', "ugrpid": groupid, "userid": userid }, "remove_from_user_group", timeout=timeout)
 
         if data.get("result", "ok").lower() != "ok":
             raise exceptions.ServerError(data["result"])
@@ -953,7 +1025,7 @@ class Session(object):
         if rights is None:
             rights = 0
 
-        data = await self._send_command({ "action": 'adddeviceuser', "nodeid": nodeid, "userids": userids, "rights": rights}, "add_users_to_device", timeout)
+        data = await self._send_command({ "action": 'adddeviceuser', "nodeid": nodeid, "userids": userids, "rights": rights}, "add_users_to_device", timeout=timeout)
 
         if data.get("result", "ok").lower() != "ok":
             raise exceptions.ServerError(data["result"])
@@ -981,7 +1053,7 @@ class Session(object):
 
         userids = [f"user//{u}" if not u.startswith("user//") else u for u in userids]
 
-        data = await self._send_command({ "action": 'adddeviceuser', "nodeid": nodeid, "usernames": userids, "rights": 0, "remove": True }, "remove_users_from_device", timeout)
+        data = await self._send_command({ "action": 'adddeviceuser', "nodeid": nodeid, "usernames": userids, "rights": 0, "remove": True }, "remove_users_from_device", timeout=timeout)
 
         if data.get("result", "ok").lower() != "ok":
             raise exceptions.ServerError(data["result"])
@@ -1018,7 +1090,7 @@ class Session(object):
         if consent:
             op["consent"] = consent
 
-        data = await self._send_command(op, "add_device_group", timeout)
+        data = await self._send_command(op, "add_device_group", timeout=timeout)
         if data.get("result", "ok").lower() != "ok":
             raise exceptions.ServerError(data["result"])
 
@@ -1053,7 +1125,7 @@ class Session(object):
             op["meshname"] = meshid
             del op["meshid"]
 
-        data = await self._send_command(op, "remove_device_group", timeout)
+        data = await self._send_command(op, "remove_device_group", timeout=timeout)
 
         if data.get("result", "ok").lower() != "ok":
             raise exceptions.ServerError(data["result"])
@@ -1107,7 +1179,7 @@ class Session(object):
         if consent is not None:
             op["consent"] = consent
 
-        data = await self._send_command(op, "edit_device_group", timeout)
+        data = await self._send_command(op, "edit_device_group", timeout=timeout)
 
         if data.get("result", "ok").lower() != "ok":
             raise exceptions.ServerError(data["result"])
@@ -1138,7 +1210,7 @@ class Session(object):
             op["meshname"] = meshid
             del op["meshid"]
 
-        data = await self._send_command(op, "move_to_device_group", timeout)
+        data = await self._send_command(op, "move_to_device_group", timeout=timeout)
 
         if data.get("result", "ok").lower() != "ok":
             raise exceptions.ServerError(data["result"])
@@ -1171,7 +1243,7 @@ class Session(object):
             op["meshname"] = meshid
             del op["meshid"]
 
-        data = await self._send_command(op, "add_user_to_device_group", timeout)
+        data = await self._send_command(op, "add_user_to_device_group", timeout=timeout)
         results = data["result"].split(",")
         out = {}
         for i, result in enumerate(results):
@@ -1213,7 +1285,7 @@ class Session(object):
         tasks = []
         async with asyncio.TaskGroup() as tg:
             for userid in userids:
-                tasks.append(tg.create_task(self._send_command({ "action": 'removemeshuser', "userid": userid } | id_obj, "remove_users_from_device_group", timeout)))
+                tasks.append(tg.create_task(self._send_command({ "action": 'removemeshuser', "userid": userid } | id_obj, "remove_users_from_device_group", timeout=timeout)))
 
         out = {}
         for i, task in enumerate(tasks):
@@ -1247,7 +1319,7 @@ class Session(object):
         if userid:
             op["userid"] = userid
 
-        data = await self._send_command(op, "broadcast", timeout)
+        data = await self._send_command(op, "broadcast", timeout=timeout)
 
         if data.get("result", "ok").lower() != "ok":
             raise exceptions.ServerError(data["result"])
@@ -1271,10 +1343,10 @@ class Session(object):
         '''
         tasks = []
         async with asyncio.TaskGroup() as tg:
-            tasks.append(tg.create_task(self._send_command({ "action": 'nodes' }, "device_info", timeout)))
-            tasks.append(tg.create_task(self._send_command_no_response_id({ "action": 'getnetworkinfo', "nodeid": nodeid }, timeout)))
-            tasks.append(tg.create_task(self._send_command_no_response_id({ "action": 'lastconnect', "nodeid": nodeid }, timeout)))
-            tasks.append(tg.create_task(self._send_command({ "action": 'getsysinfo', "nodeid": nodeid, "nodeinfo": True }, "device_info", timeout)))
+            tasks.append(tg.create_task(self._send_command({ "action": 'nodes' }, "device_info", timeout=timeout)))
+            tasks.append(tg.create_task(self._send_command_no_response_id({ "action": 'getnetworkinfo', "nodeid": nodeid }, timeout=timeout)))
+            tasks.append(tg.create_task(self._send_command_no_response_id({ "action": 'lastconnect', "nodeid": nodeid }, timeout=timeout)))
+            tasks.append(tg.create_task(self._send_command({ "action": 'getsysinfo', "nodeid": nodeid, "nodeinfo": True }, "device_info", timeout=timeout)))
             tasks.append(tg.create_task(self.list_device_groups(timeout=timeout)))
 
         nodes, network, lastconnect, sysinfo, meshes = (_.result() for _ in tasks)
@@ -1284,6 +1356,14 @@ class Session(object):
         if sysinfo is not None and sysinfo.get("node", None):
             # Node information came with system information
             node = sysinfo.get("node", None)
+            for meshid, _nodes in nodes["nodes"].items():
+                for _mesh in meshes:
+                    if _mesh.meshid == meshid:
+                        break
+                else:
+                    break
+                if meshid == node["meshid"]:
+                    node["mesh"] = _mesh
         else:
             # This device does not have system information, get node information from the nodes list.
             for meshid, _nodes in nodes["nodes"].items():
@@ -1298,10 +1378,10 @@ class Session(object):
                     node["meshid"] = meshid
                     if _mesh is not None:
                         node["mesh"] = _mesh
-                    sysinfo["node"] = node
-                    sysinfo["nodeid"] = nodeid
-                    del sysinfo["result"]
-                    del sysinfo["noinfo"]
+                    break
+            else:
+                continue
+            break
         if node is None:
             raise ValueError("Invalid device id")
         if lastconnect is not None:
@@ -1344,7 +1424,7 @@ class Session(object):
         if consent is not None:
             op["consent"] = consent
 
-        data = await self._send_command(op, "edit_device", timeout)
+        data = await self._send_command(op, "edit_device", timeout=timeout)
 
         if data.get("result", "ok").lower() != "ok":
             raise exceptions.ServerError(data["result"])
@@ -1398,11 +1478,12 @@ class Session(object):
                     result.setdefault(node, {})["complete"] = True
                     if all(_["complete"] for key, _ in result.items()):
                         break
+                    continue
                 elif (event["value"].startswith("Run commands")):
                     continue
                 result[node]["result"].append(event["value"])
         async def __(command):
-            data = await self._send_command(command, "run_command", timeout)
+            data = await self._send_command(command, "run_command", timeout=timeout)
 
             if data.get("result", "ok").lower() != "ok":
                 raise exceptions.ServerError(data["result"])
@@ -1479,7 +1560,7 @@ class Session(object):
         if isinstance(nodeids, str):
             nodeids = [nodeids]
 
-        return await self._send_command({ "action": 'wakedevices', "nodeids": nodeids }, "wake_devices", timeout)
+        return await self._send_command({ "action": 'wakedevices', "nodeids": nodeids }, "wake_devices", timeout=timeout)
 
     async def reset_devices(self, nodeids, timeout=None):
         '''
@@ -1500,7 +1581,7 @@ class Session(object):
         if isinstance(nodeids, str):
             nodeids = [nodeids]
 
-        return await self._send_command({ "action": 'poweraction', "nodeids": nodeids, "actiontype": 3 }, "reset_devices", timeout)
+        return await self._send_command({ "action": 'poweraction', "nodeids": nodeids, "actiontype": 3 }, "reset_devices", timeout=timeout)
 
    async def sleep_devices(self, nodeids, timeout=None):
         '''
@@ -1521,7 +1602,7 @@ class Session(object):
         if isinstance(nodeids, str):
             nodeids = [nodeids]
 
-        return await self._send_command({ "action": 'poweraction', "nodeids": nodeids, "actiontype": 4 }, "sleep_devices", timeout)
+        return await self._send_command({ "action": 'poweraction', "nodeids": nodeids, "actiontype": 4 }, "sleep_devices", timeout=timeout)
 
    async def power_off_devices(self, nodeids, timeout=None):
         '''
@@ -1542,7 +1623,7 @@ class Session(object):
         if isinstance(nodeids, str):
             nodeids = [nodeids]
 
-        return await self._send_command({ "action": 'poweraction', "nodeids": nodeids, "actiontype": 2 }, "power_off_devices", timeout)
+        return await self._send_command({ "action": 'poweraction', "nodeids": nodeids, "actiontype": 2 }, "power_off_devices", timeout=timeout)
 
    async def list_device_shares(self, nodeid, timeout=None):
         '''
@@ -1559,7 +1640,7 @@ class Session(object):
             :py:class:`~meshctrl.exceptions.SocketError`: Info about socket closure
             asyncio.TimeoutError: Command timed out
         '''
-        data = await self._send_command_no_response_id({ "action": 'deviceShares', "nodeid": nodeid }, timeout)
+        data = await self._send_command_no_response_id({ "action": 'deviceShares', "nodeid": nodeid }, timeout=timeout)
         if data.get("result", "ok").lower() != "ok":
             raise exceptions.ServerError(data["result"])
 
@@ -1604,7 +1685,7 @@ class Session(object):
             end = int(start.timestamp())
         if end <= start:
             raise ValueError("End time must be ahead of start time")
-        data = await self._send_command({ "action": 'createDeviceShareLink', "nodeid": nodeid, "guestname": name, "p": constants.SharingTypeEnum[type], "consent": consent, "start": start, "end": end }, "add_device_share", timeout)
+        data = await self._send_command({ "action": 'createDeviceShareLink', "nodeid": nodeid, "guestname": name, "p": constants.SharingTypeEnum[type], "consent": consent, "start": start, "end": end }, "add_device_share", timeout=timeout)
 
         if data.get("result", "ok").lower() != "ok":
             raise exceptions.ServerError(data["result"])
@@ -1633,7 +1714,7 @@ class Session(object):
             :py:class:`~meshctrl.exceptions.SocketError`: Info about socket closure
             asyncio.TimeoutError: Command timed out
         '''
-        data = await self._send_command({ "action": 'removeDeviceShare', "nodeid": nodeid, "publicid": shareid }, "remove_device_share", timeout)
+        data = await self._send_command({ "action": 'removeDeviceShare', "nodeid": nodeid, "publicid": shareid }, "remove_device_share", timeout=timeout)
 
         if data.get("result", "ok").lower() != "ok":
             raise exceptions.ServerError(data["result"])
@@ -1669,12 +1750,13 @@ class Session(object):
         tasks = []
         async with asyncio.TaskGroup() as tg:
             tasks.append(tg.create_task(asyncio.wait_for(_(), timeout=timeout)))
|
||||
tasks.append({ "action": 'msg', "type": 'openUrl', "nodeid": nodeid, "url": url }, "device_open_url", timeout)
|
||||
tasks.append({ "action": 'msg', "type": 'openUrl', "nodeid": nodeid, "url": url }, "device_open_url", timeout=timeout)
|
||||
|
||||
|
||||
success = tasks[0].result()
|
||||
res = tasks[1].result()
|
||||
success = tasks[2].result()
|
||||
|
||||
if data.get("result", "ok").lower() != "ok":
|
||||
if res.get("result", "ok").lower() != "ok":
|
||||
raise exceptions.ServerError(data["result"])
|
||||
|
||||
if not success:
|
||||
@@ -1701,7 +1783,7 @@ class Session(object):
|
||||
:py:class:`~meshctrl.exceptions.SocketError`: Info about socket closure
|
||||
asyncio.TimeoutError: Command timed out
|
||||
'''
|
||||
data = await self._send_command({ "action": 'msg', "type": 'messagebox', "nodeid": nodeid, "title": title, "msg": message }, "device_message", timeout)
|
||||
data = await self._send_command({ "action": 'msg', "type": 'messagebox', "nodeid": nodeid, "title": title, "msg": message }, "device_message", timeout=timeout)
|
||||
|
||||
if data.get("result", "ok").lower() != "ok":
|
||||
raise exceptions.ServerError(data["result"])
|
||||
@@ -1731,7 +1813,7 @@ class Session(object):
|
||||
if isinstance(nodeids, str):
|
||||
nodeids = [nodeids]
|
||||
|
||||
data = self._send_command({ "action": 'toast', "nodeids": nodeids, "title": "MeshCentral", "msg": message }, "device_toast", timeout)
|
||||
data = self._send_command({ "action": 'toast', "nodeids": nodeids, "title": "MeshCentral", "msg": message }, "device_toast", timeout=timeout)
|
||||
|
||||
if data.get("result", "ok").lower() != "ok":
|
||||
raise exceptions.ServerError(data["result"])
|
||||
@@ -1757,15 +1839,16 @@ class Session(object):
|
||||
raise ValueError("No user or session given")
|
||||
await self._message_queue.put(json.dumps({"action": "interuser", "data": data, "sessionid": session, "userid": user}))
|
||||
|
||||
async def upload(self, nodeid, source, target, unique_file_tunnel=False, timeout=None):
|
||||
async def upload(self, node, source, target, unique_file_tunnel=False, timeout=None):
|
||||
'''
|
||||
Upload a stream to a device. This creates an _File and destroys it every call. If you need to upload multiple files, use {@link Session#file_explorer} instead.
|
||||
Upload a stream to a device.
|
||||
|
||||
Args:
|
||||
nodeid (str): Unique id to upload stream to
|
||||
node (~meshctrl.device.Device|str): Device or id of device to which to upload the file. If it is a device, it must have a ~meshctrl.mesh.Mesh device associated with it (the default). If it is a string, the device will be fetched prior to tunnel creation.
|
||||
source (io.IOBase): An IO instance from which to read the data. Must be open for reading.
|
||||
target (str): Path which to upload stream to on remote device
|
||||
unique_file_tunnel (bool): True: Create a unique :py:class:`~meshctrl.files.Files` for this call, which will be cleaned up on return, else use cached or cache :py:class:`~meshctrl.files.Files`
|
||||
timeout (int): duration in seconds to wait for a response before throwing an error
|
||||
|
||||
Raises:
|
||||
:py:class:`~meshctrl.exceptions.FileTransferError`: File transfer failed. Info available on the `stats` property
|
||||
@@ -1774,23 +1857,26 @@ class Session(object):
|
||||
Returns:
|
||||
dict: {result: bool whether upload succeeded, size: number of bytes uploaded}
|
||||
'''
|
||||
if not isinstance(node, device.Device):
|
||||
node = await self.device_info(node)
|
||||
if unique_file_tunnel:
|
||||
async with self.file_explorer(nodeid) as files:
|
||||
return await files.upload(source, target)
|
||||
async with self.file_explorer(node) as files:
|
||||
return await files.upload(source, target, timeout=timeout)
|
||||
else:
|
||||
files = await self._cached_file_explorer(nodeid, nodeid)
|
||||
files = await self._cached_file_explorer(node, node.nodeid)
|
||||
return await files.upload(source, target, timeout=timeout)
|
||||
|
||||
|
||||
async def upload_file(self, nodeid, filepath, target, unique_file_tunnel=False, timeout=None):
|
||||
async def upload_file(self, node, filepath, target, unique_file_tunnel=False, timeout=None):
|
||||
'''
|
||||
Friendly wrapper around :py:class:`~meshctrl.session.Session.upload` to upload from a filepath. Creates a ReadableStream and calls upload.
|
||||
|
||||
Args:
|
||||
nodeid (str): Unique id to upload file to
|
||||
node (~meshctrl.device.Device|str): Device or id of device to which to upload the file. If it is a device, it must have a ~meshctrl.mesh.Mesh device associated with it (the default). If it is a string, the device will be fetched prior to tunnel creation.
|
||||
filepath (str): Path from which to read the data
|
||||
target (str): Path which to upload file to on remote device
|
||||
unique_file_tunnel (bool): True: Create a unique :py:class:`~meshctrl.files.Files` for this call, which will be cleaned up on return, else use cached or cache :py:class:`~meshctrl.files.Files`
|
||||
timeout (int): duration in seconds to wait for a response before throwing an error
|
||||
|
||||
Raises:
|
||||
:py:class:`~meshctrl.exceptions.FileTransferError`: File transfer failed. Info available on the `stats` property
|
||||
@@ -1800,72 +1886,98 @@ class Session(object):
|
||||
dict: {result: bool whether upload succeeded, size: number of bytes uploaded}
|
||||
'''
|
||||
with open(filepath, "rb") as f:
|
||||
return await self.upload(nodeid, f, target, unique_file_tunnel, timeout=timeout)
|
||||
return await self.upload(node, f, target, unique_file_tunnel, timeout=timeout)
|
||||
|
||||
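upload and upload_file now accept either a ~meshctrl.device.Device or a plain node id string and return a dict with the result flag and byte count. A minimal usage sketch; the server URL, credentials, node id and paths are placeholders:

import asyncio
import io
import meshctrl

async def main() -> None:
    async with meshctrl.Session("wss://example.invalid", user="admin",
                                password="changeme", ignore_ssl=True) as session:
        # Upload from an in-memory stream...
        data = io.BytesIO(b"hello from libmeshctrl")
        r = await session.upload("node//abc123", data, "/tmp/hello.txt", timeout=30)
        print(r["result"], r["size"])
        # ...or straight from a local file, with a throwaway file tunnel.
        r = await session.upload_file("node//abc123", "local.bin", "/tmp/remote.bin",
                                      unique_file_tunnel=True, timeout=30)
        print(r["result"], r["size"])

asyncio.run(main())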
async def download(self, nodeid, source, target=None, unique_file_tunnel=False, timeout=None):
|
||||
async def download(self, node, source, target=None, skip_http_attempt=False, skip_ws_attempt=False, unique_file_tunnel=False, timeout=None):
|
||||
'''
|
||||
Download a file from a device into a writable stream. This creates an :py:class:`~meshctrl.files.Files` and destroys it every call. If you need to upload multiple files, use :py:class:`~meshctrl.session.Session.file_explorer` instead.
|
||||
|
||||
Args:
|
||||
nodeid (str): Unique id to download file from
|
||||
node (~meshctrl.device.Device|str): Device or id of device from which to download the file. If it is a device, it must have a ~meshctrl.mesh.Mesh device associated with it (the default). If it is a string, the device will be fetched prior to tunnel creation.
|
||||
source (str): Path from which to download from device
|
||||
target (io.IOBase): Stream to which to write data. If None, create new BytesIO which is both readable and writable.
|
||||
skip_http_attempt (bool): Meshcentral has a way to download files through http(s) instead of through the websocket. This method tends to be much faster than using the websocket, so we try it first. Setting this to True will skip that attempt and just use the established websocket connection.
|
||||
skip_ws_attempt (bool): Like skip_http_attempt, except just throw an error if the http attempt fails instead of trying with the websocket
|
||||
unique_file_tunnel (bool): True: Create a unique :py:class:`~meshctrl.files.Files` for this call, which will be cleaned up on return, else use cached or cache :py:class:`~meshctrl.files.Files`
|
||||
timeout (int): duration in seconds to wait for a response before throwing an error
|
||||
|
||||
Raises:
|
||||
:py:class:`~meshctrl.exceptions.FileTransferError`: File transfer failed. Info available on the `stats` property
|
||||
:py:class:`~meshctrl.exceptions.FileTransferCancelled`: File transfer cancelled. Info available on the `stats` property
|
||||
|
||||
Returns:
|
||||
io.IOBase: The stream which has been downloaded into. Cursor will be at the end of the stream.
|
||||
io.IOBase: The stream which has been downloaded into. Cursor will be at the beginning of where the file is downloaded.
|
||||
'''
|
||||
if not isinstance(node, device.Device):
|
||||
node = await self.device_info(node)
|
||||
if target is None:
|
||||
target = io.BytesIO()
|
||||
start = target.tell()
|
||||
if unique_file_tunnel:
|
||||
async with self.file_explorer(nodeid) as files:
|
||||
await files.download(source, target)
|
||||
async with self.file_explorer(node) as files:
|
||||
await files.download(source, target, skip_http_attempt=skip_http_attempt, skip_ws_attempt=skip_ws_attempt, timeout=timeout)
|
||||
target.seek(start)
|
||||
return target
|
||||
else:
|
||||
files = await self._cached_file_explorer(nodeid, nodeid)
|
||||
await files.download(source, target, timeout=timeout)
|
||||
files = await self._cached_file_explorer(node, node.nodeid)
|
||||
await files.download(source, target, skip_http_attempt=skip_http_attempt, skip_ws_attempt=skip_ws_attempt, timeout=timeout)
|
||||
target.seek(start)
|
||||
return target
|
||||
|
||||
async def download_file(self, nodeid, source, filepath, unique_file_tunnel=False, timeout=None):
|
||||
async def download_file(self, node, source, filepath, skip_http_attempt=False, skip_ws_attempt=False, unique_file_tunnel=False, timeout=None):
|
||||
'''
|
||||
Friendly wrapper around :py:class:`~meshctrl.session.Session.download` to download to a filepath. Creates a WritableStream and calls download.
|
||||
|
||||
Args:
|
||||
nodeid (str): Unique id to download file from
|
||||
node (~meshctrl.device.Device|str): Device or id of device from which to download the file. If it is a device, it must have a ~meshctrl.mesh.Mesh device associated with it (the default). If it is a string, the device will be fetched prior to tunnel creation.
|
||||
source (str): Path from which to download from device
|
||||
filepath (str): Path to which to download data
|
||||
skip_http_attempt (bool): Meshcentral has a way to download files through http(s) instead of through the websocket. This method tends to be much faster than using the websocket, so we try it first. Setting this to True will skip that attempt and just use the established websocket connection.
|
||||
skip_ws_attempt (bool): Like skip_http_attempt, except just throw an error if the http attempt fails instead of trying with the websocket
|
||||
unique_file_tunnel (bool): True: Create a unique :py:class:`~meshctrl.files.Files` for this call, which will be cleaned up on return, else use cached or cache :py:class:`~meshctrl.files.Files`
|
||||
timeout (int): duration in seconds to wait for a response before throwing an error
|
||||
|
||||
Raises:
|
||||
:py:class:`~meshctrl.exceptions.FileTransferError`: File transfer failed. Info available on the `stats` property
|
||||
:py:class:`~meshctrl.exceptions.FileTransferCancelled`: File transfer cancelled. Info available on the `stats` property
|
||||
|
||||
Returns:
|
||||
io.IOBase: The stream which has been downloaded into. Cursor will be at the end of the stream.
|
||||
None
|
||||
'''
|
||||
with open(filepath, "wb") as f:
|
||||
return await self.download(nodeid, source, f, unique_file_tunnel, timeout=timeout)
|
||||
await self.download(node, source, f, skip_http_attempt=skip_http_attempt, skip_ws_attempt=skip_ws_attempt, unique_file_tunnel=unique_file_tunnel, timeout=timeout)
|
||||
|
||||
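download and download_file gain skip_http_attempt/skip_ws_attempt switches: the http(s) path is tried first because it is usually faster, with the websocket path as the fallback. A minimal sketch with placeholder connection details:

import asyncio
import meshctrl

async def main() -> None:
    async with meshctrl.Session("wss://example.invalid", user="admin",
                                password="changeme", ignore_ssl=True) as session:
        # With no target given, download() allocates a BytesIO and returns it with
        # the cursor at the start of the downloaded data.
        stream = await session.download("node//abc123", "/tmp/hello.txt", timeout=30)
        print(stream.read())
        # Force the websocket path by skipping the http attempt.
        await session.download_file("node//abc123", "/tmp/hello.txt", "hello.txt",
                                    skip_http_attempt=True, timeout=30)

asyncio.run(main())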
async def _cached_file_explorer(self, nodeid, _id):
|
||||
async def _cached_file_explorer(self, node, _id):
|
||||
if (_id not in self._file_tunnels or not self._file_tunnels[_id].alive):
|
||||
self._file_tunnels[_id] = self.file_explorer(nodeid)
|
||||
self._file_tunnels[_id] = await self.file_explorer(node).__aenter__()
|
||||
await self._file_tunnels[_id].initialized.wait()
|
||||
return self._file_tunnels[_id]
|
||||
|
||||
def file_explorer(self, nodeid):
|
||||
def file_explorer(self, node):
|
||||
'''
|
||||
Create, initialize, and return an :py:class:`~meshctrl.files.Files` object for the given node
|
||||
|
||||
Args:
|
||||
nodeid (str): Unique id on which to open file explorer
|
||||
node (~meshctrl.device.Device|str): Device or id of device on which to open file explorer. If it is a device, it must have a ~meshctrl.mesh.Mesh device associated with it (the default). If it is a string, the device will be fetched prior to tunnel creation.
|
||||
|
||||
Returns:
|
||||
:py:class:`~meshctrl.files.Files`: A newly initialized file explorer.
|
||||
'''
|
||||
return files.Files(self, nodeid)
|
||||
'''
|
||||
return _FileExplorerWrapper(self, node)
|
||||
|
||||
|
||||
|
||||
# This is a little yucky, but I can't get a good API otherwise. Since Tunnel objects are only useable as context managers anyway, this should be fine.
|
||||
class _FileExplorerWrapper:
|
||||
def __init__(self, session, node):
|
||||
self.session = session
|
||||
self.node = node
|
||||
self._files = None
|
||||
|
||||
async def __aenter__(self):
|
||||
if not isinstance(self.node, device.Device):
|
||||
self.node = await self.session.device_info(self.node)
|
||||
self._files = files.Files(self.session, self.node)
|
||||
return await self._files.__aenter__()
|
||||
|
||||
async def __aexit__(self, exc_t, exc_v, exc_tb):
|
||||
return await self._files.__aexit__(exc_t, exc_v, exc_tb)
|
||||
|
||||
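Because the wrapper above is only usable as an async context manager, file_explorer is entered with `async with`; the device is resolved on entry and the tunnel is torn down on exit. A minimal sketch with placeholder connection details:

import asyncio
import meshctrl

async def main() -> None:
    async with meshctrl.Session("wss://example.invalid", user="admin",
                                password="changeme", ignore_ssl=True) as session:
        # One tunnel, reused for several file operations on the same device.
        async with session.file_explorer("node//abc123") as files:
            print(await files.ls("/tmp", timeout=10))
            with open("local.bin", "rb") as f:
                print(await files.upload(f, "/tmp/remote.bin", timeout=30))

asyncio.run(main())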
@@ -78,7 +78,6 @@ class Shell(tunnel.Tunnel):
|
||||
read_bytes = 0
|
||||
while True:
|
||||
d = self._buffer.read1(length-read_bytes if length is not None else -1)
|
||||
# print(f"read: {d}")
|
||||
read_bytes += len(d)
|
||||
ret.append(d)
|
||||
if length is not None and read_bytes >= length:
|
||||
@@ -163,7 +162,6 @@ class SmartShell(object):
|
||||
command += "\n"
|
||||
await self._shell.write(command)
|
||||
data = await self._shell.expect(self._regex, timeout=timeout)
|
||||
print(repr(data))
|
||||
return data[:self._compiled_regex.search(data).span()[0]]
|
||||
|
||||
@property
|
||||
@@ -178,14 +176,18 @@ class SmartShell(object):
|
||||
def initialized(self):
|
||||
return self._shell.initialized
|
||||
|
||||
@property
|
||||
def _socket_open(self):
|
||||
return self._shell._socket_open
|
||||
|
||||
async def close(self):
|
||||
await self._init_task
|
||||
await asyncio.wait_for(self._init_task, 10)
|
||||
return await self._shell.close()
|
||||
|
||||
async def __aenter__(self):
|
||||
await self._init_task
|
||||
await self._shell.__aenter__()
|
||||
await asyncio.wait_for(self._init_task, 10)
|
||||
return self
|
||||
|
||||
async def __aexit__(self, *args):
|
||||
return await self._shell.__aexit__(*args)
|
||||
await self.close()
|
||||
|
||||
@@ -4,6 +4,8 @@ import websockets.asyncio
|
||||
import websockets.asyncio.client
|
||||
import asyncio
|
||||
import ssl
|
||||
from python_socks.async_.asyncio import Proxy
|
||||
import urllib
|
||||
from . import exceptions
|
||||
from . import util
|
||||
from . import constants
|
||||
@@ -43,26 +45,18 @@ class Tunnel(object):
|
||||
|
||||
async def _main_loop(self):
|
||||
try:
|
||||
authcookie = await self._session._send_command_no_response_id({ "action":"authcookie" })
|
||||
self._authcookie = await self._session._send_command_no_response_id({ "action":"authcookie" })
|
||||
|
||||
options = {}
|
||||
if self._session._ignore_ssl:
|
||||
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
|
||||
ssl_context.check_hostname = False
|
||||
ssl_context.verify_mode = ssl.CERT_NONE
|
||||
options = { "ssl": ssl_context }
|
||||
if self._session._ssl_context is not None:
|
||||
options["ssl"] = self._session._ssl_context
|
||||
|
||||
# Setup the HTTP proxy if needed
|
||||
# if (self._session._proxy != None):
|
||||
# options.agent = new https_proxy_agent(urllib.parse(this._proxy))
|
||||
|
||||
if (self.node_id.split('/') != 3) and (self._session._currentDomain is not None):
|
||||
self.node_id = f"node/{self._session._currentDomain}/{self.node_id}"
|
||||
if (len(self.node_id.split('/')) != 3):
|
||||
self.node_id = f"node/{self._session._currentDomain or ''}/{self.node_id}"
|
||||
|
||||
self._tunnel_id = util._get_random_hex(6)
|
||||
|
||||
initialize_tunnel_response = await self._session._send_command({ "action": 'msg', "nodeid": self.node_id, "type": 'tunnel', "usage": 1, "value": '*/meshrelay.ashx?p=' + str(self._protocol) + '&nodeid=' + self.node_id + '&id=' + self._tunnel_id + '&rauth=' + authcookie["rcookie"] }, "initialize_tunnel")
|
||||
|
||||
initialize_tunnel_response = await self._session._send_command({ "action": 'msg', "nodeid": self.node_id, "type": 'tunnel', "usage": 1, "value": '*/meshrelay.ashx?p=' + str(self._protocol) + '&nodeid=' + self.node_id + '&id=' + self._tunnel_id + '&rauth=' + self._authcookie["rcookie"] }, "initialize_tunnel")
|
||||
if initialize_tunnel_response.get("result", None) != "OK":
|
||||
self._main_loop_error = exceptions.ServerError(initialize_tunnel_response.get("result", "Failed to initialize remote tunnel"))
|
||||
self._socket_open.clear()
|
||||
@@ -70,16 +64,10 @@ class Tunnel(object):
|
||||
self.initialized.set()
|
||||
return
|
||||
|
||||
self.url = self._session.url.replace('/control.ashx', '/meshrelay.ashx?browser=1&p=' + str(self._protocol) + '&nodeid=' + self.node_id + '&id=' + self._tunnel_id + '&auth=' + authcookie["cookie"])
|
||||
self.url = self._session.url.replace('/control.ashx', '/meshrelay.ashx?browser=1&p=' + str(self._protocol) + '&nodeid=' + self.node_id + '&id=' + self._tunnel_id + '&auth=' + self._authcookie["cookie"])
|
||||
|
||||
# headers = websockets.datastructures.Headers()
|
||||
|
||||
# if (self._password):
|
||||
# token = self._token if self._token else b""
|
||||
# headers['x-meshauth'] = (base64.b64encode(self._user.encode()) + b',' + base64.b64encode(self._password.encode()) + token).decode()
|
||||
|
||||
# options["additional_headers"] = headers
|
||||
async for websocket in websockets.asyncio.client.connect(self.url, process_exception=util._process_websocket_exception, **options):
|
||||
async for websocket in websockets.asyncio.client.connect(self.url, proxy=self._session._proxy, process_exception=util._process_websocket_exception, **options):
|
||||
self.alive = True
|
||||
self._socket_open.set()
|
||||
try:
|
||||
@@ -89,7 +77,6 @@ class Tunnel(object):
|
||||
except* websockets.ConnectionClosed as e:
|
||||
self._socket_open.clear()
|
||||
if not self.auto_reconnect:
|
||||
self.alive = False
|
||||
raise
|
||||
except* Exception as eg:
|
||||
self.alive = False
|
||||
@@ -104,4 +91,4 @@ class Tunnel(object):
|
||||
await websocket.send(message)
|
||||
|
||||
async def _listen_data_task(self, websocket):
|
||||
raise NotImplementedError("Listen data not implemented")
|
||||
raise NotImplementedError("Listen data not implemented")
|
||||
|
||||
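The tunnel now hands the session's proxy straight to websockets.asyncio.client.connect(..., proxy=...), replacing the earlier hand-rolled proxying. On the caller's side this surfaces as the proxy argument to Session, which the tests below exercise against a squid container. A minimal sketch; the server URL, proxy URL and credentials are placeholders:

import asyncio
import meshctrl

async def main() -> None:
    async with meshctrl.Session(
        "wss://example.invalid",
        user="admin",
        password="changeme",
        ignore_ssl=True,
        proxy="http://127.0.0.1:3128",
    ) as session:
        print(await session.ping(timeout=10))

asyncio.run(main())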
@@ -9,6 +9,8 @@ import re
|
||||
import websockets
|
||||
import ssl
|
||||
import functools
|
||||
import urllib
|
||||
import python_socks
|
||||
from . import exceptions
|
||||
|
||||
def _encode_cookie(o, key):
|
||||
@@ -137,19 +139,31 @@ def compare_dict(dict1, dict2):
|
||||
return False
|
||||
|
||||
def _check_socket(f):
|
||||
@functools.wraps(f)
|
||||
async def wrapper(self, *args, **kwargs):
|
||||
await self.initialized.wait()
|
||||
async def _check_errs(self):
|
||||
if not self.alive and self._main_loop_error is not None:
|
||||
raise self._main_loop_error
|
||||
elif not self.alive:
|
||||
elif not self.alive and self.initialized.is_set():
|
||||
raise exceptions.SocketError("Socket Closed")
|
||||
return await f(self, *args, **kwargs)
|
||||
|
||||
@functools.wraps(f)
|
||||
async def wrapper(self, *args, **kwargs):
|
||||
try:
|
||||
await asyncio.wait_for(self.initialized.wait(), 10)
|
||||
await _check_errs(self)
|
||||
await asyncio.wait_for(self._socket_open.wait(), 10)
|
||||
finally:
|
||||
await _check_errs(self)
|
||||
return await f(self, *args, **kwargs)
|
||||
return wrapper
|
||||
|
||||
def _process_websocket_exception(exc):
|
||||
tmp = websockets.asyncio.client.process_exception(exc)
|
||||
# SSLVerification error is a subclass of OSError, but doesn't make sense no retry, so we need to handle it separately.
|
||||
# SSLVerification error is a subclass of OSError, but doesn't make sense to retry, so we need to handle it separately.
|
||||
if isinstance(exc, (ssl.SSLCertVerificationError, TimeoutError)):
|
||||
return exc
|
||||
return tmp
|
||||
if isinstance(exc, python_socks._errors.ProxyError):
|
||||
return None
|
||||
# Proxy errors show up like this now, and it's default to error out. Handle explicitly.
|
||||
if isinstance(exc, websockets.exceptions.InvalidProxyMessage):
|
||||
return None
|
||||
return tmp
|
||||
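The reworked _check_socket above waits (with a 10 second cap) for the tunnel to initialize and for the socket to open, re-raising any stored main-loop error before and after the wait. A rough illustration of that decorator shape (hypothetical names, not the library's code):

import asyncio
import functools

def require_ready(f):
    # Wait for readiness, surface a stored error, then run the wrapped coroutine.
    @functools.wraps(f)
    async def wrapper(self, *args, **kwargs):
        await asyncio.wait_for(self.ready.wait(), 10)
        if self.error is not None:
            raise self.error
        return await f(self, *args, **kwargs)
    return wrapper

class Client:
    def __init__(self):
        self.ready = asyncio.Event()
        self.error = None

    @require_ready
    async def send(self, message):
        return f"sent {message}"

async def main():
    client = Client()
    client.ready.set()
    print(await client.send("ping"))

asyncio.run(main())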
tests/.gitignore (vendored)
@@ -1 +1,2 @@
|
||||
/data
|
||||
/data
|
||||
/environment/scripts/meshcentral/users.json
|
||||
@@ -43,10 +43,8 @@ class Agent(object):
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_t, exc_v, exc_tb):
|
||||
try:
|
||||
requests.post("{self._clienturl}/remove-agent/{self.nodeid}")
|
||||
except:
|
||||
pass
|
||||
requests.post(f"{self._clienturl}/remove-agent/{self.nodeid}")
|
||||
|
||||
|
||||
class TestEnvironment(object):
|
||||
def __init__(self):
|
||||
@@ -54,15 +52,23 @@ class TestEnvironment(object):
|
||||
self._subp = None
|
||||
self.mcurl = "wss://localhost:8086"
|
||||
self.clienturl = "http://localhost:5000"
|
||||
self._dockerurl = "host.docker.internal:8086"
|
||||
self.dockerurl = "host.docker.internal:8086"
|
||||
self.proxyurl = "http://localhost:3128"
|
||||
|
||||
def __enter__(self):
|
||||
global _docker_process
|
||||
if _docker_process is not None:
|
||||
self._subp = _docker_process
|
||||
return self
|
||||
self._subp = _docker_process = subprocess.Popen(["docker", "compose", "up", "--build", "--force-recreate", "--no-deps"], stdout=subprocess.DEVNULL, cwd=thisdir)
|
||||
timeout = 30
|
||||
# Destroy the env in case it wasn't killed correctly last time.
|
||||
subprocess.check_call(["docker", "compose", "down"], stdout=subprocess.DEVNULL, cwd=thisdir)
|
||||
self._subp = _docker_process = subprocess.Popen(["docker", "compose", "up", "--build", "--force-recreate", "--no-deps"], cwd=thisdir)
|
||||
if not self._wait_for_meshcentral():
|
||||
self.__exit__(None, None, None)
|
||||
raise Exception("Failed to create docker instance")
|
||||
return self
|
||||
|
||||
def _wait_for_meshcentral(self, timeout=30):
|
||||
start = time.time()
|
||||
while time.time() - start < timeout:
|
||||
try:
|
||||
@@ -79,16 +85,23 @@ class TestEnvironment(object):
|
||||
pass
|
||||
time.sleep(1)
|
||||
else:
|
||||
self.__exit__(None, None, None)
|
||||
raise Exception("Failed to create docker instance")
|
||||
return self
|
||||
|
||||
return False
|
||||
return True
|
||||
|
||||
def __exit__(self, exc_t, exc_v, exc_tb):
|
||||
pass
|
||||
|
||||
def create_agent(self, meshid):
|
||||
return Agent(meshid, self.mcurl, self.clienturl, self._dockerurl)
|
||||
return Agent(meshid, self.mcurl, self.clienturl, self.dockerurl)
|
||||
|
||||
# Restart our docker instances, to test reconnect code.
|
||||
def restart_mesh(self):
|
||||
subprocess.check_call(["docker", "container", "restart", "meshctrl-meshcentral"], stdout=subprocess.DEVNULL, cwd=thisdir)
|
||||
assert self._wait_for_meshcentral(), "Failed to restart docker instance"
|
||||
|
||||
def restart_proxy(self):
|
||||
subprocess.check_call(["docker", "container", "restart", "meshctrl-squid"], stdout=subprocess.DEVNULL, cwd=thisdir)
|
||||
|
||||
|
||||
def _kill_docker_process():
|
||||
if _docker_process is not None:
|
||||
|
||||
@@ -19,9 +19,9 @@ services:
|
||||
# # mongodb data-directory - A must for data persistence
|
||||
# - ./meshcentral/mongodb_data:/data/db
|
||||
networks:
|
||||
- meshctrl
|
||||
- meshctrl
|
||||
extra_hosts:
|
||||
- "host.docker.internal:host-gateway"
|
||||
- "host.docker.internal:host-gateway"
|
||||
|
||||
meshcentral:
|
||||
restart: always
|
||||
@@ -49,4 +49,21 @@ services:
|
||||
healthcheck:
|
||||
test: curl -k --fail https://localhost:443/ || exit 1
|
||||
interval: 5s
|
||||
timeout: 120s
|
||||
timeout: 120s
|
||||
|
||||
squid:
|
||||
image: ubuntu/squid:latest
|
||||
restart: unless-stopped
|
||||
container_name: meshctrl-squid
|
||||
ports:
|
||||
- 3128:3128
|
||||
|
||||
networks:
|
||||
- meshctrl
|
||||
|
||||
extra_hosts:
|
||||
- "host.docker.internal:host-gateway"
|
||||
|
||||
volumes:
|
||||
- ./config/squid/conf.d:/etc/squid/conf.d
|
||||
- ./config/squid/squid.conf:/etc/squid/squid.conf
|
||||
tests/environment/config/meshcentral/overrides/.gitignore (vendored, new file)
@@ -0,0 +1,4 @@
|
||||
# Ignore everything in this directory
|
||||
*
|
||||
# Except this file
|
||||
!.gitignore
|
||||
tests/environment/config/squid/conf.d/meshctrl.conf (new file)
@@ -0,0 +1,11 @@
|
||||
# Logs are managed by logrotate on Debian
|
||||
logfile_rotate 0
|
||||
|
||||
acl all src all
|
||||
acl Safe_ports port 8086
|
||||
acl SSS_ports port 8086
|
||||
http_access allow all
|
||||
debug_options ALL,0 85,2 88,2
|
||||
|
||||
# Set max_filedescriptors to avoid using system's RLIMIT_NOFILE. See LP: #1978272
|
||||
max_filedescriptors 1024
|
||||
tests/environment/config/squid/squid.conf (new file; diff suppressed because it is too large)
@@ -3,5 +3,6 @@ RUN apk add curl
|
||||
RUN apk add python3
|
||||
WORKDIR /opt/meshcentral/
|
||||
COPY ./scripts/meshcentral ./scripts
|
||||
COPY ./meshcentral/data /opt/meshcentral/meshcentral-data
|
||||
COPY ./config/meshcentral/data /opt/meshcentral/meshcentral-data
|
||||
COPY ./config/meshcentral/overrides /opt/meshcentral/meshcentral
|
||||
CMD ["python3", "/opt/meshcentral/scripts/create_users.py"]
|
||||
@@ -1 +0,0 @@
|
||||
{"admin": "3U6zP4iIes5ISH15XxjYLjJcCdw9jU0m", "privileged": "aiIO0zLMGsU7++FYVDNxhlpYlZ1andRB", "unprivileged": "Cz9OMV1wkVd9pXdWi4lkBAAu6TMt43MA"}
|
||||
@@ -1,6 +1,6 @@
|
||||
requests
|
||||
pytest-asyncio
|
||||
cffi==1.17.1
|
||||
cryptography==43.0.3
|
||||
cryptography~=44.0.1
|
||||
pycparser==2.22
|
||||
websockets==13.1
|
||||
websockets~=15.0.0
|
||||
@@ -5,9 +5,10 @@ import meshctrl
|
||||
import requests
|
||||
import io
|
||||
import random
|
||||
import time
|
||||
|
||||
async def test_commands(env):
|
||||
async with meshctrl.session.Session(env.mcurl, user="admin", password=env.users["admin"], ignore_ssl=True) as admin_session:
|
||||
async with meshctrl.Session("wss://" + env.dockerurl, user="admin", password=env.users["admin"], ignore_ssl=True, proxy=env.proxyurl) as admin_session:
|
||||
mesh = await admin_session.add_device_group("test", description="This is a test group", amtonly=False, features=0, consent=0, timeout=10)
|
||||
try:
|
||||
with env.create_agent(mesh.short_meshid) as agent:
|
||||
@@ -52,8 +53,20 @@ async def test_commands(env):
|
||||
finally:
|
||||
assert (await admin_session.remove_device_group(mesh.meshid, timeout=10)), "Failed to remove device group"
|
||||
|
||||
async def test_os_proxy_bypass():
|
||||
os.environ["no_proxy"] = "*"
|
||||
import urllib
|
||||
import urllib.request
|
||||
os_proxies = urllib.request.getproxies()
|
||||
meshctrl_proxies = meshctrl.files.urllib.request.getproxies()
|
||||
print(f"os_proxies: {os_proxies}")
|
||||
print(f"meshctrl_proxies: {meshctrl_proxies}")
|
||||
assert meshctrl_proxies.get("no", None) == None, "Meshctrl is using system proxies"
|
||||
assert os_proxies.get("no", None) == "*", "System is using meshctrl proxies"
|
||||
assert os_proxies != meshctrl_proxies, "Override didn't work"
|
||||
|
||||
async def test_upload_download(env):
|
||||
async with meshctrl.session.Session(env.mcurl, user="admin", password=env.users["admin"], ignore_ssl=True) as admin_session:
|
||||
async with meshctrl.Session("wss://" + env.dockerurl, user="admin", password=env.users["admin"], ignore_ssl=True, proxy=env.proxyurl) as admin_session:
|
||||
mesh = await admin_session.add_device_group("test", description="This is a test group", amtonly=False, features=0, consent=0, timeout=10)
|
||||
try:
|
||||
with env.create_agent(mesh.short_meshid) as agent:
|
||||
@@ -69,7 +82,7 @@ async def test_upload_download(env):
|
||||
else:
|
||||
break
|
||||
|
||||
randdata = random.randbytes(2000000)
|
||||
randdata = random.randbytes(20000000)
|
||||
upfilestream = io.BytesIO(randdata)
|
||||
downfilestream = io.BytesIO()
|
||||
|
||||
@@ -78,7 +91,7 @@ async def test_upload_download(env):
|
||||
async with admin_session.file_explorer(agent.nodeid) as files:
|
||||
r = await files.upload(upfilestream, f"{pwd}/test", timeout=5)
|
||||
print("\ninfo files_upload: {}\n".format(r))
|
||||
assert r["result"] == "success", "Upload failed"
|
||||
assert r["result"] == True, "Upload failed"
|
||||
assert r["size"] == len(randdata), "Uploaded wrong number of bytes"
|
||||
for f in await files.ls(pwd, timeout=5):
|
||||
if f["n"] == "test" and f["t"] == meshctrl.constants.FileType.FILE:
|
||||
@@ -95,10 +108,23 @@ async def test_upload_download(env):
|
||||
else:
|
||||
raise Exception("Uploaded file not found")
|
||||
|
||||
r = await files.download(f"{pwd}/test", downfilestream, timeout=5)
|
||||
start = time.perf_counter()
|
||||
r = await files.download(f"{pwd}/test", downfilestream, skip_ws_attempt=True, timeout=5)
|
||||
print("\ninfo files_download: {}\n".format(r))
|
||||
assert r["result"] == "success", "Domnload failed"
|
||||
assert r["result"] == True, "Download failed"
|
||||
assert r["size"] == len(randdata), "Downloaded wrong number of bytes"
|
||||
print(f"http download time: {time.perf_counter()-start}")
|
||||
|
||||
downfilestream.seek(0)
|
||||
assert downfilestream.read() == randdata, "Got wrong data back"
|
||||
downfilestream.seek(0)
|
||||
|
||||
start = time.perf_counter()
|
||||
r = await files.download(f"{pwd}/test", downfilestream, skip_http_attempt=True, timeout=5)
|
||||
print("\ninfo files_download: {}\n".format(r))
|
||||
assert r["result"] == True, "Download failed"
|
||||
assert r["size"] == len(randdata), "Downloaded wrong number of bytes"
|
||||
print(f"ws download time: {time.perf_counter()-start}")
|
||||
|
||||
downfilestream.seek(0)
|
||||
assert downfilestream.read() == randdata, "Got wrong data back"
|
||||
|
||||
@@ -8,16 +8,48 @@ import ssl
|
||||
import requests
|
||||
|
||||
async def test_sanity(env):
|
||||
async with meshctrl.session.Session(env.mcurl, user="unprivileged", password=env.users["unprivileged"], ignore_ssl=True) as s:
|
||||
async with meshctrl.Session(env.mcurl, user="unprivileged", password=env.users["unprivileged"], ignore_ssl=True) as s:
|
||||
got_pong = asyncio.Event()
|
||||
async def _():
|
||||
async for raw in s.raw_messages():
|
||||
if raw == '{action:"pong"}':
|
||||
got_pong.set()
|
||||
break
|
||||
ping_task = None
|
||||
async with asyncio.TaskGroup() as tg:
|
||||
tg.create_task(asyncio.wait_for(_(), timeout=5))
|
||||
tg.create_task(asyncio.wait_for(got_pong.wait(), timeout=5))
|
||||
ping_task = tg.create_task(s.ping(timeout=10))
|
||||
print("\ninfo ping: {}\n".format(ping_task.result()))
|
||||
print("\ninfo user_info: {}\n".format(await s.user_info()))
|
||||
print("\ninfo server_info: {}\n".format(await s.server_info()))
|
||||
pass
|
||||
|
||||
async def test_proxy(env):
|
||||
async with meshctrl.Session("wss://" + env.dockerurl, user="unprivileged", password=env.users["unprivileged"], ignore_ssl=True, proxy=env.proxyurl) as s:
|
||||
pass
|
||||
|
||||
async def test_ssl(env):
|
||||
try:
|
||||
async with meshctrl.session.Session(env.mcurl, user="unprivileged", password=env.users["unprivileged"], ignore_ssl=False) as s:
|
||||
async with meshctrl.Session(env.mcurl, user="unprivileged", password=env.users["unprivileged"], ignore_ssl=False) as s:
|
||||
pass
|
||||
except* ssl.SSLCertVerificationError:
|
||||
pass
|
||||
else:
|
||||
raise Exception("Invalid SSL certificate accepted")
|
||||
raise Exception("Invalid SSL certificate accepted")
|
||||
|
||||
async def test_urlparse():
|
||||
# This tests the url port adding necessitated by python-socks. Our test environment doesn't use 443, so this is just a quick sanity test.
|
||||
try:
|
||||
async with meshctrl.Session("wss://localhost", user="unprivileged", password="Not a real password", ignore_ssl=True) as s:
|
||||
pass
|
||||
except* asyncio.TimeoutError:
|
||||
#We're not running a server, so timeout is our expected outcome
|
||||
pass
|
||||
|
||||
# This tests our check for wss/ws url schemes
|
||||
try:
|
||||
async with meshctrl.Session("https://localhost", user="unprivileged", password="Not a real password", ignore_ssl=True) as s:
|
||||
pass
|
||||
except* ValueError:
|
||||
pass
|
||||
@@ -5,11 +5,13 @@ import meshctrl
|
||||
import requests
|
||||
import random
|
||||
import io
|
||||
import traceback
|
||||
import time
|
||||
thisdir = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
async def test_admin(env):
|
||||
async with meshctrl.session.Session(env.mcurl, user="admin", password=env.users["admin"], ignore_ssl=True) as admin_session,\
|
||||
meshctrl.session.Session(env.mcurl, user="privileged", password=env.users["privileged"], ignore_ssl=True) as privileged_session:
|
||||
async with meshctrl.Session(env.mcurl, user="admin", password=env.users["admin"], ignore_ssl=True) as admin_session,\
|
||||
meshctrl.Session(env.mcurl, user="privileged", password=env.users["privileged"], ignore_ssl=True) as privileged_session:
|
||||
admin_users = await admin_session.list_users(timeout=10)
|
||||
print("\ninfo list_users: {}\n".format(admin_users))
|
||||
try:
|
||||
@@ -29,27 +31,71 @@ async def test_admin(env):
|
||||
assert len(no_sessions.keys()) == 0, "non-admin has admin acess"
|
||||
|
||||
assert len(admin_users) == len(env.users.keys()), "Admin cannot see correct number of users"
|
||||
assert len(admin_sessions) == 2, "Admin cannot see correct number of oser sessions"
|
||||
assert len(admin_sessions) == 2, "Admin cannot see correct number of user sessions"
|
||||
|
||||
async def test_auto_reconnect(env):
|
||||
async with meshctrl.Session(env.mcurl, user="admin", password=env.users["admin"], ignore_ssl=True, auto_reconnect=True) as admin_session:
|
||||
env.restart_mesh()
|
||||
await asyncio.sleep(10)
|
||||
await admin_session.ping(timeout=10)
|
||||
|
||||
# As above, but with proxy
|
||||
async with meshctrl.Session("wss://" + env.dockerurl, user="admin", password=env.users["admin"], ignore_ssl=True, auto_reconnect=True, proxy=env.proxyurl) as admin_session:
|
||||
|
||||
env.restart_mesh()
|
||||
for i in range(3):
|
||||
try:
|
||||
await admin_session.ping(timeout=10)
|
||||
except* Exception as e:
|
||||
print("".join(traceback.format_exception(e)))
|
||||
pass
|
||||
else:
|
||||
break
|
||||
else:
|
||||
raise Exception("Failed to reconnect")
|
||||
|
||||
env.restart_proxy()
|
||||
for i in range(3):
|
||||
try:
|
||||
await admin_session.ping(timeout=10)
|
||||
except* Exception as e:
|
||||
print("".join(traceback.format_exception(e)))
|
||||
pass
|
||||
else:
|
||||
break
|
||||
else:
|
||||
raise Exception("Failed to reconnect")
|
||||
|
||||
|
||||
async def test_users(env):
|
||||
try:
|
||||
async with meshctrl.session.Session(env.mcurl[3:], user="admin", password=env.users["admin"], ignore_ssl=True) as admin_session:
|
||||
async with meshctrl.Session(env.mcurl[3:], user="admin", password=env.users["admin"], ignore_ssl=True) as admin_session:
|
||||
pass
|
||||
except* ValueError:
|
||||
pass
|
||||
else:
|
||||
raise Exception("Connected with bad URL")
|
||||
try:
|
||||
async with meshctrl.session.Session(env.mcurl, user="admin", ignore_ssl=True) as admin_session:
|
||||
async with meshctrl.Session(env.mcurl, user="admin", ignore_ssl=True) as admin_session:
|
||||
pass
|
||||
except* meshctrl.exceptions.MeshCtrlError:
|
||||
pass
|
||||
else:
|
||||
raise Exception("Connected with no password")
|
||||
async with meshctrl.session.Session(env.mcurl+"/", user="admin", password=env.users["admin"], ignore_ssl=True) as admin_session,\
|
||||
meshctrl.session.Session(env.mcurl, user="privileged", password=env.users["privileged"], ignore_ssl=True) as privileged_session,\
|
||||
meshctrl.session.Session(env.mcurl, user="unprivileged", password=env.users["unprivileged"], ignore_ssl=True) as unprivileged_session:
|
||||
|
||||
start = time.time()
|
||||
try:
|
||||
async with meshctrl.Session(env.mcurl, user="admin", password="The wrong password", ignore_ssl=True) as admin_session:
|
||||
pass
|
||||
except* meshctrl.exceptions.ServerError as eg:
|
||||
assert str(eg.exceptions[0]) == "Invalid Auth" or eg.exceptions[0].message == "Invalid Auth", "Didn't get invalid auth message"
|
||||
assert time.time() - start < 10, "Invalid auth wasn't raised until after timeout"
|
||||
pass
|
||||
else:
|
||||
raise Exception("Connected with bad password")
|
||||
async with meshctrl.Session(env.mcurl+"/", user="admin", password=env.users["admin"], ignore_ssl=True) as admin_session,\
|
||||
meshctrl.Session(env.mcurl, user="privileged", password=env.users["privileged"], ignore_ssl=True) as privileged_session,\
|
||||
meshctrl.Session(env.mcurl, user="unprivileged", password=env.users["unprivileged"], ignore_ssl=True) as unprivileged_session:
|
||||
|
||||
assert len(await admin_session.list_users(timeout=10)) == 3, "Wrong number of users"
|
||||
|
||||
@@ -74,17 +120,17 @@ async def test_users(env):
|
||||
assert len(await admin_session.list_users(timeout=10)) == 3, "Failed to remove user"
|
||||
|
||||
async def test_login_token(env):
|
||||
async with meshctrl.session.Session(env.mcurl, user="unprivileged", password=env.users["unprivileged"], ignore_ssl=True) as s:
|
||||
async with meshctrl.Session(env.mcurl, user="unprivileged", password=env.users["unprivileged"], ignore_ssl=True) as s:
|
||||
token = await s.add_login_token("test", expire=1, timeout=10)
|
||||
print("\ninfo add_login_token: {}\n".format(token))
|
||||
|
||||
async with meshctrl.session.Session(env.mcurl, user=token["tokenUser"], password=token["tokenPass"], ignore_ssl=True) as s2:
|
||||
async with meshctrl.Session(env.mcurl, user=token["tokenUser"], password=token["tokenPass"], ignore_ssl=True) as s2:
|
||||
assert (await s2.user_info())["_id"] == (await s.user_info())["_id"], "Login token logged into wrong account"
|
||||
# Wait for the login token to expire
|
||||
await asyncio.sleep(65)
|
||||
|
||||
try:
|
||||
async with meshctrl.session.Session(env.mcurl, user=token["tokenUser"], password=token["tokenPass"], ignore_ssl=True) as s2:
|
||||
async with meshctrl.Session(env.mcurl, user=token["tokenUser"], password=token["tokenPass"], ignore_ssl=True) as s2:
|
||||
pass
|
||||
except:
|
||||
pass
|
||||
@@ -94,7 +140,7 @@ async def test_login_token(env):
|
||||
token = await s.add_login_token("test2", timeout=10)
|
||||
token2 = await s.add_login_token("test3", timeout=10)
|
||||
print("\ninfo add_login_token_no_expire: {}\n".format(token))
|
||||
async with meshctrl.session.Session(env.mcurl, user=token["tokenUser"], password=token["tokenPass"], ignore_ssl=True) as s2:
|
||||
async with meshctrl.Session(env.mcurl, user=token["tokenUser"], password=token["tokenPass"], ignore_ssl=True) as s2:
|
||||
assert (await s2.user_info())["_id"] == (await s.user_info())["_id"], "Login token logged into wrong account"
|
||||
|
||||
r = await s.list_login_tokens(timeout=10)
|
||||
@@ -107,9 +153,9 @@ async def test_login_token(env):
|
||||
assert len(await s.remove_login_token([token2["name"]], timeout=10)) == 0, "Residual login tokens"
|
||||
|
||||
async def test_mesh_device(env):
|
||||
async with meshctrl.session.Session(env.mcurl, user="admin", password=env.users["admin"], ignore_ssl=True) as admin_session,\
|
||||
meshctrl.session.Session(env.mcurl, user="privileged", password=env.users["privileged"], ignore_ssl=True) as privileged_session,\
|
||||
meshctrl.session.Session(env.mcurl, user="unprivileged", password=env.users["unprivileged"], ignore_ssl=True) as unprivileged_session:
|
||||
async with meshctrl.Session(env.mcurl, user="admin", password=env.users["admin"], ignore_ssl=True) as admin_session,\
|
||||
meshctrl.Session(env.mcurl, user="privileged", password=env.users["privileged"], ignore_ssl=True) as privileged_session,\
|
||||
meshctrl.Session(env.mcurl, user="unprivileged", password=env.users["unprivileged"], ignore_ssl=True) as unprivileged_session:
|
||||
# Test creating a mesh
|
||||
mesh = await admin_session.add_device_group("test", description="This is a test group", amtonly=False, features=0, consent=0, timeout=10)
|
||||
print("\ninfo add_device_group: {}\n".format(mesh))
|
||||
@@ -157,21 +203,24 @@ async def test_mesh_device(env):
|
||||
|
||||
assert r[0].description == "New description", "Description either failed to change, or was changed by a user without permission to do so"
|
||||
|
||||
with env.create_agent(mesh.short_meshid) as agent:
|
||||
# There once was a bug that occured whenever running run_commands with multiple meshes. We need to add devices to both meshes to be sure that bug is squashed.
|
||||
with env.create_agent(mesh.short_meshid) as agent,\
|
||||
env.create_agent(mesh.short_meshid) as agent2,\
|
||||
env.create_agent(mesh2.short_meshid) as agent3:
|
||||
# Test agent added to device group being propagated correctly
|
||||
# Create agent isn't so good at waiting for the agent to show in the sessions. Give it a couple seconds to appear.
|
||||
for i in range(3):
|
||||
try:
|
||||
r = await admin_session.list_devices(timeout=10)
|
||||
print("\ninfo list_devices: {}\n".format(r))
|
||||
assert len(r) == 1, "Incorrect number of agents connected"
|
||||
assert len(r) == 3, "Incorrect number of agents connected"
|
||||
except:
|
||||
if i == 2:
|
||||
raise
|
||||
await asyncio.sleep(1)
|
||||
else:
|
||||
break
|
||||
assert len(await privileged_session.list_devices(timeout=10)) == 1, "Incorrect number of agents connected"
|
||||
assert len(await privileged_session.list_devices(timeout=10)) == 2, "Incorrect number of agents connected"
|
||||
assert len(await unprivileged_session.list_devices(timeout=10)) == 0, "Unprivileged account has access to agent it should not"
|
||||
|
||||
r = await admin_session.list_devices(details=True, timeout=10)
|
||||
@@ -183,6 +232,9 @@ async def test_mesh_device(env):
|
||||
r = await admin_session.list_devices(meshid=mesh.meshid, timeout=10)
|
||||
print("\ninfo list_devices_meshid: {}\n".format(r))
|
||||
|
||||
r = await admin_session.device_info(agent.nodeid, timeout=10)
|
||||
print("\ninfo admin_device_info: {}\n".format(r))
|
||||
|
||||
# Test editing device info propagating correctly
|
||||
assert await admin_session.edit_device(agent.nodeid, name="new_name", description="New Description", tags="device", consent=meshctrl.constants.ConsentFlags.all, timeout=10), "Failed to edit device info"
|
||||
|
||||
@@ -191,9 +243,12 @@ async def test_mesh_device(env):
|
||||
assert await admin_session.edit_device(agent.nodeid, consent=meshctrl.constants.ConsentFlags.none, timeout=10), "Failed to edit device info"
|
||||
|
||||
# Test run_commands
|
||||
r = await admin_session.run_command(agent.nodeid, "ls", timeout=10)
|
||||
r = await admin_session.run_command([agent.nodeid, agent2.nodeid], "ls", timeout=10)
|
||||
print("\ninfo run_command: {}\n".format(r))
|
||||
assert "meshagent" in r[agent.nodeid]["result"], "ls gave incorrect data"
|
||||
assert "meshagent" in r[agent2.nodeid]["result"], "ls gave incorrect data"
|
||||
assert "Run commands completed." not in r[agent.nodeid]["result"], "Didn't parse run command ending correctly"
|
||||
assert "Run commands completed." not in r[agent2.nodeid]["result"], "Didn't parse run command ending correctly"
|
||||
assert "meshagent" in (await privileged_session.run_command(agent.nodeid, "ls", timeout=10))[agent.nodeid]["result"], "ls gave incorrect data"
|
||||
|
||||
# Test run commands with ndividual device permissions
|
||||
@@ -222,7 +277,7 @@ async def test_mesh_device(env):
|
||||
|
||||
# Test getting individual device info
|
||||
r = await unprivileged_session.device_info(agent.nodeid, timeout=10)
|
||||
print("\ninfo device_info: {}\n".format(r))
|
||||
print("\ninfo unprivileged_device_info: {}\n".format(r))
|
||||
|
||||
# This device info includes the mesh ID of the device, even though the user doesn't have acces to that mesh. That's odd.
|
||||
# assert r.meshid is None, "Individual device is exposing its meshid"
|
||||
@@ -248,27 +303,27 @@ async def test_mesh_device(env):
|
||||
|
||||
assert await admin_session.move_to_device_group([agent.nodeid], mesh.name, isname=True, timeout=5), "Failed to move mesh to new device group by name"
|
||||
|
||||
# For now, this expects no response. If we ever figure out why the server isn't sending console information te us when it should, fix this.
|
||||
# For now, this expe namects no response. If we ever figure out why the server isn't sending console information te us when it should, fix this.
|
||||
# assert "meshagent" in (await unprivileged_session.run_command(agent.nodeid, "ls", timeout=10))[agent.nodeid]["result"], "ls gave incorrect data"
|
||||
try:
|
||||
await unprivileged_session.run_command(agent.nodeid, "ls", timeout=10)
|
||||
except:
|
||||
raise Exception("Failed to run command on device after it was moved to a new mesh while having individual device permissions")
|
||||
|
||||
r = await admin_session.remove_users_from_device_group((await privileged_session.user_info())["_id"], mesh.meshid, timeout=10)
|
||||
print("\ninfo remove_users_from_device_group: {}\n".format(r))
|
||||
assert (await admin_session.remove_users_from_device(agent.nodeid, (await unprivileged_session.user_info())["_id"], timeout=10)), "Failed to remove user from device"
|
||||
|
||||
assert (r[(await privileged_session.user_info())["_id"]]["success"]), "Failed to remove user from devcie group"
|
||||
r = await admin_session.remove_users_from_device_group((await privileged_session.user_info())["_id"], mesh.meshid, timeout=10)
|
||||
print("\ninfo remove_users_from_device_group: {}\n".format(r))
|
||||
assert (r[(await privileged_session.user_info())["_id"]]["success"]), "Failed to remove user from device group"
|
||||
assert (await admin_session.remove_users_from_device(agent.nodeid, (await unprivileged_session.user_info())["_id"], timeout=10)), "Failed to remove user from device"
|
||||
|
||||
|
||||
assert (await admin_session.remove_device_group(mesh.meshid, timeout=10)), "Failed to remove device group"
|
||||
assert (await admin_session.remove_device_group(mesh2.name, isname=True, timeout=10)), "Failed to remove device group"
|
||||
assert (await admin_session.remove_device_group(mesh2.name, isname=True, timeout=10)), "Failed to remove device group by name"
|
||||
assert not (await admin_session.add_users_to_device_group((await privileged_session.user_info())["_id"], mesh.meshid, rights=meshctrl.constants.MeshRights.fullrights, timeout=5))[(await privileged_session.user_info())["_id"]]["success"], "Added user to device group which doesn't exist?"
|
||||
|
||||
async def test_user_groups(env):
|
||||
async with meshctrl.session.Session(env.mcurl, user="admin", password=env.users["admin"], ignore_ssl=True) as admin_session,\
|
||||
meshctrl.session.Session(env.mcurl, user="privileged", password=env.users["privileged"], ignore_ssl=True) as privileged_session,\
|
||||
meshctrl.session.Session(env.mcurl, user="unprivileged", password=env.users["unprivileged"], ignore_ssl=True) as unprivileged_session:
|
||||
async with meshctrl.Session(env.mcurl, user="admin", password=env.users["admin"], ignore_ssl=True) as admin_session,\
|
||||
meshctrl.Session(env.mcurl, user="privileged", password=env.users["privileged"], ignore_ssl=True) as privileged_session,\
|
||||
meshctrl.Session(env.mcurl, user="unprivileged", password=env.users["unprivileged"], ignore_ssl=True) as unprivileged_session:
|
||||
|
||||
user_group = await admin_session.add_user_group("test", description="aoeu")
|
||||
print("\ninfo add_user_group: {}\n".format(user_group))
|
||||
@@ -294,7 +349,7 @@ async def test_user_groups(env):
|
||||
assert await admin_session.remove_user_group(user_group2.id.split("/")[-1])
|
||||
|
||||
async def test_events(env):
|
||||
async with meshctrl.session.Session(env.mcurl, user="admin", password=env.users["admin"], ignore_ssl=True) as admin_session:
|
||||
async with meshctrl.Session(env.mcurl, user="admin", password=env.users["admin"], ignore_ssl=True) as admin_session:
|
||||
await admin_session.list_events()
|
||||
mesh = await admin_session.add_device_group("test", description="This is a test group", amtonly=False, features=0, consent=0, timeout=10)
|
||||
try:
|
||||
@@ -310,7 +365,7 @@ async def test_events(env):
|
||||
await asyncio.sleep(1)
|
||||
else:
|
||||
break
|
||||
async with meshctrl.session.Session(env.mcurl, user="privileged", password=env.users["privileged"], ignore_ssl=True) as privileged_session:
|
||||
async with meshctrl.Session(env.mcurl, user="privileged", password=env.users["privileged"], ignore_ssl=True) as privileged_session:
|
||||
|
||||
# assert len(await privileged_session.list_events()) == 0, "non-admin user has access to admin events"
|
||||
|
||||
@@ -337,8 +392,8 @@ async def test_events(env):
|
||||
assert (await admin_session.remove_device_group(mesh.meshid, timeout=10)), "Failed to remove device group"
|
||||
|
||||
async def test_interuser(env):
|
||||
async with meshctrl.session.Session(env.mcurl, user="admin", password=env.users["admin"], ignore_ssl=True) as admin_session,\
|
||||
meshctrl.session.Session(env.mcurl, user="privileged", password=env.users["privileged"], ignore_ssl=True) as privileged_session:
|
||||
async with meshctrl.Session(env.mcurl, user="admin", password=env.users["admin"], ignore_ssl=True) as admin_session,\
|
||||
meshctrl.Session(env.mcurl, user="privileged", password=env.users["privileged"], ignore_ssl=True) as privileged_session:
|
||||
got_message = asyncio.Event()
|
||||
async def _():
|
||||
async for message in admin_session.events({"action": "interuser"}):
|
||||
@@ -361,7 +416,7 @@ async def test_interuser(env):
|
||||
tg.create_task(asyncio.wait_for(got_message.wait(), 5))
|
||||
|
||||
async def test_session_files(env):
|
||||
async with meshctrl.session.Session(env.mcurl, user="admin", password=env.users["admin"], ignore_ssl=True) as admin_session:
|
||||
async with meshctrl.Session(env.mcurl, user="admin", password=env.users["admin"], ignore_ssl=True) as admin_session:
|
||||
mesh = await admin_session.add_device_group("test", description="This is a test group", amtonly=False, features=0, consent=0, timeout=10)
|
||||
try:
|
||||
with env.create_agent(mesh.short_meshid) as agent:
|
||||
@@ -378,7 +433,7 @@ async def test_session_files(env):
|
||||
break
|
||||
pwd = (await admin_session.run_command(agent.nodeid, "pwd", timeout=10))[agent.nodeid]["result"].strip()
|
||||
|
||||
randdata = random.randbytes(2000000)
|
||||
randdata = random.randbytes(20000000)
|
||||
upfilestream = io.BytesIO(randdata)
|
||||
downfilestream = io.BytesIO()
|
||||
os.makedirs(os.path.join(thisdir, "data"), exist_ok=True)
|
||||
@@ -387,20 +442,18 @@ async def test_session_files(env):
|
||||
|
||||
r = await admin_session.upload(agent.nodeid, upfilestream, f"{pwd}/test", timeout=5)
|
||||
print("\ninfo files_upload: {}\n".format(r))
|
||||
assert r["result"] == "success", "Upload failed"
|
||||
assert r["result"] == True, "Upload failed"
|
||||
assert r["size"] == len(randdata), "Uploaded wrong number of bytes"
|
||||
|
||||
r = await admin_session.upload_file(agent.nodeid, os.path.join(thisdir, "data", "test"), f"{pwd}/test2", timeout=5)
|
||||
print("\ninfo files_upload: {}\n".format(r))
|
||||
assert r["result"] == "success", "Upload failed"
|
||||
assert r["result"] == True, "Upload failed"
|
||||
assert r["size"] == len(randdata), "Uploaded wrong number of bytes"
|
||||
|
||||
s = await admin_session.download(agent.nodeid, f"{pwd}/test", timeout=5)
|
||||
s.seek(0)
|
||||
assert s.read() == randdata, "Downloaded bad data"
|
||||
|
||||
await admin_session.download(agent.nodeid, f"{pwd}/test", downfilestream, timeout=5)
|
||||
downfilestream.seek(0)
|
||||
assert downfilestream.read() == randdata, "Downloaded bad data"
|
||||
|
||||
await admin_session.download_file(agent.nodeid, f"{pwd}/test2", os.path.join(thisdir, "data", "test"), timeout=5)
|
||||
@@ -410,7 +463,7 @@ async def test_session_files(env):
|
||||
|
||||
r = await admin_session.upload_file(agent.nodeid, os.path.join(thisdir, "data", "test"), f"{pwd}/test2", unique_file_tunnel=True, timeout=5)
|
||||
|
||||
assert r["result"] == "success", "Upload failed"
|
||||
assert r["result"] == True, "Upload failed"
|
||||
assert r["size"] == len(randdata), "Uploaded wrong number of bytes"
|
||||
|
||||
await admin_session.download_file(agent.nodeid, f"{pwd}/test2", os.path.join(thisdir, "data", "test"), unique_file_tunnel=True, timeout=5)
|
||||
|
||||
@@ -5,7 +5,7 @@ import meshctrl
|
||||
import requests
|
||||
|
||||
async def test_shell(env):
|
||||
async with meshctrl.session.Session(env.mcurl, user="admin", password=env.users["admin"], ignore_ssl=True) as admin_session:
|
||||
async with meshctrl.Session(env.mcurl, user="admin", password=env.users["admin"], ignore_ssl=True) as admin_session:
|
||||
mesh = await admin_session.add_device_group("test", description="This is a test group", amtonly=False, features=0, consent=0, timeout=10)
|
||||
try:
|
||||
with env.create_agent(mesh.short_meshid) as agent:
|
||||
@@ -40,7 +40,7 @@ async def test_shell(env):
|
||||
|
||||
|
||||
async def test_smart_shell(env):
|
||||
async with meshctrl.session.Session(env.mcurl, user="admin", password=env.users["admin"], ignore_ssl=True) as admin_session:
|
||||
async with meshctrl.Session(env.mcurl, user="admin", password=env.users["admin"], ignore_ssl=True) as admin_session:
|
||||
mesh = await admin_session.add_device_group("test", description="This is a test group", amtonly=False, features=0, consent=0, timeout=10)
|
||||
try:
|
||||
with env.create_agent(mesh.short_meshid) as agent: