forked from Narcissus/pylibmeshctrl
Compare commits
8 Commits
- 4eda4e6c08
- ab2a4c40bc
- 0a657cee48
- 03441161b2
- 24adf3baa5
- 1adaccabc0
- 20843dbea7
- af6c020506
requirements.txt (binary file, diff not shown)
@@ -46,6 +46,7 @@ install_requires =
     importlib-metadata
     cryptography>=43.0.3
     websockets>=13.1
+    python-socks[asyncio]

 [options.packages.find]

@@ -8,6 +8,8 @@ import json
 import datetime
 import io
 import ssl
+import urllib
+from python_socks.async_.asyncio import Proxy
 from . import constants
 from . import exceptions
 from . import util

@@ -28,9 +30,10 @@ class Session(object):
         domain (str): Domain to connect to
         password (str): Password with which to connect. Can also be password generated from token.
         loginkey (str|bytes): Key from already handled login. Overrides username/password.
-        proxy (str): "url:port" to use for proxy server NOTE: This is currently not implemented due to a limitation of the undersying websocket library. Upvote the issue if you find this important.
+        proxy (str): "url:port" to use for proxy server
         token (str): Login token. This appears to be superfluous
         ignore_ssl (bool): Ignore SSL errors
+        auto_reconnect (bool): In case of server failure, attempt to auto reconnect. All outstanding requests will be killed.

     Returns:
         :py:class:`Session`: Session connected to url

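For orientation, a hedged sketch of what the updated constructor enables; the server URL, credentials, and proxy address below are placeholders, not values from this diff:

import asyncio
import meshctrl

async def main():
    # Placeholder server and credentials; proxy can point at any HTTP/SOCKS
    # proxy reachable by python-socks, e.g. the squid container added further down.
    async with meshctrl.Session("wss://meshcentral.example:8086",
                                user="admin",
                                password="hunter2",
                                proxy="http://localhost:3128",
                                auto_reconnect=True,
                                ignore_ssl=True) as session:
        print(await session.server_info())

asyncio.run(main())
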
@@ -43,9 +46,19 @@ class Session(object):
     '''

     def __init__(self, url, user=None, domain=None, password=None, loginkey=None, proxy=None, token=None, ignore_ssl=False, auto_reconnect=False):
-        if len(url) < 5 or ((not url.startswith('wss://')) and (not url.startswith('ws://'))):
+        parsed = urllib.parse.urlparse(url)
+
+        if parsed.scheme not in ("wss", "ws"):
             raise ValueError("Invalid URL")

+        port = 80
+        if parsed.port is None:
+            if parsed.scheme == "wss":
+                port = 443
+            p = list(parsed)
+            p[1] = f"{parsed.hostname}:{port}"
+            url = urllib.parse.urlunparse(p)
+
         if (not url.endswith('/')):
             url += '/'

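The port-defaulting logic above exists because python-socks needs an explicit destination port. A standalone distillation of the same behavior (normalize_ws_url is a hypothetical helper, not part of the diff):

import urllib.parse

def normalize_ws_url(url):
    # Mirrors the __init__ logic above: reject non-websocket schemes, then
    # make the port explicit so the proxy layer knows where to connect.
    parsed = urllib.parse.urlparse(url)
    if parsed.scheme not in ("wss", "ws"):
        raise ValueError("Invalid URL")
    if parsed.port is None:
        port = 443 if parsed.scheme == "wss" else 80
        p = list(parsed)
        p[1] = f"{parsed.hostname}:{port}"
        url = urllib.parse.urlunparse(p)
    if not url.endswith('/'):
        url += '/'
    return url

assert normalize_ws_url("wss://localhost") == "wss://localhost:443/"
assert normalize_ws_url("ws://example.test/control.ashx") == "ws://example.test:80/control.ashx/"
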
@@ -92,6 +105,7 @@ class Session(object):
         self._inflight = set()
         self._file_tunnels = {}
         self._ignore_ssl = ignore_ssl
+        self.auto_reconnect = auto_reconnect

         self._eventer = util.Eventer()

@@ -120,18 +134,15 @@ class Session(object):
             ssl_context.verify_mode = ssl.CERT_NONE
         options = { "ssl": ssl_context }

-        # Setup the HTTP proxy if needed
-        # if (self._proxy != None):
-        #     options.agent = new https_proxy_agent(urllib.parse(self._proxy))

         headers = websockets.datastructures.Headers()

         if (self._password):
             token = self._token if self._token else b""
             headers['x-meshauth'] = (base64.b64encode(self._user.encode()) + b',' + base64.b64encode(self._password.encode()) + token).decode()

         options["additional_headers"] = headers
-        async for websocket in websockets.asyncio.client.connect(self.url, process_exception=util._process_websocket_exception, **options):
+        async for websocket in util.proxy_connect(self.url, proxy_url=self._proxy, process_exception=util._process_websocket_exception, **options):
             self.alive = True
             self._socket_open.set()
             try:

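For reference, the x-meshauth header built above base64-encodes the user and password separately and appends any login-token bytes as-is. A worked example with made-up credentials:

import base64

user, password = "admin", "hunter2"
token = b""  # login-token bytes get appended here when one is supplied
header = (base64.b64encode(user.encode()) + b"," + base64.b64encode(password.encode()) + token).decode()
print(header)  # YWRtaW4=,aHVudGVyMg==
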
@@ -139,10 +150,10 @@ class Session(object):
                     tg.create_task(self._listen_data_task(websocket))
                     tg.create_task(self._send_data_task(websocket))
             except* websockets.ConnectionClosed as e:
                 self._socket_open.clear()
+                if not self.auto_reconnect:
+                    raise
             except* Exception as eg:
                 self.alive = False
                 self._socket_open.clear()

@@ -1326,6 +1337,14 @@ class Session(object):
         if sysinfo is not None and sysinfo.get("node", None):
             # Node information came with system information
             node = sysinfo.get("node", None)
+            for meshid, _nodes in nodes["nodes"].items():
+                for _mesh in meshes:
+                    if _mesh.meshid == meshid:
+                        break
+                else:
+                    break
+                if meshid == node["meshid"]:
+                    node["mesh"] = _mesh
         else:
             # This device does not have system information, get node information from the nodes list.
             for meshid, _nodes in nodes["nodes"].items():

@@ -1859,17 +1878,20 @@ class Session(object):
             :py:class:`~meshctrl.exceptions.FileTransferCancelled`: File transfer cancelled. Info available on the `stats` property

         Returns:
-            io.IOBase: The stream which has been downloaded into. Cursor will be at the end of the stream.
+            io.IOBase: The stream which has been downloaded into. Cursor will be at the beginning of where the file is downloaded.
         '''
         if target is None:
             target = io.BytesIO()
+        start = target.tell()
         if unique_file_tunnel:
             async with self.file_explorer(nodeid) as files:
                 await files.download(source, target)
+                target.seek(start)
                 return target
         else:
             files = await self._cached_file_explorer(nodeid, nodeid)
             await files.download(source, target, timeout=timeout)
+            target.seek(start)
             return target

     async def download_file(self, nodeid, source, filepath, unique_file_tunnel=False, timeout=None):

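The seek(start) calls change the cursor contract: callers can read immediately after download returns. A hedged usage sketch (the nodeid and remote path are placeholders, and an established Session is assumed):

import io

async def fetch_bytes(session, nodeid):
    buf = io.BytesIO()
    await session.download(nodeid, "/tmp/test", buf, timeout=5)
    # Cursor was rewound to where the download began, so no seek(0) is needed.
    return buf.read()
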
@@ -1887,10 +1909,10 @@ class Session(object):
             :py:class:`~meshctrl.exceptions.FileTransferCancelled`: File transfer cancelled. Info available on the `stats` property

         Returns:
-            io.IOBase: The stream which has been downloaded into. Cursor will be at the end of the stream.
+            None
         '''
         with open(filepath, "wb") as f:
-            return await self.download(nodeid, source, f, unique_file_tunnel, timeout=timeout)
+            await self.download(nodeid, source, f, unique_file_tunnel, timeout=timeout)

     async def _cached_file_explorer(self, nodeid, _id):
         if (_id not in self._file_tunnels or not self._file_tunnels[_id].alive):

@@ -4,6 +4,8 @@ import websockets.asyncio
 import websockets.asyncio.client
 import asyncio
 import ssl
+from python_socks.async_.asyncio import Proxy
+import urllib
 from . import exceptions
 from . import util
 from . import constants

@@ -52,10 +54,6 @@ class Tunnel(object):
             ssl_context.verify_mode = ssl.CERT_NONE
         options = { "ssl": ssl_context }

-        # Setup the HTTP proxy if needed
-        # if (self._session._proxy != None):
-        #     options.agent = new https_proxy_agent(urllib.parse(this._proxy))
-
         if (self.node_id.split('/') != 3) and (self._session._currentDomain is not None):
             self.node_id = f"node/{self._session._currentDomain}/{self.node_id}"

@@ -72,14 +70,7 @@ class Tunnel(object):

         self.url = self._session.url.replace('/control.ashx', '/meshrelay.ashx?browser=1&p=' + str(self._protocol) + '&nodeid=' + self.node_id + '&id=' + self._tunnel_id + '&auth=' + authcookie["cookie"])

-        # headers = websockets.datastructures.Headers()
-
-        # if (self._password):
-        #     token = self._token if self._token else b""
-        #     headers['x-meshauth'] = (base64.b64encode(self._user.encode()) + b',' + base64.b64encode(self._password.encode()) + token).decode()
-
-        # options["additional_headers"] = headers
-        async for websocket in websockets.asyncio.client.connect(self.url, process_exception=util._process_websocket_exception, **options):
+        async for websocket in util.proxy_connect(self.url, proxy_url=self._session._proxy, process_exception=util._process_websocket_exception, **options):
             self.alive = True
             self._socket_open.set()
             try:

@@ -9,6 +9,9 @@ import re
 import websockets
 import ssl
 import functools
+import urllib
+import python_socks
+from python_socks.async_.asyncio import Proxy
 from . import exceptions

 def _encode_cookie(o, key):

@@ -139,17 +142,36 @@ def compare_dict(dict1, dict2):
 def _check_socket(f):
     @functools.wraps(f)
     async def wrapper(self, *args, **kwargs):
-        await self.initialized.wait()
-        if not self.alive and self._main_loop_error is not None:
-            raise self._main_loop_error
-        elif not self.alive:
-            raise exceptions.SocketError("Socket Closed")
-        return await f(self, *args, **kwargs)
+        try:
+            async with asyncio.TaskGroup() as tg:
+                tg.create_task(asyncio.wait_for(self.initialized.wait(), 10))
+                tg.create_task(asyncio.wait_for(self._socket_open.wait(), 10))
+        finally:
+            if not self.alive and self._main_loop_error is not None:
+                raise self._main_loop_error
+            elif not self.alive:
+                raise exceptions.SocketError("Socket Closed")
+        return await f(self, *args, **kwargs)
     return wrapper

 def _process_websocket_exception(exc):
     tmp = websockets.asyncio.client.process_exception(exc)
-    # SSLVerification error is a subclass of OSError, but doesn't make sense no retry, so we need to handle it separately.
+    # SSLVerification error is a subclass of OSError, but doesn't make sense to retry, so we need to handle it separately.
     if isinstance(exc, (ssl.SSLCertVerificationError, TimeoutError)):
         return exc
+    if isinstance(exc, python_socks._errors.ProxyError):
+        return None
     return tmp

+
+class proxy_connect(websockets.asyncio.client.connect):
+    def __init__(self,*args, proxy_url=None, **kwargs):
+        self.proxy = None
+        if proxy_url is not None:
+            self.proxy = Proxy.from_url(proxy_url)
+        super().__init__(*args, **kwargs)
+
+    async def create_connection(self, *args, **kwargs):
+        if self.proxy is not None:
+            parsed = urllib.parse.urlparse(self.uri)
+            self.connection_kwargs["sock"] = await self.proxy.connect(dest_host=parsed.hostname, dest_port=parsed.port)
+        return await super().create_connection(*args, **kwargs)

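proxy_connect keeps the websockets reconnect-iterator behavior but, when proxy_url is given, pre-opens the TCP socket through python-socks and hands it to websockets via the sock connection kwarg. A hedged usage sketch (placeholder URLs; Proxy.from_url accepts http://, socks4://, and socks5:// schemes):

import asyncio
from meshctrl import util

async def main():
    async for ws in util.proxy_connect("wss://meshcentral.example:443/control.ashx",
                                       proxy_url="http://localhost:3128"):
        await ws.send("ping")
        break  # single-shot demo; the async-for form is what enables reconnect loops

asyncio.run(main())
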
@@ -54,15 +54,23 @@ class TestEnvironment(object):
         self._subp = None
         self.mcurl = "wss://localhost:8086"
         self.clienturl = "http://localhost:5000"
-        self._dockerurl = "host.docker.internal:8086"
+        self.dockerurl = "host.docker.internal:8086"
+        self.proxyurl = "http://localhost:3128"

     def __enter__(self):
         global _docker_process
         if _docker_process is not None:
             self._subp = _docker_process
             return self
+        # Destroy the env in case it wasn't killed correctly last time.
+        subprocess.check_call(["docker", "compose", "down"], stdout=subprocess.DEVNULL, cwd=thisdir)
         self._subp = _docker_process = subprocess.Popen(["docker", "compose", "up", "--build", "--force-recreate", "--no-deps"], stdout=subprocess.DEVNULL, cwd=thisdir)
-        timeout = 30
+        if not self._wait_for_meshcentral():
+            self.__exit__(None, None, None)
+            raise Exception("Failed to create docker instance")
+        return self
+
+    def _wait_for_meshcentral(self, timeout=30):
         start = time.time()
         while time.time() - start < timeout:
             try:

@@ -79,16 +87,23 @@ class TestEnvironment(object):
                 pass
             time.sleep(1)
         else:
-            self.__exit__(None, None, None)
-            raise Exception("Failed to create docker instance")
-        return self
+            return False
+        return True

     def __exit__(self, exc_t, exc_v, exc_tb):
         pass

     def create_agent(self, meshid):
-        return Agent(meshid, self.mcurl, self.clienturl, self._dockerurl)
+        return Agent(meshid, self.mcurl, self.clienturl, self.dockerurl)

+    # Restart our docker instances, to test reconnect code.
+    def restart_mesh(self):
+        subprocess.check_call(["docker", "container", "restart", "meshctrl-meshcentral"], stdout=subprocess.DEVNULL, cwd=thisdir)
+        assert self._wait_for_meshcentral(), "Failed to restart docker instance"
+
+    def restart_proxy(self):
+        subprocess.check_call(["docker", "container", "restart", "meshctrl-squid"], stdout=subprocess.DEVNULL, cwd=thisdir)
+

 def _kill_docker_process():
     if _docker_process is not None:

@@ -19,9 +19,9 @@ services:
 #      # mongodb data-directory - A must for data persistence
 #      - ./meshcentral/mongodb_data:/data/db
     networks:
       - meshctrl
     extra_hosts:
       - "host.docker.internal:host-gateway"

   meshcentral:
     restart: always

@@ -49,4 +49,21 @@ services:
     healthcheck:
       test: curl -k --fail https://localhost:443/ || exit 1
       interval: 5s
       timeout: 120s
+
+  squid:
+    image: ubuntu/squid:latest
+    restart: unless-stopped
+    container_name: meshctrl-squid
+    ports:
+      - 3128:3128
+
+    networks:
+      - meshctrl
+
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
+
+    volumes:
+      - ./config/squid/conf.d:/etc/squid/conf.d
+      - ./config/squid/squid.conf:/etc/squid/squid.conf

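A quick, hypothetical way to check that the squid container is accepting connections before pointing a Session at it (not part of this change):

import socket

def proxy_is_up(host="localhost", port=3128, timeout=2.0):
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False

print(proxy_is_up())
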
tests/environment/config/squid/conf.d/meshctrl.conf (new file, 11 lines)
@@ -0,0 +1,11 @@
+# Logs are managed by logrotate on Debian
+logfile_rotate 0
+
+acl all src all
+acl Safe_ports port 8086
+acl SSS_ports port 8086
+http_access allow all
+debug_options ALL,0 85,2 88,2
+
+# Set max_filedescriptors to avoid using system's RLIMIT_NOFILE. See LP: #1978272
+max_filedescriptors 1024

tests/environment/config/squid/squid.conf (new file, 9350 lines; diff suppressed because it is too large)

@@ -7,7 +7,7 @@ import io
 import random

 async def test_commands(env):
-    async with meshctrl.Session(env.mcurl, user="admin", password=env.users["admin"], ignore_ssl=True) as admin_session:
+    async with meshctrl.Session("wss://" + env.dockerurl, user="admin", password=env.users["admin"], ignore_ssl=True, proxy=env.proxyurl) as admin_session:
         mesh = await admin_session.add_device_group("test", description="This is a test group", amtonly=False, features=0, consent=0, timeout=10)
         try:
             with env.create_agent(mesh.short_meshid) as agent:

@@ -53,7 +53,7 @@ async def test_commands(env):
         assert (await admin_session.remove_device_group(mesh.meshid, timeout=10)), "Failed to remove device group"

 async def test_upload_download(env):
-    async with meshctrl.Session(env.mcurl, user="admin", password=env.users["admin"], ignore_ssl=True) as admin_session:
+    async with meshctrl.Session("wss://" + env.dockerurl, user="admin", password=env.users["admin"], ignore_ssl=True, proxy=env.proxyurl) as admin_session:
         mesh = await admin_session.add_device_group("test", description="This is a test group", amtonly=False, features=0, consent=0, timeout=10)
         try:
             with env.create_agent(mesh.short_meshid) as agent:

@@ -25,6 +25,10 @@ async def test_sanity(env):
         print("\ninfo server_info: {}\n".format(await s.server_info()))
         pass

+async def test_proxy(env):
+    async with meshctrl.Session("wss://" + env.dockerurl, user="unprivileged", password=env.users["unprivileged"], ignore_ssl=True, proxy=env.proxyurl) as s:
+        pass
+
 async def test_ssl(env):
     try:
         async with meshctrl.Session(env.mcurl, user="unprivileged", password=env.users["unprivileged"], ignore_ssl=False) as s:

@@ -32,4 +36,21 @@ async def test_ssl(env):
     except* ssl.SSLCertVerificationError:
         pass
     else:
         raise Exception("Invalid SSL certificate accepted")
+
+async def test_urlparse():
+    # This tests the url port adding necessitated by python-socks. Our test environment doesn't use 443, so this is just a quick sanity test.
+    try:
+        async with meshctrl.Session("wss://localhost", user="unprivileged", password="Not a real password", ignore_ssl=True) as s:
+            pass
+    except* TimeoutError:
+        #We're not running a server, so timeout is our expected outcome
+        pass
+
+    # This tests our check for wss/ws url schemes
+    try:
+        async with meshctrl.Session("https://localhost", user="unprivileged", password="Not a real password", ignore_ssl=True) as s:
+            pass
+    except* ValueError:
+        #We're not running a server, so timeout is our expected outcome
+        pass

@@ -31,6 +31,35 @@ async def test_admin(env):
         assert len(admin_users) == len(env.users.keys()), "Admin cannot see correct number of users"
         assert len(admin_sessions) == 2, "Admin cannot see correct number of oser sessions"

+async def test_auto_reconnect(env):
+    async with meshctrl.Session(env.mcurl, user="admin", password=env.users["admin"], ignore_ssl=True, auto_reconnect=True) as admin_session:
+        env.restart_mesh()
+        await asyncio.sleep(10)
+        await admin_session.ping(timeout=10)
+
+    # As above, but with proxy
+    async with meshctrl.Session("wss://" + env.dockerurl, user="admin", password=env.users["admin"], ignore_ssl=True, auto_reconnect=True, proxy=env.proxyurl) as admin_session:
+        env.restart_mesh()
+        for i in range(3):
+            try:
+                await admin_session.ping(timeout=10)
+            except:
+                continue
+            break
+        else:
+            raise Exception("Failed to reconnect")
+
+        env.restart_proxy()
+        for i in range(3):
+            try:
+                await admin_session.ping(timeout=10)
+            except* Exception as e:
+                pass
+            else:
+                break
+        else:
+            raise Exception("Failed to reconnect")
+
 async def test_users(env):
     try:

@@ -396,11 +425,9 @@ async def test_session_files(env):
                 assert r["size"] == len(randdata), "Uploaded wrong number of bytes"

                 s = await admin_session.download(agent.nodeid, f"{pwd}/test", timeout=5)
-                s.seek(0)
                 assert s.read() == randdata, "Downloaded bad data"

                 await admin_session.download(agent.nodeid, f"{pwd}/test", downfilestream, timeout=5)
-                downfilestream.seek(0)
                 assert downfilestream.read() == randdata, "Downloaded bad data"

                 await admin_session.download_file(agent.nodeid, f"{pwd}/test2", os.path.join(thisdir, "data", "test"), timeout=5)