forked from Narcissus/pylibmeshctrl
Compare commits: feat/run_c ... main (33 commits)
Commit SHA1s in this comparison:
cac746906f, 6290bc7298, 5975e145a7, cd6707a279, 7b9d82b8e6, 7cefd24a9d, cbc1f9223f, 3fa1ca2e32, ee812220fb, 002f652c8c, 0b09f64821, 12a3040f89, e0694f980c, 61053549f2, fb3d043431, c13985739b, db1914c87b, b0d071d87f, 3bcedf5610, 9c7a8c39b0, 7ba6989325, 748e39d5b4, 6dae40eb40, c7d628716e, 1f9979ddd1, d4b9524814, bc1db8f2b3, 403c0cd0ec, 0b0029563a, 0b32896c88, 2304810ee6, 4cda54ab60, 87fad5aa13
@@ -3,3 +3,4 @@ Contributors
============

* Josiah Baldwin <jbaldwin8889@gmail.com>
* Daan Selen <https://github.com/DaanSelen>
@@ -2,6 +2,38 @@
Changelog
=========

version 1.3.3
=============

Improvements:
* Dependency bumps

Bugs:
* Fix run_commands having an issue with ignore_output

version 1.3.2
=============

Improvements:
* Fix race condition that could occur when running `run_command` or `run_console_command`

version 1.3.1
=============

Improvements:
* Basically just everything in 1.3.0, this is a release fix

version 1.3.0
=============

Improvements:
* Improved how run_commands was handled (#51)
* Added remove device functionality (#52)
* Added run_console_commands functionality (#55)

Bugs:
* Silly documentation being wrong (#53)

version 1.2.2
=============
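To illustrate the 1.3.x additions above, here is a minimal usage sketch. It assumes the package exposes `Session` as `meshctrl.Session`, that its constructor looks roughly like `Session(url, user=..., password=...)`, and that it works as an async context manager (its `__aexit__` appears later in this diff); the exact constructor parameters are not shown here, so check the library docs, and replace `<nodeid>` with a real node id.

```python
import asyncio
import meshctrl  # assumed top-level export of Session


async def main():
    # Assumed constructor and arguments; not shown in this diff.
    async with meshctrl.Session("wss://mesh.example.com", user="admin", password="secret") as session:
        # New in 1.3.0 (#55): run a console command and collect its output per node.
        info = await session.run_console_command("<nodeid>", "info", timeout=10)
        print(info["<nodeid>"]["result"])

        # Fixed in 1.3.3: ignore_output now returns promptly with empty results.
        await session.run_command("<nodeid>", "ls", ignore_output=True, timeout=10)

        # New in 1.3.0 (#52): remove a device from MeshCentral.
        await session.remove_devices("<nodeid>", timeout=10)


asyncio.run(main())
```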
@@ -6,7 +6,7 @@ sphinx-jinja2-compat>=0.1.1
sphinx-toolbox>=2.16.0
# sphinx_rtd_theme
cffi~=1.17.1
cryptography~=44.0.1
pycparser~=2.22
websockets~=15.0.0
enum_tools
cryptography~=46.0.5
websockets~=16.0.0
requirements.txt (binary file not shown)
@@ -44,9 +44,9 @@ python_requires = >=3.8
# For more information, check out https://semver.org/.
install_requires =
    importlib-metadata
    cryptography~=44.0.1
    websockets~=15.0.0
    python-socks[asyncio]~=2.5.3
    cryptography~=46.0.5
    websockets~=16.0.0
    python-socks[asyncio]~=2.8.1


[options.packages.find]
@@ -295,6 +295,23 @@ class Device(object):
        '''
        return await self._session.reset_devices(self.nodeid, timeout=timeout)

    async def remove(self, timeout=None):
        '''
        Remove this device from MeshCentral

        Args:
            timeout (int): duration in seconds to wait for a response before throwing an error

        Returns:
            bool: True on success, raise otherwise

        Raises:
            :py:class:`~meshctrl.exceptions.SocketError`: Info about socket closure
            asyncio.TimeoutError: Command timed out
        '''
        return await self._session.remove_devices(self.nodeid, timeout=timeout)

    async def sleep(self, timeout=None):
        '''
        Sleep device

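A hedged usage sketch for the new `Device.remove` helper. It assumes device objects come from `Session.device_info`, as the `run_command` implementation later in this diff does, and that `session` is an already-connected meshctrl Session.

```python
# Sketch only: `session` is an already-connected meshctrl Session, and
# device_info is assumed to return a Device instance for the given node.
device = await session.device_info("<nodeid>", timeout=10)
await device.remove(timeout=10)  # delegates to session.remove_devices(device.nodeid)
```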
@@ -157,7 +157,7 @@ class Files(tunnel.Tunnel):

    async def rm(self, path, files, recursive=False, timeout=None):
        """
        Create a directory on the device. This API doesn't error if the file doesn't exist.
        Remove a set of files or directories from the device. This API doesn't error if the file doesn't exist.

        Args:
            path (str): Directory from which to delete files
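For reference, a call matching the corrected docstring above; how a `Files` tunnel is opened is not part of this diff, so `files` is assumed to be an already-established tunnel to a device.

```python
# Sketch: remove two entries under /tmp/build; missing files do not raise.
await files.rm("/tmp/build", ["old.log", "cache"], recursive=True, timeout=10)
```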
@@ -240,23 +240,28 @@ class Session(object):
    async def __aexit__(self, exc_t, exc_v, exc_tb):
        await self.close()

    @util._check_socket
    async def _send_command(self, data, name, timeout=None):
        id = f"meshctrl_{name}_{self._get_command_id()}"
    def _generate_response_id(self, name):
        responseid = f"meshctrl_{name}_{self._get_command_id()}"
        # This fixes a very theoretical bug with hash collisions in the case of an infinite number of requests. Now the bug will only happen if there are currently 2**32-1 of the same type of request going out at the same time
        while id in self._inflight:
            id = f"meshctrl_{name}_{self._get_command_id()}"
        while responseid in self._inflight:
            responseid = f"meshctrl_{name}_{self._get_command_id()}"
        return responseid

        self._inflight.add(id)
    @util._check_socket
    async def _send_command(self, data, name, timeout=None, responseid=None):
        if responseid is None:
            responseid = self._generate_response_id(name)

        self._inflight.add(responseid)
        responded = asyncio.Event()
        response = None
        async def _(data):
            self._inflight.remove(id)
            self._inflight.remove(responseid)
            nonlocal response
            response = data
            responded.set()
        self._eventer.once(id, _)
        await self._message_queue.put(json.dumps(data | {"tag": id, "responseid": id}))
        self._eventer.once(responseid, _)
        await self._message_queue.put(json.dumps(data | {"tag": responseid, "responseid": responseid}))
        await asyncio.wait_for(responded.wait(), timeout=timeout)
        if isinstance(response, Exception):
            raise response
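The point of splitting `_generate_response_id` out of `_send_command` is ordering: a caller can reserve a responseid, register listeners keyed on it, and only then send the command, which closes the race noted under 1.3.2 in the changelog. A simplified sketch of that ordering, using names from this diff (the surrounding task management in `run_command` below is omitted):

```python
# Inside a Session method, simplified from run_command below.
responseid = self._generate_response_id("run_command")  # reserve an id first

async def _collect():
    # Listener is set up before the command goes out, so replies cannot be missed.
    async for event in self.events({"action": "msg", "type": "runcommands",
                                    "responseid": responseid}):
        ...  # handle each namespaced reply

reply_task = asyncio.create_task(_collect())
data = await self._send_command(command, "run_command",
                                timeout=timeout, responseid=responseid)
```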
@@ -571,7 +576,7 @@ class Session(object):
            while True:
                data = await event_queue.get()
                if filter and not util.compare_dict(filter, data):
                    continue
                    continue
                yield data
        finally:
            self._eventer.off("server_event", _)
@@ -1062,6 +1067,30 @@ class Session(object):
            raise exceptions.ServerError(data["result"])
        return True

    async def remove_devices(self, nodeids, timeout=None):
        '''
        Remove device(s) from MeshCentral

        Args:
            nodeids (str|list[str]): nodeid(s) of the device(s) that have to be removed
            timeout (int): duration in seconds to wait for a response before throwing an error

        Returns:
            bool: True on success, raise otherwise

        Raises:
            :py:class:`~meshctrl.exceptions.ServerError`: Error text from server if there is a failure
            :py:class:`~meshctrl.exceptions.SocketError`: Info about socket closure
            asyncio.TimeoutError: Command timed out
        '''
        if isinstance(nodeids, str):
            nodeids = [nodeids]

        data = await self._send_command({ "action": 'removedevices', "nodeids": nodeids}, "remove_devices", timeout=timeout)

        if data.get("result", "ok").lower() != "ok":
            raise exceptions.ServerError(data["result"])
        return True

    async def add_device_group(self, name, description="", amtonly=False, features=0, consent=0, timeout=None):
        '''
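A hedged usage sketch matching the docstring above; a single nodeid or a list is accepted, and a `ServerError` is raised if the server reports anything other than "ok".

```python
# Sketch: `session` is an already-connected meshctrl Session.
await session.remove_devices("<nodeid>", timeout=10)                  # one device
await session.remove_devices(["<nodeid1>", "<nodeid2>"], timeout=10)  # several at once
```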
@@ -1435,7 +1464,7 @@ class Session(object):

    async def run_command(self, nodeids, command, powershell=False, runasuser=False, runasuseronly=False, ignore_output=False, timeout=None):
        '''
        Run a command on any number of nodes. WARNING: Non namespaced call. Calling this function again before it returns may cause unintended consequences.
        Run a command on any number of nodes. WARNING: Non namespaced call on older versions of meshcentral (<1.0.22). Calling this function on those versions again before it returns may cause unintended consequences.

        Args:
            nodeids (str|list[str]): Unique ids of nodes on which to run the command
@@ -1473,69 +1502,86 @@ class Session(object):
            return nid

        result = {n: {"complete": False, "result": [], "command": command} for n in nodeids}
        console_result = {n: {"complete": False, "result": [], "command": command} for n in nodeids}
        reply_result = {n: {"complete": False, "result": [], "command": command} for n in nodeids}
        async def _console():
            async for event in self.events({"action": "msg", "type": "console"}):
                node = match_nodeid(event["nodeid"], nodeids)
                if node:
                    if event["value"] == "Run commands completed.":
                        result.setdefault(node, {})["complete"] = True
                        if all(_["complete"] for key, _ in result.items()):
                        console_result.setdefault(node, {})["complete"] = True
                        if all(_["complete"] for key, _ in console_result.items()):
                            break
                        continue
                    elif (event["value"].startswith("Run commands")):
                        continue
                    result[node]["result"].append(event["value"])
                    console_result[node]["result"].append(event["value"])

        # We create this task AFTER getting the first message, but I don't feel like implementing this twice, so we'll pass in the first message and have it parsed immediately
        async def _reply(responseid, start_data=None):
        async def _reply(responseid, data=None):
            # Returns True when all results are in, Falsey otherwise
            def _parse_event(event):
                node = match_nodeid(event["nodeid"], nodeids)
                if node:
                    result.setdefault(node, {})["complete"] = True
                    result[node]["result"].append(event["result"])
                    if all(_["complete"] for key, _ in result.items()):
                    reply_result.setdefault(node, {})["complete"] = True
                    reply_result[node]["result"].append(event["result"])
                    if all(_["complete"] for key, _ in reply_result.items()):
                        return True
            if start_data is not None:
                if _parse_event(start_data):
            if data is not None:
                if _parse_event(data):
                    return
            async for event in self.events({"action": "msg", "type": "runcommands", "responseid": responseid}):
            async for event in self.events({"action": "msg", "type": "runcommands", "responseid":responseid}):
                if _parse_event(event):
                    break

        async def __(command, tg, tasks):
            data = await self._send_command(command, "run_command", timeout=timeout)
            nonlocal result
            responseid = self._generate_response_id("run_command")

            if not ignore_output:
                reply_task = tg.create_task(asyncio.wait_for(_reply(responseid), timeout=timeout))
                # We still need to parse the console results because it sends them without namespace, this will likely break older versions of meshcentral
                console_task = tg.create_task(asyncio.wait_for(_console(), timeout=timeout))
            data = await self._send_command(command, "run_command", timeout=timeout, responseid=responseid)

            if data.get("type", None) != "runcommands" and data.get("result", "ok").lower() != "ok":
                raise exceptions.ServerError(data["result"])
            elif data.get("type", None) != "runcommands" and data.get("result", "ok").lower() == "ok":
                expect_response = False
                console_task = tg.create_task(asyncio.wait_for(_console(), timeout=timeout))
                if not ignore_output:
                    userid = (await self.user_info())["_id"]
                    for n in nodeids:
                        device_info = await self.device_info(n, timeout=timeout)
                        try:
                            permissions = device_info.mesh.links.get(userid, {}).get("rights",constants.DeviceRights.norights)\
                            # This should work for device rights, but it only seems to work for mesh rights. Not sure why, but I can't get the events to show up when the user only has individual device rights
                            # |device_info.get("links", {}).get(userid, {}).get("rights", constants.DeviceRights.norights)
                            # If we don't have agentconsole rights, we won't be able to read the output, so fill in blanks on this node
                            if not permissions&constants.DeviceRights.agentconsole:
                                result[n]["complete"] = True
                            else:
                                expect_response = True
                        except AttributeError:
                reply_task.cancel()
                result = console_result
                userid = (await self.user_info())["_id"]
                for n in nodeids:
                    device_info = await self.device_info(n, timeout=timeout)
                    try:
                        permissions = device_info.mesh.links.get(userid, {}).get("rights",constants.DeviceRights.norights)
                        # This should work for device rights, but it only seems to work for mesh rights. Not sure why, but I can't get the events to show up when the user only has individual device rights
                        # |device_info.get("links", {}).get(userid, {}).get("rights", constants.DeviceRights.norights)
                        # If we don't have agentconsole rights, we won't be able to read the output, so fill in blanks on this node
                        if not permissions&constants.DeviceRights.agentconsole:
                            result[n]["complete"] = True
                        else:
                            expect_response = True
                    except AttributeError:
                        result[n]["complete"] = True
                if expect_response:
                    tasks.append(console_task)
                else:
                    console_task.cancel()
            elif data.get("type", None) == "runcommands" and not ignore_output:
                tasks.append(tg.create_task(asyncio.wait_for(_reply(data["responseid"], start_data=data), timeout=timeout)))
            elif data.get("type", None) == "runcommands":
                console_task.cancel()
                if not ignore_output:
                    result = reply_result
                    tasks.append(reply_task)
            else:
                # if not ignore_output:
                console_task.cancel()
                reply_task.cancel()
                raise exceptions.ServerError(f"Unrecognized response: {data}")

        tasks = []
        async with asyncio.TaskGroup() as tg:
            tasks.append(tg.create_task(__({ "action": 'runcommands', "nodeids": nodeids, "type": (2 if powershell else 0), "cmds": command, "runAsUser": runAsUser, "reply": not ignore_output}, tg, tasks)))
            tasks.append(tg.create_task(__({ "action": 'runcommands', "nodeids": nodeids, "type": (2 if powershell else 0), "cmds": command, "runAsUser": runAsUser, "reply": True}, tg, tasks)))

        return {n: v | {"result": "".join(v["result"])} for n,v in result.items()}
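A hedged example of calling `run_command` as reworked here. The return shape comes from the code above: a dict keyed by nodeid whose values carry `complete`, the joined `result` string, and the original `command`.

```python
# Sketch: `session` is an already-connected meshctrl Session.
results = await session.run_command(["<nodeid1>", "<nodeid2>"], "ls",
                                    powershell=False, ignore_output=False, timeout=10)
for nodeid, info in results.items():
    print(nodeid, info["complete"], info["result"])
```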
@@ -1573,20 +1619,23 @@ class Session(object):
        result = {n: {"complete": False, "result": [], "command": command} for n in nodeids}
        async def _console():
            async for event in self.events({"action": "msg", "type": "console"}):
                node = match_nodeid(event["nodeid"], nodeids)
                if node:
                    result[node]["result"].append(event["value"])
                    result.setdefault(node, {})["complete"] = True
                    if all(_["complete"] for key, _ in result.items()):
                        break
                # We can pick up run commands here sometimes if they are run in quick succession. Try to avoid that.
                if (not event["value"].startswith("Run commands")):
                    node = match_nodeid(event["nodeid"], nodeids)
                    if node:
                        result[node]["result"].append(event["value"])
                        result.setdefault(node, {})["complete"] = True
                        if all(_["complete"] for key, _ in result.items()):
                            break
        async def __(command, tg, tasks):
            console_task = tg.create_task(asyncio.wait_for(_console(), timeout=timeout))
            data = await self._send_command(command, "run_console_command", timeout=timeout)

            if data.get("type", None) != "runcommands" and data.get("result", "ok").lower() != "ok":
                raise exceptions.ServerError(data["result"])
            elif data.get("type", None) != "runcommands" and data.get("result", "ok").lower() == "ok":
                expect_response = False
                console_task = tg.create_task(asyncio.wait_for(_console(), timeout=timeout))

                if not ignore_output:
                    userid = (await self.user_info())["_id"]
                    for n in nodeids:
@@ -1606,6 +1655,9 @@ class Session(object):
                    tasks.append(console_task)
                else:
                    console_task.cancel()
            else:
                console_task.cancel()
                raise exceptions.ServerError(f"Unrecognized response: {data}")

        tasks = []
        async with asyncio.TaskGroup() as tg:
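And the console-command counterpart, which gathers per-node console output the same way (hedged sketch; `session` is assumed to be an already-connected meshctrl Session).

```python
results = await session.run_console_command(["<nodeid1>", "<nodeid2>"], "info", timeout=10)
print(results["<nodeid1>"]["result"])
```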
@@ -4,7 +4,10 @@ import subprocess
import time
import json
import atexit
import pytest
try:
    import pytest
except:
    pass
import requests
thisdir = os.path.abspath(os.path.dirname(__file__))

@@ -68,6 +71,9 @@ class TestEnvironment(object):
        if not self._wait_for_meshcentral():
            self.__exit__(None, None, None)
            raise Exception("Failed to create docker instance")
        if not self._wait_for_client_server():
            self.__exit__(None, None, None)
            raise Exception("Failed to create client server")
        return self

    def _wait_for_meshcentral(self, timeout=30):
@@ -90,6 +96,26 @@ class TestEnvironment(object):
                return False
        return True

    def _wait_for_client_server(self, timeout=30):
        start = time.time()
        while time.time() - start < timeout:
            try:
                data = subprocess.check_output(["docker", "inspect", "meshctrl-client", "--format='{{json .State.Health}}'"], cwd=thisdir, stderr=subprocess.DEVNULL)
                # docker outputs for humans, not computers. This is the easiest way to chop off the ends
                data = json.loads(data.strip()[1:-1])
            except Exception as e:
                time.sleep(1)
                continue
            try:
                if data["Status"] == "healthy":
                    break
            except:
                pass
            time.sleep(1)
        else:
            return False
        return True

    def __exit__(self, exc_t, exc_v, exc_tb):
        pass

@@ -112,10 +138,13 @@ def _kill_docker_process():

atexit.register(_kill_docker_process)

@pytest.fixture(scope="session")
def env():
    with TestEnvironment() as e:
        yield e
try:
    @pytest.fixture(scope="session")
    def env():
        with TestEnvironment() as e:
            yield e
except:
    pass


if __name__ == "__main__":
@@ -9,6 +9,8 @@ services:
    image: client
    build:
      dockerfile: client.dockerfile
    sysctls:
      net.ipv6.conf.all.disable_ipv6: 1
    ports:
      - 5000:5000
    depends_on:
@@ -20,6 +22,10 @@ services:
    # - ./meshcentral/mongodb_data:/data/db
    networks:
      - meshctrl
    healthcheck:
      test: curl --fail http://localhost:5000/ || exit 1
      interval: 5s
      timeout: 120s
    extra_hosts:
      - "host.docker.internal:host-gateway"

@@ -28,6 +34,8 @@ services:
    container_name: meshctrl-meshcentral
    # use the official meshcentral container
    image: meshcentral
    sysctls:
      net.ipv6.conf.all.disable_ipv6: 1
    build:
      dockerfile: meshcentral.dockerfile
    ports:
@@ -55,6 +63,8 @@ services:
    image: ubuntu/squid:latest
    restart: unless-stopped
    container_name: meshctrl-squid
    sysctls:
      net.ipv6.conf.all.disable_ipv6: 1
    ports:
      - 3128:3128

@@ -1,9 +1,19 @@
# Logs are managed by logrotate on Debian
logfile_rotate 0

acl all src all
acl to_ipv6 dst ipv6
acl from_ipv6 src ipv6

acl to_ipv4 dst ipv4
acl from_ipv4 src ipv4

#acl all src all
acl Safe_ports port 8086
acl SSS_ports port 8086
http_access allow to_ipv4
http_access allow from_ipv4
http_access deny to_ipv6
http_access deny from_ipv6
http_access allow all
debug_options ALL,0 85,2 88,2

@@ -1,4 +1,4 @@
FROM ghcr.io/ylianst/meshcentral:1.1.50
FROM ghcr.io/ylianst/meshcentral:1.1.56
RUN apk add curl
RUN apk add python3
WORKDIR /opt/meshcentral/

@@ -62,7 +62,7 @@ def remove_agent(agentid):

@api.route('/', methods=['GET'])
def slash():
    return [_["id"] for _ in agents]
    return [value["id"] for key, value in agents.items()]

if __name__ == '__main__':
    api.run()
@@ -1,6 +1,6 @@
requests
pytest-asyncio
cffi==1.17.1
cryptography~=44.0.1
pycparser==2.22
websockets~=15.0.0
cryptography~=46.0.5
websockets~=16.0.0
@@ -251,6 +251,12 @@ async def test_mesh_device(env):
    assert "Run commands completed." not in r[agent2.nodeid]["result"], "Didn't parse run command ending correctly"
    assert "meshagent" in (await privileged_session.run_command(agent.nodeid, "ls", timeout=10))[agent.nodeid]["result"], "ls gave incorrect data"

    # Test run_commands ignore output
    r = await admin_session.run_command([agent.nodeid, agent2.nodeid], "ls", ignore_output=True, timeout=10)
    print("\ninfo run_command ignore_output: {}\n".format(r))
    assert r[agent.nodeid]["result"] == '', "Ignore output returned an output"
    assert r[agent2.nodeid]["result"] == '', "Ignore output returned an output"

    # Test run_commands missing device
    try:
        await admin_session.run_command([agent.nodeid, "notanid"], "ls", timeout=10)
@@ -259,11 +265,18 @@ async def test_mesh_device(env):
    else:
        raise Exception("Run command on a device that doesn't exist did not raise an exception")

    # Test run_console_command
    r = await admin_session.run_console_command([agent.nodeid, agent2.nodeid], "info", timeout=10)
    print("\ninfo run_console_command: {}\n".format(r))
    assert agent.nodeid in r[agent.nodeid]["result"], "Run console command gave bad response"
    assert agent2.nodeid in r[agent2.nodeid]["result"], "Run console command gave bad response"

    # Test run_console_command ignore output
    r = await admin_session.run_console_command([agent.nodeid, agent2.nodeid], "info", timeout=10, ignore_output=True)
    print("\ninfo run_console_command ignore_output: {}\n".format(r))
    assert r[agent.nodeid]["result"] == '', "Ignore output returned an output"
    assert r[agent2.nodeid]["result"] == '', "Ignore output returned an output"

    # Test run_console_command missing device
    try:
        await admin_session.run_console_command([agent.nodeid, "notanid"], "info", timeout=10)
@@ -336,6 +349,15 @@ async def test_mesh_device(env):
    r = await admin_session.remove_users_from_device_group((await privileged_session.user_info())["_id"], mesh.meshid, timeout=10)
    print("\ninfo remove_users_from_device_group: {}\n".format(r))
    assert (r[(await privileged_session.user_info())["_id"]]["success"]), "Failed to remove user from device group"

    await admin_session.remove_devices(agent2.nodeid, timeout=10)
    try:
        await admin_session.device_info(agent2.nodeid, timeout=10)
    except ValueError:
        pass
    else:
        raise Exception("Device not deleted")

    assert (await admin_session.remove_users_from_device(agent.nodeid, (await unprivileged_session.user_info())["_id"], timeout=10)), "Failed to remove user from device"