docs(conductor): Synchronize tech-stack and commit monitor script updates

2026-02-09 06:42:38 -08:00
parent 1c693aade4
commit ad49e12368
8 changed files with 78 additions and 28 deletions

View File

@@ -23,8 +23,8 @@ def test_aggregate_cluster_status(mock_node_map, mock_nomad_allocs, mock_litefs,
     # Mock LiteFS data
     def litefs_side_effect(addr, **kwargs):
         if addr == "1.1.1.1":
-            return {"is_primary": True, "uptime": 100, "dbs": {"db1": {}}}
-        return {"is_primary": False, "uptime": 50, "dbs": {"db1": {}}}
+            return {"is_primary": True, "candidate": True, "uptime": 100, "dbs": {"db1": {"txid": "0000000000000001", "checksum": "abc"}}}
+        return {"is_primary": False, "candidate": True, "uptime": 50, "dbs": {"db1": {"txid": "0000000000000001", "checksum": "abc"}}}
     mock_litefs.side_effect = litefs_side_effect
@@ -35,11 +35,8 @@ def test_aggregate_cluster_status(mock_node_map, mock_nomad_allocs, mock_litefs,
     node1 = next(n for n in cluster_data["nodes"] if n["node"] == "node1")
     assert node1["litefs_primary"] is True
     assert node1["status"] == "passing"
-    node2 = next(n for n in cluster_data["nodes"] if n["node"] == "node2")
-    assert node2["litefs_primary"] is False
-    assert node2["status"] == "standby"  # Not in Consul but replica
+    assert node1["candidate"] is True
+    assert "db1" in node1["dbs"]
 @patch("consul_client.get_cluster_services")
 @patch("litefs_client.get_node_status")
@@ -53,7 +50,7 @@ def test_aggregate_cluster_status_unhealthy(mock_node_map, mock_nomad_logs, mock
         {"id": "alloc1", "node": "node1", "ip": "1.1.1.1"}
     ]
     # Primary in LiteFS but missing in Consul
-    mock_litefs.return_value = {"is_primary": True, "uptime": 100, "dbs": {"db1": {}}}
+    mock_litefs.return_value = {"is_primary": True, "candidate": True, "uptime": 100, "dbs": {"db1": {"txid": "1", "checksum": "abc"}}}
     mock_consul.return_value = []
     mock_nomad_logs.return_code = 0
     mock_nomad_logs.return_value = "error logs"
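
For context, a minimal sketch of the aggregate_cluster_status function exercised above. The module and function names (nomad_client.get_job_allocations, litefs_client.get_node_status, consul_client.get_cluster_services) come from the patch targets in the test; the body, the "standby" fallback, and the field names inside the Consul service records are assumptions, and the error-log handling for the unhealthy path is omitted.

import consul_client      # project modules, inferred from the patch targets
import litefs_client
import nomad_client

def aggregate_cluster_status(job_name):
    """Merge Nomad, LiteFS and Consul views into one per-node structure (sketch)."""
    allocs = nomad_client.get_job_allocations(job_name)
    # The "node" field in the Consul service records is an assumption.
    healthy = {svc["node"] for svc in consul_client.get_cluster_services()}
    nodes = []
    for alloc in allocs:
        litefs = litefs_client.get_node_status(alloc["ip"])
        nodes.append({
            "node": alloc["node"],
            "status": "passing" if alloc["node"] in healthy else "standby",
            "litefs_primary": litefs.get("is_primary", False),
            "candidate": litefs.get("candidate", False),
            "dbs": litefs.get("dbs", {}),
        })
    return {"nodes": nodes}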

View File

@@ -21,15 +21,19 @@ def test_format_node_table():
             "node": "node1",
             "role": "primary",
             "status": "passing",
-            "uptime": 100,
+            "candidate": True,
+            "uptime": "1h",
             "replication_lag": "N/A",
-            "litefs_primary": True
+            "litefs_primary": True,
+            "dbs": {"db1": {"txid": "1", "checksum": "abc"}}
         }
     ]
     table = output_formatter.format_node_table(nodes, use_color=False)
     assert "node1" in table
     assert "primary" in table
     assert "passing" in table
+    assert "db1" in table
+    assert "Cand" in table
 def test_format_diagnostics():
     """Test the diagnostics section generation."""

View File

@@ -111,6 +111,8 @@ Allocation Addresses:
 Label Dynamic Address
 *http yes 1.1.1.1:4533 -> 4533
 *litefs yes 1.1.1.1:20202 -> 20202
+Task Events:
+Started At = 2026-02-09T14:00:00Z
 """
     mock_alloc2 = MagicMock()
     mock_alloc2.stdout = """
@@ -120,14 +122,14 @@ Allocation Addresses:
 Label Dynamic Address
 *http yes 2.2.2.2:4533 -> 4533
 *litefs yes 2.2.2.2:20202 -> 20202
+Task Events:
+Started At = 2026-02-09T14:00:00Z
 """
     mock_run.side_effect = [mock_job_status, mock_alloc1, mock_alloc2]
-    # This should fail initially because nomad_client.get_job_allocations doesn't exist
-    try:
-        allocs = nomad_client.get_job_allocations("navidrome-litefs")
-        assert len(allocs) == 2
-        assert allocs[0]["ip"] == "1.1.1.1"
-    except AttributeError:
-        pytest.fail("nomad_client.get_job_allocations not implemented")
+    allocs = nomad_client.get_job_allocations("navidrome-litefs")
+    assert len(allocs) == 2
+    assert allocs[0]["ip"] == "1.1.1.1"
+    assert "uptime" in allocs[0]
+    assert allocs[0]["uptime"] != "N/A"
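
For reference, a sketch of a nomad_client.get_job_allocations that would satisfy the updated assertions. Only the function name, the job name, and the asserted fields ("ip", "uptime") come from the test; the subprocess invocations, the parsing regexes, and the _parse_alloc_ids helper are assumptions about how the mocked `nomad job status` / `nomad alloc status` output is consumed.

import re
import subprocess
from datetime import datetime, timezone

def _parse_alloc_ids(job_stdout):
    # Hypothetical helper: assumes alloc IDs sit in the first column of the
    # "Allocations" table printed by `nomad job status`.
    ids, in_table = [], False
    for line in job_stdout.splitlines():
        if line.startswith("Allocations"):
            in_table = True
        elif in_table and line.strip() and not line.startswith("ID"):
            ids.append(line.split()[0])
    return ids

def get_job_allocations(job_name):
    """Return one dict per allocation with its HTTP address and uptime (sketch)."""
    job = subprocess.run(["nomad", "job", "status", job_name], capture_output=True, text=True)
    allocs = []
    for alloc_id in _parse_alloc_ids(job.stdout):
        status = subprocess.run(["nomad", "alloc", "status", alloc_id], capture_output=True, text=True)
        ip = re.search(r"\*http\s+\S+\s+(\d+\.\d+\.\d+\.\d+):", status.stdout)
        started = re.search(r"Started At\s*=\s*(\S+)", status.stdout)
        uptime = "N/A"
        if started:
            start = datetime.fromisoformat(started.group(1).replace("Z", "+00:00"))
            uptime = str(datetime.now(timezone.utc) - start)
        allocs.append({"id": alloc_id, "ip": ip.group(1) if ip else None, "uptime": uptime})
    return allocs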