htb2trilium / htb_client.py
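# Client for the Hack The Box labs API (v4): fetches the authenticated user's
# profile and stats, plus the machine, challenge and sherlock catalogues.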
import requests
import warnings

# Suppress warnings (notably urllib3's InsecureRequestWarning from verify=False)
warnings.filterwarnings("ignore")

class HTBClient:
    def __init__(self, password):
        self.password = password
        self.base_url = 'https://labs.hackthebox.com/api/v4'
        self.proxies = {
            # "http": "http://127.0.0.1:8080",   # Burp proxy for HTTP traffic
            # "https": "http://127.0.0.1:8080"   # Burp proxy for HTTPS traffic
        }
        # Fetch user info
        self.user = self.get_user_info()
        # Fetch user stats and store them in self.user
        user_owns, root_owns, respects = self.get_user_stats(self.user['id'])
        self.user['user_owns'] = user_owns
        self.user['root_owns'] = root_owns
        self.user['respects'] = respects

    def get_user_info(self):
        headers = {
            "Authorization": f"Bearer {self.password}",
            "User-Agent": None  # Explicitly remove User-Agent
        }
        response = requests.get(
            f'{self.base_url}/user/info',
            headers=headers,
            proxies=self.proxies,
            verify=False  # Disable SSL verification
        )
        if response.status_code != 200:
            raise Exception(f"Error fetching user info from {self.base_url}/user/info: {response.status_code}, {response.text}")
        # Return the user info as a dictionary
        data = response.json().get('info')
        return {'id': data['id'], 'name': data['name'], 'email': data['email']}

    def get_user_stats(self, user_id):
        headers = {
            "Authorization": f"Bearer {self.password}",
            "User-Agent": None  # Explicitly remove User-Agent
        }
        response = requests.get(
            f'{self.base_url}/user/profile/basic/{user_id}',
            headers=headers,
            proxies=self.proxies,
            verify=False  # Disable SSL verification
        )
        if response.status_code != 200:
            raise Exception(f"Error fetching user stats: {response.status_code}, {response.text}")
        # Extract user statistics from the response
        data = response.json().get('profile')
        user_owns = data['user_owns']
        root_owns = data['system_owns']
        respects = data['respects']
        return user_owns, root_owns, respects

    def get_active_machines(self):
        machines = []
        seen_ids = set()    # Track unique machine IDs
        seen_names = set()  # Track unique machine names
        page = 1

        while True:
            response = requests.get(
                f'{self.base_url}/machine/paginated?per_page=100&page={page}',
                headers={
                    "Authorization": f"Bearer {self.password}",
                    "User-Agent": None  # Explicitly remove User-Agent
                },
                proxies=self.proxies,
                verify=False  # Disable SSL verification
            )

            if response.status_code != 200:
                raise Exception(f"Error fetching active machines: {response.status_code}, {response.text}")

            data = response.json()
            for machine in data['data']:
                if machine['id'] not in seen_ids and machine['name'] not in seen_names:
                    machines.append(machine)
                    seen_ids.add(machine['id'])
                    seen_names.add(machine['name'])

            # Check for pagination
            if page >= data['meta']['last_page']:
                break
            page += 1
        return machines

    def get_retired_machines(self):
        machines = []
        seen_ids = set()    # Track unique machine IDs
        seen_names = set()  # Track unique machine names
        page = 1

        while True:
            response = requests.get(
                f'{self.base_url}/machine/list/retired/paginated?per_page=100&page={page}',
                headers={
                    "Authorization": f"Bearer {self.password}",
                    "User-Agent": None  # Explicitly remove User-Agent
                },
                proxies=self.proxies,
                verify=False  # Disable SSL verification
            )

            if response.status_code != 200:
                raise Exception(f"Error fetching retired machines: {response.status_code}, {response.text}")

            data = response.json()
            for machine in data['data']:
                if machine['id'] not in seen_ids and machine['name'] not in seen_names:
                    machines.append(machine)
                    seen_ids.add(machine['id'])
                    seen_names.add(machine['name'])

            # Check for pagination
            if page >= data['meta']['last_page']:
                break
            page += 1
        return machines

    def get_all_machines(self):
        # Combine active and retired machines, ensuring no duplicates
        active_machines = self.get_active_machines()
        retired_machines = self.get_retired_machines()

        all_machines = active_machines + retired_machines
        seen_ids = set()    # Track unique machine IDs
        seen_names = set()  # Track unique machine names
        unique_machines = []

        for machine in all_machines:
            if machine['id'] not in seen_ids and machine['name'] not in seen_names:
                unique_machines.append(machine)
                seen_ids.add(machine['id'])
                seen_names.add(machine['name'])

        return unique_machines

    def get_all_challenges(self):
        challenges = []
        seen_ids = set()    # Track unique challenge IDs
        seen_names = set()  # Track unique challenge names
        page = 1

        while True:
            response = requests.get(
                f'{self.base_url}/challenges?per_page=100&page={page}',
                headers={
                    "Authorization": f"Bearer {self.password}",
                    "User-Agent": None  # Explicitly remove User-Agent
                },
                proxies=self.proxies,
                verify=False  # Disable SSL verification
            )

            if response.status_code != 200:
                raise Exception(f"Error fetching challenges: {response.status_code}, {response.text}")

            data = response.json()
            for challenge in data['data']:
                if challenge['id'] not in seen_ids and challenge['name'] not in seen_names:
                    challenges.append(challenge)
                    seen_ids.add(challenge['id'])
                    seen_names.add(challenge['name'])

            # Check for pagination
            if page >= data['meta']['last_page']:
                break
            page += 1
        return challenges

    def get_all_sherlocks(self):
        sherlocks = []
        seen_ids = set()    # Track unique sherlock IDs
        seen_names = set()  # Track unique sherlock names
        page = 1

        while True:
            response = requests.get(
                f'{self.base_url}/sherlocks?per_page=100&page={page}',
                headers={
                    "Authorization": f"Bearer {self.password}",
                    "User-Agent": None  # Explicitly remove User-Agent
                },
                proxies=self.proxies,
                verify=False  # Disable SSL verification
            )

            if response.status_code != 200:
                raise Exception(f"Error fetching sherlocks: {response.status_code}, {response.text}")

            data = response.json()
            for sherlock in data['data']:
                if sherlock['id'] not in seen_ids and sherlock['name'] not in seen_names:
                    sherlocks.append(sherlock)
                    seen_ids.add(sherlock['id'])
                    seen_names.add(sherlock['name'])

            # Check for pagination
            if page >= data['meta']['last_page']:
                break
            page += 1
        return sherlocks
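
# Usage sketch: a minimal example of driving HTBClient, assuming HTB_APP_TOKEN
# holds a Hack The Box App Token (the client sends it as a Bearer token on every
# request). The variable name and the printed fields are illustrative only.
if __name__ == '__main__':
    HTB_APP_TOKEN = 'paste-your-app-token-here'
    client = HTBClient(HTB_APP_TOKEN)
    print(f"Authenticated as {client.user['name']} "
          f"({client.user['user_owns']} user owns, {client.user['root_owns']} root owns)")
    machines = client.get_all_machines()   # active + retired, de-duplicated by id/name
    print(f"Fetched {len(machines)} machines")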