Optimize pwndbg.commands.ai import time (#1984)

Before:

```
root@pwndbg:~/pwndbg# PYTHONPROFILEIMPORTTIME=1 gdb --batch 2>&1 | grep 'pwndbg.commands.ai'
import time:      5363 |     174096 |   pwndbg.commands.ai
root@pwndbg:~/pwndbg# PYTHONPROFILEIMPORTTIME=1 gdb --batch 2>&1 | grep 'pwndbg.commands.ai'
import time:      1885 |     154032 |   pwndbg.commands.ai
root@pwndbg:~/pwndbg# PYTHONPROFILEIMPORTTIME=1 gdb --batch 2>&1 | grep 'pwndbg.commands.ai'
import time:      2085 |     148208 |   pwndbg.commands.ai
root@pwndbg:~/pwndbg# PYTHONPROFILEIMPORTTIME=1 gdb --batch 2>&1 | grep 'pwndbg.commands.ai'
import time:      1903 |     146919 |   pwndbg.commands.ai
```

After:

```
root@pwndbg:~/pwndbg# PYTHONPROFILEIMPORTTIME=1 gdb --batch 2>&1 | grep 'pwndbg.commands.ai'
import time:      5522 |     105163 |   pwndbg.commands.ai
root@pwndbg:~/pwndbg# PYTHONPROFILEIMPORTTIME=1 gdb --batch 2>&1 | grep 'pwndbg.commands.ai'
import time:      1842 |      88943 |   pwndbg.commands.ai
root@pwndbg:~/pwndbg# PYTHONPROFILEIMPORTTIME=1 gdb --batch 2>&1 | grep 'pwndbg.commands.ai'
import time:      1780 |      85127 |   pwndbg.commands.ai
root@pwndbg:~/pwndbg# PYTHONPROFILEIMPORTTIME=1 gdb --batch 2>&1 | grep 'pwndbg.commands.ai'
import time:      1825 |      83504 |   pwndbg.commands.ai
root@pwndbg:~/pwndbg# PYTHONPROFILEIMPORTTIME=1 gdb --batch 2>&1 | grep 'pwndbg.commands.ai'
import time:      1806 |      87045 |   pwndbg.commands.ai
root@pwndbg:~/pwndbg# PYTHONPROFILEIMPORTTIME=1 gdb --batch 2>&1 | grep 'pwndbg.commands.ai'
import time:      1756 |      81687 |   pwndbg.commands.ai
```
pull/1985/head
Disconnect3d 2 years ago committed by GitHub
parent b549286626
commit 69fd145cee
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@@ -13,7 +13,6 @@ import pprint
import re
import gdb
import requests
import pwndbg
import pwndbg.color.message as M
@@ -70,6 +69,13 @@ dummy = False
verbosity = 0
def _requests():
    """Return the ``requests`` module, importing it on first use.

    ``requests`` is expensive to import, so it is loaded lazily here
    rather than at module import time; Python caches the module after
    the first call, so repeated calls are cheap.
    """
    import requests as _mod

    return _mod
def set_dummy_mode(d=True) -> None:
    """Set the module-level ``dummy`` flag.

    Args:
        d: New value for the flag; defaults to True (dummy mode on).
    """
    global dummy

    dummy = d
@@ -275,11 +281,11 @@ def query_openai_chat(prompt, model="gpt-3.5-turbo", max_tokens=100, temperature
"temperature": temperature,
}
url = "https://api.openai.com/v1/chat/completions"
r = requests.post(
r = _requests().post(
url,
data=json.dumps(data),
headers={"Content-Type": "application/json"},
auth=("Bearer", config.ai_openai_api_key), # type: ignore[arg-type]
auth=("Bearer", config.ai_openai_api_key),
)
res = r.json()
if verbosity > 0:
@@ -315,11 +321,11 @@ def query_openai_completions(prompt, model="text-davinci-003", max_tokens=100, t
"stop": ["\n\nHuman:"],
}
url = "https://api.openai.com/v1/completions"
r = requests.post(
r = _requests().post(
url,
data=json.dumps(data),
headers={"Content-Type": "application/json"},
auth=("Bearer", config.ai_openai_api_key), # type: ignore[arg-type]
auth=("Bearer", config.ai_openai_api_key),
)
res = r.json()
if verbosity > 0:
@@ -367,7 +373,7 @@ def query_anthropic(prompt, model="claude-v1", max_tokens=100, temperature=0.0):
}
headers = {"x-api-key": config.ai_anthropic_api_key.value, "Content-Type": "application/json"}
url = "https://api.anthropic.com/v1/complete"
response = requests.post(url, data=json.dumps(data), headers=headers)
response = _requests().post(url, data=json.dumps(data), headers=headers)
data = response.json()
try:
return data["completion"].strip()
@@ -378,7 +384,7 @@ def query_anthropic(prompt, model="claude-v1", max_tokens=100, temperature=0.0):
def get_openai_models():
url = "https://api.openai.com/v1/models"
r = requests.get(url, auth=("Bearer", config.ai_openai_api_key)) # type: ignore[arg-type]
r = _requests().get(url, auth=("Bearer", config.ai_openai_api_key))
res = r.json()
if verbosity > 0:
print(M.warn(pprint.pformat(res)))

Loading…
Cancel
Save