API examples
1. Local server (OpenAI-compatible endpoint at http://127.0.0.1:8080)
import requests
import json
import pyttsx3
# Other locally served models to try: gemma-3-1b-it, qwen3-14b, openai-7b-v0.1
MODEL_NAME = "openai-7b-v0.1"  # model identifier sent with every request
API_URL = "http://127.0.0.1:8080"  # local OpenAI-compatible chat server
def frage_gpt(prompt_text):
    """Send *prompt_text* to the local chat-completion endpoint.

    Returns the parsed JSON response on HTTP 200; on any other status or
    on an exception, returns a German "Fehler: ..." string instead.
    """
    try:
        antwort = requests.post(
            API_URL + "/v1/chat/completions",
            headers={"Content-Type": "application/json"},
            data=json.dumps({
                "model": MODEL_NAME,
                "messages": [
                    {"role": "system", "content": "Du bist ein hilfreicher Assistent."},
                    {"role": "user", "content": prompt_text},
                ],
                "stream": False,
            }),
        )
        # Guard clause: anything but 200 is reported as text.
        if antwort.status_code != 200:
            return f"Fehler: {antwort.status_code} - {antwort.text}"
        return antwort.json()
    except Exception as e:
        return f"Fehler: {e}"
def frage_gpt_stream(prompt_text):
    """Stream a chat completion from the local endpoint, printing it token by token.

    Sends *prompt_text* with a German system prompt and stream=True; each
    server-sent "data: ..." line is decoded and its content delta printed
    immediately. Returns None; errors are printed rather than raised.
    """
    try:
        response = requests.post(
            API_URL + "/v1/chat/completions",
            headers={"Content-Type": "application/json"},
            data=json.dumps({
                "model": MODEL_NAME,
                "messages": [
                    # BUG FIX: the system prompt was previously sent with
                    # "role": "assistant" (inconsistent with frage_gpt above).
                    {"role": "system", "content": "Du bist ein hilfreicher Assistent."},
                    {"role": "user", "content": prompt_text}
                ],
                "stream": True
            }),
            stream=True  # read the HTTP response incrementally
        )
        if response.status_code != 200:
            print(f"Fehler: {response.status_code} - {response.text}")
            return
        # Process the SSE stream: payload lines start with "data: ".
        for line in response.iter_lines():
            if not line:
                continue
            decoded_line = line.decode("utf-8")
            if not decoded_line.startswith("data: "):
                continue
            data = decoded_line[6:]
            # BUG FIX: the stream terminates with "data: [DONE]", which is not
            # JSON; previously json.loads raised here and the silent except
            # clause ended every stream with a bare empty print.
            if data.strip() == "[DONE]":
                break
            payload = json.loads(data)
            delta = payload.get("choices", [{}])[0].get("delta", {}).get("content")
            if delta:
                print(delta, end="", flush=True)
        print()  # final newline after the streamed answer
    except Exception as e:
        # BUG FIX: errors were swallowed (only an empty line was printed).
        print(f"Fehler: {e}")
# Text-to-speech setup: moderate speaking rate, full volume, and a German
# voice when one is installed on this machine.
engine = pyttsx3.init()
engine.setProperty("rate", 150)
engine.setProperty("volume", 1.0)
german_voice = next(
    (v for v in engine.getProperty("voices") if "german" in v.name.lower()),
    None,
)
if german_voice is not None:
    engine.setProperty("voice", german_voice.id)
# Console loop: stream each answer to stdout. Type 'exit' or 'quit' to stop
# (consistent with main() in the history-based variant below).
while True:
    frage = input("Frage: ")
    if frage.strip().lower() in ("exit", "quit"):
        break
    frage_gpt_stream(frage)
    # NOTE(review): spoken output could be re-enabled here once the full
    # answer text is collected, e.g. engine.say(text); engine.runAndWait()
Chat Completion
import openai
import os
# Insert the API key here or read it from the environment variable
openai.api_key = os.getenv("OPENAI_API_KEY") or "sk-xxx..."  # placeholder fallback key
def frage_gpt(prompt_text):
    """Ask GPT-4 via the (legacy v0.x) openai ChatCompletion API.

    Returns the stripped answer text, or a German "Fehler: ..." string when
    the request fails for any reason.
    """
    try:
        response = openai.ChatCompletion.create(
            model="gpt-4",
            messages=[
                {"role": "system", "content": "Du bist ein hilfreicher Assistent."},
                {"role": "user", "content": prompt_text},
            ],
            temperature=0.7,
            max_tokens=500,
        )
        content = response['choices'][0]['message']['content']
    except Exception as e:
        return f"Fehler: {e}"
    return content.strip()
# Example usage: a single question/answer round trip on the console.
antwort = frage_gpt(input("Frage: "))
print("Antwort: ", antwort)
Chat Completion mit Verlauf
import openai
import os
openai.api_key = os.getenv("OPENAI_API_KEY") or "sk-xxxxx"  # placeholder fallback key
# Shared conversation history; main() appends user/assistant turns to it.
chat_history = [
    {"role": "system", "content": "Du bist ein hilfreicher Assistent."}
]
def frage_gpt_mit_verlauf(chat_history):
    """Send the full *chat_history* to GPT-4 and return the reply text.

    Returns the stripped answer, or a German "Fehler: ..." string on failure.
    """
    try:
        response = openai.ChatCompletion.create(
            model="gpt-4",
            messages=chat_history,
            temperature=0.7,
            max_tokens=500,
        )
        content = response['choices'][0]['message']['content']
    except Exception as e:
        return f"Fehler: {e}"
    return content.strip()
def frage_gpt_stream_mit_verlauf(chat_history):
    """Stream GPT-4's reply for *chat_history*, echoing it to stdout.

    Returns the complete answer text (stripped), or a German
    "Fehler: ..." string when the request fails.
    """
    try:
        stream = openai.ChatCompletion.create(
            model="gpt-4",
            messages=chat_history,
            temperature=0.7,
            max_tokens=500,
            stream=True
        )
        pieces = []
        for chunk in stream:
            choices = chunk['choices'] if 'choices' in chunk else []
            if not choices:
                continue
            content = choices[0]['delta'].get("content", "")
            print(content, end="", flush=True)
            pieces.append(content)
        print()
        return "".join(pieces).strip()
    except Exception as e:
        return f"Fehler: {e}"
def main():
    """Run an interactive chat loop; type 'exit' or 'quit' to stop."""
    while True:
        frage = input("Frage: ").strip()
        if frage.lower() in ('exit', 'quit'):
            print("Chat beendet.")
            return
        chat_history.append({"role": "user", "content": frage})
        # Streamed variant; frage_gpt_mit_verlauf(chat_history) is a
        # non-streaming drop-in replacement.
        antwort = frage_gpt_stream_mit_verlauf(chat_history)
        chat_history.append({"role": "assistant", "content": antwort})

if __name__ == "__main__":
    main()
Function Calling
import openai
import os
import json
# API-Key einfügen oder aus Umgebungsvariable lesen
openai.api_key = os.getenv("OPENAI_API_KEY") or "sk-xxx..."
# Tool schema advertised to the model (OpenAI function-calling format).
_weather_parameters = {
    "type": "object",
    "properties": {
        "location": {
            "type": "string",
            "description": "City and country e.g. Paris, France"
        }
    },
    "required": ["location"]
}
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get current temperature for a given location.",
            "parameters": _weather_parameters
        }
    }
]
messages = [{"role": "user", "content": "What is the weather like in Paris today?"}]
# First round trip: the model may answer directly or request a tool call.
response = openai.ChatCompletion.create(
    model="gpt-4-0613",  # model must support function calling
    messages=messages,
    tools=tools,
    tool_choice="auto"
)
# Call the function if GPT requests it
response_message = response['choices'][0]['message']
tool_calls = response_message.get("tool_calls", [])
def get_weather(location):
    """Dummy weather lookup used to answer the model's tool call."""
    return "The current temperature in " + location + " is 23 degrees and sunny."
# Execute the requested tool calls (if any) and send the results back so the
# model can phrase the final answer.
if tool_calls:
    # BUG FIX: the assistant message that requested the tools must be appended
    # exactly once, before the tool-result messages (not once per call).
    messages.append(response_message)
    for call in tool_calls:
        function_name = call['function']['name']
        arguments = json.loads(call['function']['arguments'])
        # Dispatch only the tools we actually implement; an unknown name
        # previously risked reusing a stale/undefined `result`.
        if function_name == "get_weather":
            result = get_weather(arguments['location'])
            messages.append({
                "role": "tool",
                "tool_call_id": call['id'],
                "name": function_name,
                "content": result
            })
    # Second round trip: the model turns the tool output into prose.
    # BUG FIX: issued once after all tool results, not per call.
    followup = openai.ChatCompletion.create(
        model="gpt-4-0613",
        messages=messages
    )
    print(followup['choices'][0]['message']['content'])
else:
    # No tool was used; the model answered directly.
    print(response_message['content'])