Dies ist eine alte Version des Dokuments!
import openai
import os

# Read the API key from the environment; the hard-coded fallback is a
# placeholder only. SECURITY NOTE(review): never commit a real key here —
# prefer failing fast when OPENAI_API_KEY is unset.
openai.api_key = os.getenv("OPENAI_API_KEY") or "sk-xxx..."


def frage_gpt(prompt_text):
    """Send a single prompt to GPT-4 and return the model's reply.

    Args:
        prompt_text: The user's question as a plain string.

    Returns:
        The assistant's answer with surrounding whitespace stripped, or a
        "Fehler: ..." string describing the exception if the API call failed.
    """
    try:
        response = openai.ChatCompletion.create(
            model="gpt-4",
            messages=[
                {"role": "system", "content": "Du bist ein hilfreicher Assistent."},
                {"role": "user", "content": prompt_text},
            ],
            temperature=0.7,
            max_tokens=500,
        )
        antwort = response['choices'][0]['message']['content']
        return antwort.strip()
    except Exception as e:
        # Best-effort error reporting: the demo returns a readable string
        # instead of propagating the exception.
        return f"Fehler: {e}"


# Example usage. Guarded with __main__ so importing this module has no
# side effects (previously input()/print() ran unconditionally on import);
# this also matches the main-guard style used by the other scripts in
# this document.
if __name__ == "__main__":
    frage = input("Frage: ")
    antwort = frage_gpt(frage)
    print("Antwort: ", antwort)
import openai
import os

openai.api_key = os.getenv("OPENAI_API_KEY") or "sk-xxxxx"

# Running conversation shared by every turn; seeded with the system prompt.
chat_history = [
    {"role": "system", "content": "Du bist ein hilfreicher Assistent."}
]


def frage_gpt_mit_verlauf(chat_history):
    """Ask GPT-4 with the full conversation history (non-streaming).

    Returns the assistant's reply stripped of surrounding whitespace,
    or a "Fehler: ..." string when the API call raises.
    """
    try:
        reply = openai.ChatCompletion.create(
            model="gpt-4",
            messages=chat_history,
            temperature=0.7,
            max_tokens=500,
        )
        return reply['choices'][0]['message']['content'].strip()
    except Exception as e:
        return f"Fehler: {e}"


def frage_gpt_stream_mit_verlauf(chat_history):
    """Ask GPT-4 with the full history, echoing tokens as they stream in.

    Each received token is printed immediately (no trailing newline until
    the stream ends). Returns the complete reply stripped of surrounding
    whitespace, or a "Fehler: ..." string when the API call raises.
    """
    try:
        stream = openai.ChatCompletion.create(
            model="gpt-4",
            messages=chat_history,
            temperature=0.7,
            max_tokens=500,
            stream=True,
        )
        pieces = []
        for chunk in stream:
            # Defensive: skip keep-alive chunks without a choices entry.
            choice_list = chunk['choices'] if 'choices' in chunk else []
            if choice_list:
                token = choice_list[0]['delta'].get("content", "")
                print(token, end="", flush=True)
                pieces.append(token)
        print()
        return "".join(pieces).strip()
    except Exception as e:
        return f"Fehler: {e}"


def main():
    """Interactive chat loop; type 'exit' or 'quit' to stop."""
    while True:
        frage = input("Frage: ").strip()
        if frage.lower() in ('exit', 'quit'):
            print("Chat beendet.")
            break
        chat_history.append({"role": "user", "content": frage})
        antwort = frage_gpt_stream_mit_verlauf(chat_history)
        # antwort = frage_gpt_mit_verlauf(chat_history)  # non-streaming alternative
        chat_history.append({"role": "assistant", "content": antwort})


if __name__ == "__main__":
    main()
import openai
import os
import json

# Read the API key from the environment; the hard-coded fallback is a
# placeholder only — never commit a real key.
openai.api_key = os.getenv("OPENAI_API_KEY") or "sk-xxx..."

# Tool (function) schema advertised to the model.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get current temperature for a given location.",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "City and country e.g. Paris, France"
                    }
                },
                "required": ["location"]
            }
        }
    }
]

messages = [{"role": "user", "content": "What is the weather like in Paris today?"}]

response = openai.ChatCompletion.create(
    model="gpt-4-0613",  # model must support function calling
    messages=messages,
    tools=tools,
    tool_choice="auto"
)

# Inspect the model's reply: did it request a tool call?
response_message = response['choices'][0]['message']
tool_calls = response_message.get("tool_calls", [])


def get_weather(location):
    """Dummy tool implementation: return a canned weather report."""
    return f"The current temperature in {location} is 23 degrees and sunny."


if tool_calls:
    # The assistant message that requested the tool calls must appear
    # exactly once in the history, before the tool results. (Bug fix:
    # it was previously appended inside the loop, duplicating it when
    # the model issued more than one tool call.)
    messages.append(response_message)
    for call in tool_calls:
        function_name = call['function']['name']
        # Arguments arrive as a JSON-encoded string.
        arguments = json.loads(call['function']['arguments'])
        if function_name == "get_weather":
            result = get_weather(arguments['location'])
            # Feed the tool result back, linked by tool_call_id.
            messages.append({
                "role": "tool",
                "tool_call_id": call['id'],
                "name": function_name,
                "content": result
            })
    # Let GPT phrase the final answer from the tool results —
    # one follow-up request after all results are collected.
    followup = openai.ChatCompletion.create(
        model="gpt-4-0613",
        messages=messages
    )
    print(followup['choices'][0]['message']['content'])
else:
    # No tool was used; print the model's direct answer.
    print(response_message['content'])