Comments (105)
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
from chatgpt-next-web-langchain.
I looked at the documentation for this service, and it seems like they provide an API structure compatible with OpenAI (https://developers.sber.ru/docs/ru/gigachat/api/keeping-context).
In that case, you just need to set the BASE_URL and CUSTOM_MODELS parameters to use this service.
For example:
BASE_URL=https://gigachat.devices.sberbank.ru/api
CUSTOM_MODELS=-all,GigaChat:latest,GigaChat-Plus,GigaChat-Pro
You can also configure the OpenAI Endpoint on the UI: https://gigachat.devices.sberbank.ru/api
However, since their API does not support tools (function calling), you cannot use the plugins. Also, I cannot verify the effectiveness of the mentioned operations because I don't have the ability to register an account.
from chatgpt-next-web-langchain.
я вставил https://gigachat.devices.sberbank.ru и указал ключ
вышла ошибка
{"error": {"code": "500", "message": "A server error has occurred"}}
во вложении код работает
гига.txt
from chatgpt-next-web-langchain.
Can you show me the detailed HTTP request information? I am unable to download the attachment you provided.
from chatgpt-next-web-langchain.
могу для теста предоставить ключ
import os
import requests
import urllib3
import uuid
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton, QTextEdit, QLabel, QVBoxLayout, QWidget, QDialog, QLineEdit, QGridLayout, QHBoxLayout, QShortcut, QComboBox, QSpacerItem, QSizePolicy, QInputDialog, QFormLayout
from PyQt5.QtCore import QSettings, Qt
from PyQt5.QtGui import QPalette, QColor, QKeySequence, QFont
# The Sber endpoints present certificates from the Russian Trusted Root CA;
# this script disables verification (verify=False below), so silence the
# resulting InsecureRequestWarning noise.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# GigaChat REST endpoints.
API_URL = "https://gigachat.devices.sberbank.ru/api/v1/chat/completions"
OAUTH_URL = "https://ngw.devices.sberbank.ru:9443/api/v2/oauth"
SCOPE = "GIGACHAT_API_PERS"
FILE_URL = "https://gigachat.devices.sberbank.ru/api/v1/files/{file_id}/content"
EMBEDDINGS_URL = "https://gigachat.devices.sberbank.ru/api/v1/embeddings"
TOKENS_COUNT_URL = "https://gigachat.devices.sberbank.ru/api/v1/tokens/count"

# Generated images are saved to a folder on the user's Desktop.
DESKTOP_PATH = os.path.join(os.path.expanduser("~"), "Desktop")
IMAGES_FOLDER = os.path.join(DESKTOP_PATH, "Giga pfoto")
# exist_ok removes the check-then-create race of the old os.path.exists guard.
os.makedirs(IMAGES_FOLDER, exist_ok=True)
def get_token(CLIENT_SECRET):
    """Exchange the base64 client secret for a GigaChat OAuth access token.

    Returns the access-token string, or None when the request failed or the
    response carried no token.
    """
    headers = {
        'Content-Type': 'application/x-www-form-urlencoded',
        'Authorization': 'Basic ' + CLIENT_SECRET,
        # Every Sber API call must carry a unique request id.
        'RqUID': str(uuid.uuid4())
    }
    data = {
        'scope': SCOPE,
        'grant_type': 'client_credentials',
    }
    # NOTE(review): verify=False disables TLS certificate checks; installing
    # the Russian Trusted Root CA would be the safer alternative.
    response = requests.post(OAUTH_URL, headers=headers, data=data, verify=False)
    # Bug fix: only the body's non-emptiness was checked before, so an HTTP
    # error page was fed to response.json() and could raise on non-JSON text.
    if not response.ok:
        return None
    try:
        return response.json().get("access_token")
    except ValueError:
        return None
def send_request(question, token, temperature, model, chat_history):
    """POST a chat-completion request to GigaChat.

    The new user message is appended to *chat_history* for this request only
    (the list itself is not mutated). Returns the parsed JSON response dict,
    or None on HTTP/parse failure.
    """
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer ' + token,
        'RqUID': str(uuid.uuid4())
    }
    data = {
        "model": model,
        "messages": chat_history + [{"role": "user", "content": question}],
        "temperature": temperature
    }
    response = requests.post(API_URL, headers=headers, json=data, verify=False)
    # Bug fix: check the HTTP status before attempting to parse the body.
    if not response.ok:
        return None
    try:
        return response.json()
    except ValueError:
        return None
def get_image(file_id, token):
    """Download the raw bytes of a generated file by its id.

    Returns the image bytes, or None on failure.
    """
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer ' + token,
        'RqUID': str(uuid.uuid4())
    }
    response = requests.get(FILE_URL.format(file_id=file_id), headers=headers, verify=False)
    # Bug fix: previously any non-empty body (e.g. a JSON error message)
    # was treated as image bytes and later written to disk.
    if response.ok and response.content:
        return response.content
    return None
def get_embeddings(texts, token, model="Embeddings"):
    """Request embedding vectors for the given list of texts.

    Returns the parsed JSON response, or None on HTTP/parse failure.
    """
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer ' + token,
        'RqUID': str(uuid.uuid4())
    }
    data = {
        "model": model,
        "input": texts
    }
    response = requests.post(EMBEDDINGS_URL, headers=headers, json=data, verify=False)
    # Bug fix: check the HTTP status before attempting to parse the body.
    if not response.ok:
        return None
    try:
        return response.json()
    except ValueError:
        return None
def get_tokens_count(texts, token, model="GigaChat"):
    """Ask the tokens/count endpoint how many tokens each text uses.

    Returns the parsed JSON response (callers index result[0]["tokens"]),
    or None on HTTP/parse failure.
    """
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer ' + token,
        'RqUID': str(uuid.uuid4())
    }
    data = {
        "model": model,
        "input": texts
    }
    response = requests.post(TOKENS_COUNT_URL, headers=headers, json=data, verify=False)
    # Bug fix: check the HTTP status before attempting to parse the body.
    if not response.ok:
        return None
    try:
        return response.json()
    except ValueError:
        return None
class PromptDialog(QDialog):
    """Dialog for editing four prompt presets and choosing the active one.

    Presets persist in QSettings under ``prompt{i}_name`` / ``prompt{i}_desc``;
    the chosen preset key is stored under ``selected_prompt``.
    """

    def __init__(self, parent=None):
        # Bug fix: the constructor was named ``init`` and called
        # ``self.init`` — the ``__init__`` underscores were stripped when
        # the code was pasted into the issue as Markdown.
        super(PromptDialog, self).__init__(parent)
        self.setWindowTitle("Выберите промт")
        self.settings = QSettings('mycompany', 'myapp')
        self.layout = QVBoxLayout()
        self.form_layout = QFormLayout()
        self.prompt_edits = {}  # maps "prompt{i}" -> (name_edit, desc_edit)
        for i in range(1, 5):
            name_edit = QLineEdit(self)
            name_edit.setText(self.settings.value(f"prompt{i}_name", ""))
            desc_edit = QLineEdit(self)
            desc_edit.setText(self.settings.value(f"prompt{i}_desc", ""))
            self.prompt_edits[f"prompt{i}"] = (name_edit, desc_edit)
            self.form_layout.addRow(QLabel(f"ПРОМТ{i} Название:"), name_edit)
            self.form_layout.addRow(QLabel(f"ПРОМТ{i} Описание:"), desc_edit)
        self.layout.addLayout(self.form_layout)
        self.prompt_combo = QComboBox(self)
        self.prompt_combo.addItems([f"prompt{i}" for i in range(1, 5)])
        self.prompt_combo.setCurrentText(self.settings.value("selected_prompt", "prompt1"))
        self.layout.addWidget(self.prompt_combo)
        self.button = QPushButton("OK", self)
        self.button.clicked.connect(self.handleButton)
        self.layout.addWidget(self.button)
        self.setLayout(self.layout)
        palette = self.palette()
        palette.setColor(QPalette.ColorRole.Window, QColor("#808080"))
        self.setPalette(palette)

    def handleButton(self):
        """Persist all presets and the selection, then close with accept()."""
        for i in range(1, 5):
            self.settings.setValue(f"prompt{i}_name", self.prompt_edits[f"prompt{i}"][0].text())
            self.settings.setValue(f"prompt{i}_desc", self.prompt_edits[f"prompt{i}"][1].text())
        selected_prompt = self.prompt_combo.currentText()
        self.settings.setValue("selected_prompt", selected_prompt)
        self.accept()
class SettingsDialog(QDialog):
    """Dialog for editing the client secret, temperature and model choice."""

    def __init__(self, parent=None):
        # Bug fix: the constructor was named ``init`` and called
        # ``self.init`` — the ``__init__`` underscores were stripped when
        # the code was pasted into the issue as Markdown.
        super(SettingsDialog, self).__init__(parent)
        self.setWindowTitle("Настройки")
        self.settings = QSettings('mycompany', 'myapp')
        self.layout = QVBoxLayout()
        self.label = QLabel("Введите CLIENT_SECRET:")
        self.layout.addWidget(self.label)
        self.edit = QLineEdit(self)
        self.edit.setText(self.settings.value("client_secret"))
        self.layout.addWidget(self.edit)
        self.temperature_label = QLabel("Температура:")
        self.layout.addWidget(self.temperature_label)
        self.temperature_edit = QLineEdit(self)
        self.temperature_edit.setText(self.settings.value("temperature"))
        self.layout.addWidget(self.temperature_edit)
        self.model_label = QLabel("Модель:")
        self.layout.addWidget(self.model_label)
        self.model_combo = QComboBox(self)
        self.model_combo.addItems(["GigaChat-Pro", "GigaChat-Plus", "GigaChat:latest"])
        self.model_combo.setCurrentText(self.settings.value("model", defaultValue="GigaChat-Plus"))
        self.layout.addWidget(self.model_combo)
        self.button = QPushButton("OK", self)
        self.button.clicked.connect(self.handleButton)
        self.layout.addWidget(self.button)
        self.setLayout(self.layout)
        palette = self.palette()
        palette.setColor(QPalette.ColorRole.Window, QColor("#808080"))
        self.setPalette(palette)

    def handleButton(self):
        """Persist the secret, temperature and model, then close with accept()."""
        self.settings.setValue("client_secret", self.edit.text())
        self.settings.setValue("temperature", self.temperature_edit.text())
        self.settings.setValue("model", self.model_combo.currentText())
        self.accept()
class MainWindow(QMainWindow):
    """Main chat window: answer view, question editor and control buttons."""

    def __init__(self, parent=None):
        # Bug fix: the constructor was named ``init`` and called
        # ``self.init`` — the ``__init__`` underscores were stripped when
        # the code was pasted into the issue as Markdown.
        super(MainWindow, self).__init__(parent)
        self.settings = QSettings('mycompany', 'myapp')
        self.setWindowTitle("GigaChat")
        self.centralWidget = QWidget()
        self.layout = QGridLayout(self.centralWidget)
        # Answer area on top, question editor below it.
        self.answer_text = QTextEdit(self)
        self.layout.addWidget(self.answer_text, 0, 0)
        self.question_text = QTextEdit(self)
        self.question_text.setFixedHeight(200)
        self.layout.addWidget(self.question_text, 1, 0)
        self.button_layout = QHBoxLayout()
        self.button_layout.setSpacing(5)
        self.button_layout.setContentsMargins(0, 0, 0, 0)
        self.button = QPushButton("ПУСК", self)
        self.button.clicked.connect(self.on_button_click)
        self.set_button_color(self.button)
        self.button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.button_layout.addWidget(self.button)
        self.stop_button = QPushButton("СТОП", self)
        self.stop_button.clicked.connect(self.on_stop_button_click)
        self.set_button_color(self.stop_button)
        self.stop_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.stop_button.hide()  # only shown while a request is in flight
        self.button_layout.addWidget(self.stop_button)
        self.clear_button = QPushButton("ОЧИСТИТЬ", self)
        self.clear_button.clicked.connect(self.on_clear_button_click)
        self.set_button_color(self.clear_button)
        self.clear_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.button_layout.addWidget(self.clear_button)
        self.settings_button = QPushButton("Настройки", self)
        self.settings_button.clicked.connect(self.on_settings_button_click)
        self.set_button_color(self.settings_button)
        self.settings_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.button_layout.addWidget(self.settings_button)
        self.prompt_button = QPushButton("ПРОМТ", self)
        self.prompt_button.clicked.connect(self.on_prompt_button_click)
        self.set_button_color(self.prompt_button)
        self.prompt_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.button_layout.addWidget(self.prompt_button)
        # "sent/received" token counter label.
        self.tokens_label = QLabel("0/0", self)
        self.tokens_label.setFont(QFont("Arial", 16))
        self.tokens_label.setStyleSheet("QLabel { color : white; }")
        self.button_layout.addWidget(self.tokens_label)
        self.prompt_label = QLabel(self)
        self.prompt_label.setFont(QFont("Arial", 16))
        self.prompt_label.setStyleSheet("QLabel { color : white; }")
        self.button_layout.addWidget(self.prompt_label)
        self.model_label = QLabel(self.settings.value("model", defaultValue="GigaChat-Plus"), self)
        self.model_label.setFont(QFont("Arial", 16))
        self.model_label.setStyleSheet("QLabel { color : white; }")
        self.button_layout.addWidget(self.model_label)
        self.layout.addLayout(self.button_layout, 2, 0)
        self.layout.setAlignment(Qt.AlignmentFlag.AlignBottom)
        self.setCentralWidget(self.centralWidget)
        # Ctrl+Enter submits the question.
        self.shortcut = QShortcut(QKeySequence("Ctrl+Return"), self.centralWidget)
        self.shortcut.activated.connect(self.on_button_click)
        self.is_writing = False
        self.chat_history = []
        self.selected_prompt = self.settings.value("selected_prompt", "prompt1")
        self.update_prompt_label()

    def set_button_color(self, button):
        """Paint *button* green to match the app's colour scheme."""
        palette = button.palette()
        palette.setColor(QPalette.ColorRole.Button, QColor("#008400"))
        button.setPalette(palette)

    def update_prompt_label(self):
        """Show the display name of the currently selected prompt preset."""
        selected_prompt_name = self.settings.value(f"{self.selected_prompt}_name", "")
        self.prompt_label.setText(selected_prompt_name)

    def on_button_click(self):
        """Send the question (with the active prompt prefix) and display the answer."""
        self.is_writing = True
        self.stop_button.show()
        question = self.question_text.toPlainText()
        selected_prompt_text = self.settings.value(f"{self.selected_prompt}_desc", "")
        if selected_prompt_text:
            question = selected_prompt_text + " " + question
        CLIENT_SECRET = self.settings.value("client_secret")
        token = get_token(CLIENT_SECRET)
        # Bug fix: float("") raised ValueError when the temperature setting
        # had been saved empty; fall back to the default instead.
        try:
            temperature = float(self.settings.value("temperature", defaultValue=0.7))
        except (TypeError, ValueError):
            temperature = 0.7
        model = self.settings.value("model", defaultValue="GigaChat-Plus")
        if token and self.is_writing:
            response = send_request(question, token, temperature, model, self.chat_history)
            # Bug fix: both counters default to 0 so the label update below
            # cannot hit an unbound local when token counting fails.
            sent_tokens_count = 0
            received_tokens_count = 0
            sent_tokens = get_tokens_count([question], token, model)
            if sent_tokens:
                sent_tokens_count = sent_tokens[0]["tokens"]
            if response and "choices" in response:
                answer = response["choices"][0]["message"]["content"]
                received_tokens = get_tokens_count([answer], token, model)
                if received_tokens:
                    received_tokens_count = received_tokens[0]["tokens"]
                self.tokens_label.setText(f"{sent_tokens_count}/{received_tokens_count}")
                self.answer_text.insertPlainText(answer)
                self.chat_history.append({"role": "user", "content": question})
                self.chat_history.append({"role": "assistant", "content": answer})
                # Image answers embed the generated file id in an <img> tag.
                if "<img src=" in answer:
                    file_id = answer.split('"')[1]
                    image_data = get_image(file_id, token)
                    if image_data:
                        file_name = os.path.join(IMAGES_FOLDER, file_id + ".jpg")
                        with open(file_name, "wb") as f:
                            f.write(image_data)
                        self.answer_text.insertPlainText(f"\nИзображение сохранено в файле {file_name}")
                    else:
                        self.answer_text.insertPlainText("\nОшибка: не удалось получить изображение")
            else:
                self.answer_text.insertPlainText("Ошибка: сервер вернул неожиданный ответ")
        else:
            self.answer_text.insertPlainText("Ошибка: не удалось получить токен")
        self.stop_button.hide()
        self.is_writing = False

    def on_stop_button_click(self):
        """Clear the in-progress flag; the request flow checks it before sending."""
        self.is_writing = False
        self.stop_button.hide()

    def on_clear_button_click(self):
        """Clear both text panes and forget the conversation history."""
        self.question_text.clear()
        self.answer_text.clear()
        self.chat_history = []

    def on_settings_button_click(self):
        """Open the settings dialog and refresh dependent widgets on OK."""
        dlg = SettingsDialog(self)
        if dlg.exec():
            self.set_button_color(self.button)
            self.set_button_color(self.clear_button)
            self.set_button_color(self.settings_button)
            self.model_label.setText(self.settings.value("model", defaultValue="GigaChat-Plus"))

    def on_prompt_button_click(self):
        """Open the prompt dialog and update the selected prompt label."""
        dlg = PromptDialog(self)
        if dlg.exec():
            self.selected_prompt = self.settings.value("selected_prompt", "prompt1")
            self.update_prompt_label()
if __name__ == "__main__":
    # Guarding the UI bootstrap lets the module be imported without
    # opening a window.
    app = QApplication(sys.argv)
    app.setStyle('Fusion')
    palette = QPalette()
    palette.setColor(QPalette.ColorRole.Window, QColor("#0000ff"))
    palette.setColor(QPalette.ColorRole.Base, QColor("#55557f"))
    palette.setColor(QPalette.ColorRole.Text, QColor("#ffffff"))
    app.setPalette(palette)
    window = MainWindow()
    window.resize(1200, 900)
    window.show()
    # Propagate Qt's exit status to the shell.
    sys.exit(app.exec())
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
могу для теста предоставить ключ
import os
import requests
import urllib3
import uuid
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton, QTextEdit, QLabel, QVBoxLayout, QWidget, QDialog, QLineEdit, QGridLayout, QHBoxLayout, QShortcut, QComboBox, QSpacerItem, QSizePolicy, QInputDialog, QFormLayout
from PyQt5.QtCore import QSettings, Qt
from PyQt5.QtGui import QPalette, QColor, QKeySequence, QFont
# The Sber endpoints present certificates from the Russian Trusted Root CA;
# this script disables verification (verify=False below), so silence the
# resulting InsecureRequestWarning noise.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# GigaChat REST endpoints.
API_URL = "https://gigachat.devices.sberbank.ru/api/v1/chat/completions"
OAUTH_URL = "https://ngw.devices.sberbank.ru:9443/api/v2/oauth"
SCOPE = "GIGACHAT_API_PERS"
FILE_URL = "https://gigachat.devices.sberbank.ru/api/v1/files/{file_id}/content"
EMBEDDINGS_URL = "https://gigachat.devices.sberbank.ru/api/v1/embeddings"
TOKENS_COUNT_URL = "https://gigachat.devices.sberbank.ru/api/v1/tokens/count"

# Generated images are saved to a folder on the user's Desktop.
DESKTOP_PATH = os.path.join(os.path.expanduser("~"), "Desktop")
IMAGES_FOLDER = os.path.join(DESKTOP_PATH, "Giga pfoto")
# exist_ok removes the check-then-create race of the old os.path.exists guard.
os.makedirs(IMAGES_FOLDER, exist_ok=True)


def get_token(CLIENT_SECRET):
    """Exchange the base64 client secret for a GigaChat OAuth access token.

    Returns the access-token string, or None when the request failed or the
    response carried no token.
    """
    headers = {
        'Content-Type': 'application/x-www-form-urlencoded',
        'Authorization': 'Basic ' + CLIENT_SECRET,
        # Every Sber API call must carry a unique request id.
        'RqUID': str(uuid.uuid4())
    }
    data = {
        'scope': SCOPE,
        'grant_type': 'client_credentials',
    }
    response = requests.post(OAUTH_URL, headers=headers, data=data, verify=False)
    # Bug fix: only the body's non-emptiness was checked before, so an HTTP
    # error page was fed to response.json() and could raise on non-JSON text.
    if not response.ok:
        return None
    try:
        return response.json().get("access_token")
    except ValueError:
        return None


def send_request(question, token, temperature, model, chat_history):
    """POST a chat-completion request; return the parsed JSON dict or None."""
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer ' + token,
        'RqUID': str(uuid.uuid4())
    }
    data = {
        "model": model,
        "messages": chat_history + [{"role": "user", "content": question}],
        "temperature": temperature
    }
    response = requests.post(API_URL, headers=headers, json=data, verify=False)
    # Bug fix: check the HTTP status before attempting to parse the body.
    if not response.ok:
        return None
    try:
        return response.json()
    except ValueError:
        return None


def get_image(file_id, token):
    """Download the raw bytes of a generated file by its id; None on failure."""
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer ' + token,
        'RqUID': str(uuid.uuid4())
    }
    response = requests.get(FILE_URL.format(file_id=file_id), headers=headers, verify=False)
    # Bug fix: previously any non-empty body (e.g. a JSON error message)
    # was treated as image bytes and later written to disk.
    if response.ok and response.content:
        return response.content
    return None


def get_embeddings(texts, token, model="Embeddings"):
    """Request embedding vectors for *texts*; parsed JSON or None."""
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer ' + token,
        'RqUID': str(uuid.uuid4())
    }
    data = {
        "model": model,
        "input": texts
    }
    response = requests.post(EMBEDDINGS_URL, headers=headers, json=data, verify=False)
    # Bug fix: check the HTTP status before attempting to parse the body.
    if not response.ok:
        return None
    try:
        return response.json()
    except ValueError:
        return None


def get_tokens_count(texts, token, model="GigaChat"):
    """Query tokens/count; callers index result[0]["tokens"]. None on failure."""
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer ' + token,
        'RqUID': str(uuid.uuid4())
    }
    data = {
        "model": model,
        "input": texts
    }
    response = requests.post(TOKENS_COUNT_URL, headers=headers, json=data, verify=False)
    # Bug fix: check the HTTP status before attempting to parse the body.
    if not response.ok:
        return None
    try:
        return response.json()
    except ValueError:
        return None
class PromptDialog(QDialog):
    """Dialog for editing four prompt presets and choosing the active one.

    Presets persist in QSettings under ``prompt{i}_name`` / ``prompt{i}_desc``;
    the chosen preset key is stored under ``selected_prompt``.
    """

    def __init__(self, parent=None):
        # Bug fix: the constructor was named ``init`` and called
        # ``self.init`` — the ``__init__`` underscores were stripped when
        # the code was pasted into the issue as Markdown.
        super(PromptDialog, self).__init__(parent)
        self.setWindowTitle("Выберите промт")
        self.settings = QSettings('mycompany', 'myapp')
        self.layout = QVBoxLayout()
        self.form_layout = QFormLayout()
        self.prompt_edits = {}  # maps "prompt{i}" -> (name_edit, desc_edit)
        for i in range(1, 5):
            name_edit = QLineEdit(self)
            name_edit.setText(self.settings.value(f"prompt{i}_name", ""))
            desc_edit = QLineEdit(self)
            desc_edit.setText(self.settings.value(f"prompt{i}_desc", ""))
            self.prompt_edits[f"prompt{i}"] = (name_edit, desc_edit)
            self.form_layout.addRow(QLabel(f"ПРОМТ{i} Название:"), name_edit)
            self.form_layout.addRow(QLabel(f"ПРОМТ{i} Описание:"), desc_edit)
        self.layout.addLayout(self.form_layout)
        self.prompt_combo = QComboBox(self)
        self.prompt_combo.addItems([f"prompt{i}" for i in range(1, 5)])
        self.prompt_combo.setCurrentText(self.settings.value("selected_prompt", "prompt1"))
        self.layout.addWidget(self.prompt_combo)
        self.button = QPushButton("OK", self)
        self.button.clicked.connect(self.handleButton)
        self.layout.addWidget(self.button)
        self.setLayout(self.layout)
        palette = self.palette()
        palette.setColor(QPalette.ColorRole.Window, QColor("#808080"))
        self.setPalette(palette)

    def handleButton(self):
        """Persist all presets and the selection, then close with accept()."""
        for i in range(1, 5):
            self.settings.setValue(f"prompt{i}_name", self.prompt_edits[f"prompt{i}"][0].text())
            self.settings.setValue(f"prompt{i}_desc", self.prompt_edits[f"prompt{i}"][1].text())
        selected_prompt = self.prompt_combo.currentText()
        self.settings.setValue("selected_prompt", selected_prompt)
        self.accept()
class SettingsDialog(QDialog):
    """Dialog for editing the client secret, temperature and model choice."""

    def __init__(self, parent=None):
        # Bug fix: the constructor was named ``init`` and called
        # ``self.init`` — the ``__init__`` underscores were stripped when
        # the code was pasted into the issue as Markdown.
        super(SettingsDialog, self).__init__(parent)
        self.setWindowTitle("Настройки")
        self.settings = QSettings('mycompany', 'myapp')
        self.layout = QVBoxLayout()
        self.label = QLabel("Введите CLIENT_SECRET:")
        self.layout.addWidget(self.label)
        self.edit = QLineEdit(self)
        self.edit.setText(self.settings.value("client_secret"))
        self.layout.addWidget(self.edit)
        self.temperature_label = QLabel("Температура:")
        self.layout.addWidget(self.temperature_label)
        self.temperature_edit = QLineEdit(self)
        self.temperature_edit.setText(self.settings.value("temperature"))
        self.layout.addWidget(self.temperature_edit)
        self.model_label = QLabel("Модель:")
        self.layout.addWidget(self.model_label)
        self.model_combo = QComboBox(self)
        self.model_combo.addItems(["GigaChat-Pro", "GigaChat-Plus", "GigaChat:latest"])
        self.model_combo.setCurrentText(self.settings.value("model", defaultValue="GigaChat-Plus"))
        self.layout.addWidget(self.model_combo)
        self.button = QPushButton("OK", self)
        self.button.clicked.connect(self.handleButton)
        self.layout.addWidget(self.button)
        self.setLayout(self.layout)
        palette = self.palette()
        palette.setColor(QPalette.ColorRole.Window, QColor("#808080"))
        self.setPalette(palette)

    def handleButton(self):
        """Persist the secret, temperature and model, then close with accept()."""
        self.settings.setValue("client_secret", self.edit.text())
        self.settings.setValue("temperature", self.temperature_edit.text())
        self.settings.setValue("model", self.model_combo.currentText())
        self.accept()
class MainWindow(QMainWindow):
    """Main chat window: answer view, question editor and control buttons."""

    def __init__(self, parent=None):
        # Bug fix: the constructor was named ``init`` and called
        # ``self.init`` — the ``__init__`` underscores were stripped when
        # the code was pasted into the issue as Markdown.
        super(MainWindow, self).__init__(parent)
        self.settings = QSettings('mycompany', 'myapp')
        self.setWindowTitle("GigaChat")
        self.centralWidget = QWidget()
        self.layout = QGridLayout(self.centralWidget)
        # Answer area on top, question editor below it.
        self.answer_text = QTextEdit(self)
        self.layout.addWidget(self.answer_text, 0, 0)
        self.question_text = QTextEdit(self)
        self.question_text.setFixedHeight(200)
        self.layout.addWidget(self.question_text, 1, 0)
        self.button_layout = QHBoxLayout()
        self.button_layout.setSpacing(5)
        self.button_layout.setContentsMargins(0, 0, 0, 0)
        self.button = QPushButton("ПУСК", self)
        self.button.clicked.connect(self.on_button_click)
        self.set_button_color(self.button)
        self.button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.button_layout.addWidget(self.button)
        self.stop_button = QPushButton("СТОП", self)
        self.stop_button.clicked.connect(self.on_stop_button_click)
        self.set_button_color(self.stop_button)
        self.stop_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.stop_button.hide()  # only shown while a request is in flight
        self.button_layout.addWidget(self.stop_button)
        self.clear_button = QPushButton("ОЧИСТИТЬ", self)
        self.clear_button.clicked.connect(self.on_clear_button_click)
        self.set_button_color(self.clear_button)
        self.clear_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.button_layout.addWidget(self.clear_button)
        self.settings_button = QPushButton("Настройки", self)
        self.settings_button.clicked.connect(self.on_settings_button_click)
        self.set_button_color(self.settings_button)
        self.settings_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.button_layout.addWidget(self.settings_button)
        self.prompt_button = QPushButton("ПРОМТ", self)
        self.prompt_button.clicked.connect(self.on_prompt_button_click)
        self.set_button_color(self.prompt_button)
        self.prompt_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.button_layout.addWidget(self.prompt_button)
        # "sent/received" token counter label.
        self.tokens_label = QLabel("0/0", self)
        self.tokens_label.setFont(QFont("Arial", 16))
        self.tokens_label.setStyleSheet("QLabel { color : white; }")
        self.button_layout.addWidget(self.tokens_label)
        self.prompt_label = QLabel(self)
        self.prompt_label.setFont(QFont("Arial", 16))
        self.prompt_label.setStyleSheet("QLabel { color : white; }")
        self.button_layout.addWidget(self.prompt_label)
        self.model_label = QLabel(self.settings.value("model", defaultValue="GigaChat-Plus"), self)
        self.model_label.setFont(QFont("Arial", 16))
        self.model_label.setStyleSheet("QLabel { color : white; }")
        self.button_layout.addWidget(self.model_label)
        self.layout.addLayout(self.button_layout, 2, 0)
        self.layout.setAlignment(Qt.AlignmentFlag.AlignBottom)
        self.setCentralWidget(self.centralWidget)
        # Ctrl+Enter submits the question.
        self.shortcut = QShortcut(QKeySequence("Ctrl+Return"), self.centralWidget)
        self.shortcut.activated.connect(self.on_button_click)
        self.is_writing = False
        self.chat_history = []
        self.selected_prompt = self.settings.value("selected_prompt", "prompt1")
        self.update_prompt_label()

    def set_button_color(self, button):
        """Paint *button* green to match the app's colour scheme."""
        palette = button.palette()
        palette.setColor(QPalette.ColorRole.Button, QColor("#008400"))
        button.setPalette(palette)

    def update_prompt_label(self):
        """Show the display name of the currently selected prompt preset."""
        selected_prompt_name = self.settings.value(f"{self.selected_prompt}_name", "")
        self.prompt_label.setText(selected_prompt_name)

    def on_button_click(self):
        """Send the question (with the active prompt prefix) and display the answer."""
        self.is_writing = True
        self.stop_button.show()
        question = self.question_text.toPlainText()
        selected_prompt_text = self.settings.value(f"{self.selected_prompt}_desc", "")
        if selected_prompt_text:
            question = selected_prompt_text + " " + question
        CLIENT_SECRET = self.settings.value("client_secret")
        token = get_token(CLIENT_SECRET)
        # Bug fix: float("") raised ValueError when the temperature setting
        # had been saved empty; fall back to the default instead.
        try:
            temperature = float(self.settings.value("temperature", defaultValue=0.7))
        except (TypeError, ValueError):
            temperature = 0.7
        model = self.settings.value("model", defaultValue="GigaChat-Plus")
        if token and self.is_writing:
            response = send_request(question, token, temperature, model, self.chat_history)
            # Bug fix: both counters default to 0 so the label update below
            # cannot hit an unbound local when token counting fails.
            sent_tokens_count = 0
            received_tokens_count = 0
            sent_tokens = get_tokens_count([question], token, model)
            if sent_tokens:
                sent_tokens_count = sent_tokens[0]["tokens"]
            if response and "choices" in response:
                answer = response["choices"][0]["message"]["content"]
                received_tokens = get_tokens_count([answer], token, model)
                if received_tokens:
                    received_tokens_count = received_tokens[0]["tokens"]
                self.tokens_label.setText(f"{sent_tokens_count}/{received_tokens_count}")
                self.answer_text.insertPlainText(answer)
                self.chat_history.append({"role": "user", "content": question})
                self.chat_history.append({"role": "assistant", "content": answer})
                # Image answers embed the generated file id in an <img> tag.
                if "<img src=" in answer:
                    file_id = answer.split('"')[1]
                    image_data = get_image(file_id, token)
                    if image_data:
                        file_name = os.path.join(IMAGES_FOLDER, file_id + ".jpg")
                        with open(file_name, "wb") as f:
                            f.write(image_data)
                        self.answer_text.insertPlainText(f"\nИзображение сохранено в файле {file_name}")
                    else:
                        self.answer_text.insertPlainText("\nОшибка: не удалось получить изображение")
            else:
                self.answer_text.insertPlainText("Ошибка: сервер вернул неожиданный ответ")
        else:
            self.answer_text.insertPlainText("Ошибка: не удалось получить токен")
        self.stop_button.hide()
        self.is_writing = False

    def on_stop_button_click(self):
        """Clear the in-progress flag; the request flow checks it before sending."""
        self.is_writing = False
        self.stop_button.hide()

    def on_clear_button_click(self):
        """Clear both text panes and forget the conversation history."""
        self.question_text.clear()
        self.answer_text.clear()
        self.chat_history = []

    def on_settings_button_click(self):
        """Open the settings dialog and refresh dependent widgets on OK."""
        dlg = SettingsDialog(self)
        if dlg.exec():
            self.set_button_color(self.button)
            self.set_button_color(self.clear_button)
            self.set_button_color(self.settings_button)
            self.model_label.setText(self.settings.value("model", defaultValue="GigaChat-Plus"))

    def on_prompt_button_click(self):
        """Open the prompt dialog and update the selected prompt label."""
        dlg = PromptDialog(self)
        if dlg.exec():
            self.selected_prompt = self.settings.value("selected_prompt", "prompt1")
            self.update_prompt_label()
if __name__ == "__main__":
    # Guarding the UI bootstrap lets the module be imported without
    # opening a window.
    app = QApplication(sys.argv)
    app.setStyle('Fusion')
    palette = QPalette()
    palette.setColor(QPalette.ColorRole.Window, QColor("#0000ff"))
    palette.setColor(QPalette.ColorRole.Base, QColor("#55557f"))
    palette.setColor(QPalette.ColorRole.Text, QColor("#ffffff"))
    app.setPalette(palette)
    window = MainWindow()
    window.resize(1200, 900)
    window.show()
    # Propagate Qt's exit status to the shell.
    sys.exit(app.exec())
from chatgpt-next-web-langchain.
Можете ли вы показать мне подробную информацию о HTTP-запросе? Я не могу скачать предоставленный вами вложение.
import webbrowser
import requests
import json
import uuid
import os
import logging
from flask import Flask, request, jsonify, render_template, send_from_directory
# GigaChat REST endpoints.
API_URL = "https://gigachat.devices.sberbank.ru/api/v1/chat/completions"
TOKEN_COUNT_URL = "https://gigachat.devices.sberbank.ru/api/v1/tokens/count"
OAUTH_URL = "https://ngw.devices.sberbank.ru:9443/api/v2/oauth"
SCOPE = "GIGACHAT_API_PERS"
FILE_URL = "https://gigachat.devices.sberbank.ru/api/v1/files/{file_id}/content"

# Generated images are saved to a folder on the user's Desktop.
DESKTOP_PATH = os.path.join(os.path.expanduser("~"), "Desktop")
IMAGES_FOLDER = os.path.join(DESKTOP_PATH, "GigaPhoto")
# exist_ok removes the check-then-create race of the old os.path.exists guard.
os.makedirs(IMAGES_FOLDER, exist_ok=True)

# Bug fix: ``Flask(name)`` raised NameError — the ``__name__`` underscores
# were stripped when the code was pasted into the issue as Markdown.
app = Flask(__name__)

# Configure logging: full request/response traces go to a file on the Desktop.
logging.basicConfig(filename=os.path.expanduser('~/Desktop/api_logs.txt'), level=logging.INFO)
def log_request_response(url, headers, data, response):
    """Log one HTTP round trip (request + response) at INFO level.

    *response* only needs ``status_code``, ``headers`` and ``text``
    attributes, i.e. a ``requests.Response`` or any stand-in object.
    """
    # Security fix: never write credentials (Basic secret / Bearer token)
    # into the plaintext log file on the Desktop.
    safe_headers = {k: ('<redacted>' if k.lower() == 'authorization' else v)
                    for k, v in (headers or {}).items()}
    logging.info(f"URL: {url}")
    logging.info(f"Request Headers: {safe_headers}")
    logging.info(f"Request Body: {data}")
    logging.info(f"Response Status Code: {response.status_code}")
    logging.info(f"Response Headers: {response.headers}")
    logging.info(f"Response Body: {response.text}")
    logging.info("\n\n")
def get_token(CLIENT_SECRET):
    """Exchange the base64 client secret for a GigaChat OAuth access token.

    Returns the access-token string, or None when the request failed or the
    response carried no token.
    """
    headers = {
        'Content-Type': 'application/x-www-form-urlencoded',
        'Authorization': 'Basic ' + CLIENT_SECRET,
        # Every Sber API call must carry a unique request id.
        'RqUID': str(uuid.uuid4())
    }
    data = {
        'scope': SCOPE,
        'grant_type': 'client_credentials',
    }
    response = requests.post(OAUTH_URL, headers=headers, data=data, verify=False)
    log_request_response(OAUTH_URL, headers, data, response)
    # Bug fix: only the body's non-emptiness was checked before, so an HTTP
    # error page was fed to response.json() and could raise on non-JSON text.
    if not response.ok:
        return None
    try:
        return response.json().get("access_token")
    except ValueError:
        return None
def send_request(question, token, temperature, model, chat_history, max_tokens=32000):
    """POST a chat-completion request to GigaChat.

    Returns the parsed JSON response dict, or None on failure or when either
    *token* or *question* is missing.
    """
    if not token or not question:
        return None
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer ' + token,
        'RqUID': str(uuid.uuid4())
    }
    data = {
        "model": model,
        "messages": chat_history + [{"role": "user", "content": question}],
        "temperature": temperature,
        "max_tokens": max_tokens,
        "top_p": 0.1,
        "n": 1,
        "stream": False,
        "repetition_penalty": 1,
        "update_interval": 0
    }
    try:
        # Generous timeout: long completions can take minutes.
        response = requests.post(API_URL, headers=headers, json=data, verify=False, timeout=600)
        log_request_response(API_URL, headers, data, response)
        # Bug fix: check the HTTP status before parsing; non-JSON error
        # bodies are caught by the surrounding except.
        if not response.ok:
            return None
        return response.json()
    except Exception as e:
        logging.error(f"Error occurred: {str(e)}")
        return None
def get_image(file_id, token):
    """Download the raw bytes of a generated file by its id; None on failure."""
    if not token:
        return None
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer ' + token,
        'RqUID': str(uuid.uuid4())
    }
    url = FILE_URL.format(file_id=file_id)
    response = requests.get(url, headers=headers, verify=False)
    log_request_response(url, headers, None, response)
    # Bug fix: previously any non-empty body (e.g. a JSON error message)
    # was treated as image bytes and later written to disk.
    if response.ok and response.content:
        return response.content
    return None
def count_tokens(text, model, token):
    """Query tokens/count for *text*; parsed JSON or None on failure."""
    if not token:
        return None
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer ' + token,
        # Consistency with the other endpoint helpers: every call carries
        # a unique request id.
        'RqUID': str(uuid.uuid4()),
    }
    data = {
        "model": model,
        "input": [text]
    }
    response = requests.post(TOKEN_COUNT_URL, headers=headers, json=data, verify=False)
    # Bug fix: check the HTTP status before attempting to parse the body.
    if not response.ok:
        return None
    try:
        return response.json()
    except ValueError:
        return None
@app.route('/', methods=['GET'])
def home():
    """Serve the chat UI page (templates/index.html)."""
    return render_template('index.html')
@app.route('/favicon.ico')
def favicon():
    """Serve the site icon from the app's static folder."""
    return send_from_directory(os.path.join(app.root_path, 'static'),
                               'favicon.ico', mimetype='image/vnd.microsoft.icon')
@app.route('/', methods=['POST'])
def process():
    """Handle one chat request from the UI.

    Expects JSON with ``question`` and ``client_secret``, plus optional
    ``prompt``, ``temperature``, ``model`` and ``chat_history``. Returns the
    model answer with token counts, or an error payload with HTTP 500.
    """
    data = request.get_json()
    question = data.get('question')
    prompt = data.get('prompt')  # optional prefix prepended to the question
    CLIENT_SECRET = data.get('client_secret')
    temperature = float(data.get('temperature', 0.2))
    model = data.get('model', "GigaChat-Plus")
    token = get_token(CLIENT_SECRET)
    chat_history = data.get('chat_history', [])
    if not token:
        return jsonify({'error': 'Failed to get token'}), 500
    full_question = prompt + question if prompt else question
    try:
        response = send_request(full_question, token, temperature, model, chat_history, max_tokens=32000)
        token_count = count_tokens(full_question, model, token)
        if response and "choices" in response:
            answer = response["choices"][0]["message"]["content"]
            answer_token_count = count_tokens(answer, model, token)
            # Image answers embed the generated file id in an <img> tag.
            if "<img src=" in answer:
                file_id = answer.split('"')[1]
                image_data = get_image(file_id, token)
                if image_data:
                    file_name = os.path.join(IMAGES_FOLDER, file_id + ".jpg")
                    with open(file_name, "wb") as f:
                        f.write(image_data)
                    answer += f"\nИзображение сохранено в файле {file_name}"
                else:
                    answer += "\nОшибка: не удалось получить изображение"
            # Bug fix: count_tokens may return None (network/auth failure);
            # the old ``token_count[0]`` then raised TypeError.
            return jsonify({
                'text': answer,
                'token_count': (token_count or [{}])[0],
                'answer_token_count': (answer_token_count or [{}])[0],
            })
        else:
            return jsonify({'error': 'Server returned unexpected response'}), 500
    except Exception as e:
        return jsonify({'error': str(e)}), 500
if __name__ == '__main__':
    # Bug fix: the guard compared the undefined name ``name`` to 'main' —
    # the dunder underscores were stripped by the Markdown paste.
    webbrowser.open("http://localhost:5000")
    app.run(host='0.0.0.0', port=5000)
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
Can you show me the details of the HTTP request? I can't download the attachment you provided.
import webbrowser
import requests
import json
import uuid
import os
import logging
from flask import Flask, request, jsonify, render_template, send_from_directory
# GigaChat REST endpoints.
API_URL = "https://gigachat.devices.sberbank.ru/api/v1/chat/completions"
TOKEN_COUNT_URL = "https://gigachat.devices.sberbank.ru/api/v1/tokens/count"
OAUTH_URL = "https://ngw.devices.sberbank.ru:9443/api/v2/oauth"
SCOPE = "GIGACHAT_API_PERS"
FILE_URL = "https://gigachat.devices.sberbank.ru/api/v1/files/{file_id}/content"

# Generated images are saved to a folder on the user's Desktop.
DESKTOP_PATH = os.path.join(os.path.expanduser("~"), "Desktop")
IMAGES_FOLDER = os.path.join(DESKTOP_PATH, "GigaPhoto")
# exist_ok removes the check-then-create race of the old os.path.exists guard.
os.makedirs(IMAGES_FOLDER, exist_ok=True)

# Bug fix: ``Flask(name)`` raised NameError — the ``__name__`` underscores
# were stripped when the code was pasted into the issue as Markdown.
app = Flask(__name__)

# Configure logging: full request/response traces go to a file on the Desktop.
logging.basicConfig(filename=os.path.expanduser('~/Desktop/api_logs.txt'), level=logging.INFO)
def log_request_response(url, headers, data, response):
    """Log one HTTP round trip (request + response) at INFO level.

    *response* only needs ``status_code``, ``headers`` and ``text``
    attributes, i.e. a ``requests.Response`` or any stand-in object.
    """
    # Security fix: never write credentials (Basic secret / Bearer token)
    # into the plaintext log file on the Desktop.
    safe_headers = {k: ('<redacted>' if k.lower() == 'authorization' else v)
                    for k, v in (headers or {}).items()}
    logging.info(f"URL: {url}")
    logging.info(f"Request Headers: {safe_headers}")
    logging.info(f"Request Body: {data}")
    logging.info(f"Response Status Code: {response.status_code}")
    logging.info(f"Response Headers: {response.headers}")
    logging.info(f"Response Body: {response.text}")
    logging.info("\n\n")
def get_token(CLIENT_SECRET):
    """Exchange the base64 client secret for a GigaChat access token.

    Args:
        CLIENT_SECRET: Base64-encoded client credentials.

    Returns:
        The access token string, or None if the OAuth server returned
        an empty body.
    """
    headers = {
        'Content-Type': 'application/x-www-form-urlencoded',
        # BUG FIX: the auth scheme and credentials must be separated by a
        # space ("Basic <secret>"); 'Basic' + CLIENT_SECRET produced a
        # malformed Authorization header the server rejects.
        'Authorization': 'Basic ' + CLIENT_SECRET,
        'RqUID': str(uuid.uuid4())
    }
    data = {
        'scope': SCOPE,
        'grant_type': 'client_credentials',
    }
    # verify=False: the Sberbank endpoint uses a certificate chain that is
    # not in the default trust store.
    response = requests.post(OAUTH_URL, headers=headers, data=data, verify=False)
    log_request_response(OAUTH_URL, headers, data, response)
    if response.text:
        return response.json().get("access_token")
    else:
        return None
def send_request(question, token, temperature, model, chat_history, max_tokens=32000):
    """Send a chat-completion request to the GigaChat API.

    Args:
        question: The user's message text.
        token: Bearer access token from get_token().
        temperature: Sampling temperature.
        model: GigaChat model name.
        chat_history: List of previous {"role", "content"} messages.
        max_tokens: Response length limit.

    Returns:
        The parsed JSON response dict, or None on error / empty body.
    """
    if not token or not question:
        return None
    headers = {
        'Content-Type': 'application/json',
        # BUG FIX: "Bearer" and the token must be separated by a space;
        # 'Bearer' + token produced a malformed Authorization header.
        'Authorization': 'Bearer ' + token,
        'RqUID': str(uuid.uuid4())
    }
    data = {
        "model": model,
        "messages": chat_history + [{"role": "user", "content": question}],
        "temperature": temperature,
        "max_tokens": max_tokens,
        "top_p": 0.1,
        "n": 1,
        "stream": False,
        "repetition_penalty": 1,
        "update_interval": 0
    }
    try:
        # Generous timeout: image generation responses can take a while.
        response = requests.post(API_URL, headers=headers, json=data, verify=False, timeout=600)
        log_request_response(API_URL, headers, data, response)
        if response.text:
            return response.json()
        else:
            return None
    except Exception as e:
        logging.error(f"Error occurred: {str(e)}")  # Log errors
        return None
def get_image(file_id, token):
    """Download a generated image (binary JPG) by its file id.

    Args:
        file_id: Image id extracted from the model's <img src="..."> tag.
        token: Bearer access token.

    Returns:
        The raw image bytes, or None if there is no token or no content.
    """
    if not token:
        return None
    headers = {
        'Content-Type': 'application/json',
        # BUG FIX: a space is required between "Bearer" and the token.
        'Authorization': 'Bearer ' + token,
        'RqUID': str(uuid.uuid4())
    }
    url = FILE_URL.format(file_id=file_id)
    response = requests.get(url, headers=headers, verify=False)
    log_request_response(url, headers, None, response)
    if response.content:
        return response.content
    else:
        return None
def count_tokens(text, model, token):
    """Ask the GigaChat API how many tokens *text* occupies for *model*.

    Args:
        text: The text to measure.
        model: GigaChat model name.
        token: Bearer access token.

    Returns:
        The parsed JSON response (per the API, a list of per-input count
        objects), or None if there is no token or the body is empty.
    """
    if not token:
        return None
    headers = {
        'Content-Type': 'application/json',
        # BUG FIX: a space is required between "Bearer" and the token.
        'Authorization': 'Bearer ' + token,
    }
    data = {
        "model": model,
        "input": [text]
    }
    response = requests.post(TOKEN_COUNT_URL, headers=headers, json=data, verify=False)
    if response.text:
        return response.json()
    else:
        return None
@app.route('/', methods=['GET'])
def home():
    """Render the main chat page."""
    page = render_template('index.html')
    return page
@app.route('/favicon.ico')
def favicon():
    """Serve the site icon from the app's static folder."""
    static_dir = os.path.join(app.root_path, 'static')
    return send_from_directory(static_dir, 'favicon.ico',
                               mimetype='image/vnd.microsoft.icon')
@app.route('/', methods=['POST'])
def process():
    """Handle one chat request from the UI.

    Expects JSON with: question, optional prompt, client_secret,
    temperature, model, chat_history.  Returns JSON with the answer text
    and token counts, or a JSON error with HTTP status 500.
    """
    data = request.get_json()
    question = data.get('question')
    prompt = data.get('prompt')  # Optional prefix prepended to the question
    CLIENT_SECRET = data.get('client_secret')
    temperature = float(data.get('temperature', 0.2))
    model = data.get('model', "GigaChat-Plus")  # Model name from the request
    token = get_token(CLIENT_SECRET)
    chat_history = data.get('chat_history', [])  # Previous messages, if any
    if not token:
        return jsonify({'error': 'Failed to get token'}), 500
    try:
        full_question = prompt + question if prompt else question
        response = send_request(full_question, token, temperature, model,
                                chat_history, max_tokens=32000)
        token_count = count_tokens(full_question, model, token)
        if response and "choices" in response:
            answer = response["choices"][0]["message"]["content"]
            answer_token_count = count_tokens(answer, model, token)
            # Generated images come back as an <img src="<file_id>"> tag;
            # download the file and save it into IMAGES_FOLDER.
            if "<img src=" in answer:
                file_id = answer.split('"')[1]
                image_data = get_image(file_id, token)
                if image_data:
                    file_name = os.path.join(IMAGES_FOLDER, file_id + ".jpg")
                    with open(file_name, "wb") as f:
                        f.write(image_data)
                    answer += f"\nThe image is saved in the file {file_name}"
                else:
                    answer += "\nError: failed to get image"
            return jsonify({
                'text': answer,
                # ROBUSTNESS: count_tokens may return None on failure;
                # previously that raised TypeError and turned into a 500.
                'token_count': token_count[0] if token_count else None,
                'answer_token_count': answer_token_count[0] if answer_token_count else None,
            })
        else:
            return jsonify({'error': 'Server returned unexpected response'}), 500
    except Exception as e:
        return jsonify({'error': str(e)}), 500
# NOTE: the pasted code lost its dunder underscores ("if name == 'main':");
# restored to the standard script entry-point guard.
if __name__ == '__main__':
    # Open the UI in the default browser, then start the dev server.
    webbrowser.open("http://localhost:5000")
    app.run(host='0.0.0.0', port=5000)
from chatgpt-next-web-langchain.
I'll reply to you later.
from chatgpt-next-web-langchain.
The website "https://gigachat.devices.sberbank.ru" does not support cross-origin requests, so it can only be accessed through backend forwarding.
Additionally, since the website uses a self-signed certificate, you need to configure the environment variable NODE_TLS_REJECT_UNAUTHORIZED=0
to make Node.js ignore certificate validation.
Here are the specific steps to follow:
- Configure the
.env
file:
BASE_URL=https://gigachat.devices.sberbank.ru/api
- Set the environment variable:
export NODE_TLS_REJECT_UNAUTHORIZED=0
- Start the project:
yarn dev
from chatgpt-next-web-langchain.
можете сделать этот файл и показать куда его установить в какую папку? так же в каком файле вводить API и сервер?
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
can you make this file and show where to install it in which folder? also in which file should I enter the API and server?
from chatgpt-next-web-langchain.
If you are not familiar with running from source code, you can use docker, similar to the following command.
docker run -d -p 3000:3000 -e NODE_TLS_REJECT_UNAUTHORIZED="0" -e OPENAI_API_KEY="sk-xxxx" -e BASE_URL="https://gigachat.devices.sberbank.ru/api" gosuto/chatgpt-next-web-langchain:nightly
from chatgpt-next-web-langchain.
не получается у меня установить и запустить ошибка Docker Desktop requires the Server service to be enabled. Может как то по другому можно сделать запустить?
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
I can’t install and run the error Docker Desktop requires the Server service to be enabled. Maybe there is some other way to run it?
from chatgpt-next-web-langchain.
vercel?
from chatgpt-next-web-langchain.
ну или другой мне не важно какой лишь бы работал
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
Well, or another, I don’t care which one, as long as it works
from chatgpt-next-web-langchain.
Then you can only run it through the source code. Please refer to the steps below.
- install nodejs runtime https://nodejs.org/en
- run
yarn
install npm package - create
.env
file
NODE_TLS_REJECT_UNAUTHORIZED=0
OPENAI_API_KEY=sk-xxxxxxxxxxx
BASE_URL=https://gigachat.devices.sberbank.ru/api
CUSTOM_MODELS=GigaChat
- run
yarn dev
- open for web browser
http://localhost:3000
from chatgpt-next-web-langchain.
я прописал ключ в файле, запустил, теперь что вводить конечная точка и api?
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
I registered the key in the file, launched it, now what about entering the endpoint and api?
from chatgpt-next-web-langchain.
There is no need to change here. After configuring the BASE_URL
, all requests for the openai model will be forwarded BASE_URL
the backend, so that the cross-domain problem can be avoided.
from chatgpt-next-web-langchain.
в другом приложении с ключом работает
в вашем приложении не работает пишет про ошибку api может модель нужно добавить и выбрать?
PS C:\Users\DEN\Desktop\ChatGPT-Next-Web-LangChain-2.11.2> yarn dev
yarn run v1.22.22
$ next dev
-
ready started server on 0.0.0.0:3000, url: http://localhost:3000
[Next] build mode standalone
[Next] build with chunk: true -
warn You have enabled experimental feature (forceSwcTransforms) in next.config.mjs.
-
warn Experimental features are not covered by semver, and may cause unexpected or broken application behavior. Use at your own risk.
-
event compiled client and server successfully in 4.1s (20 modules)
-
wait compiling...
-
event compiled client and server successfully in 615 ms (20 modules)
[Next] build mode standalone
[Next] build with chunk: true -
wait compiling /page (client and server)...
-
event compiled client and server successfully in 19.9s (4483 modules)
[Next] build mode standalone
[Next] build with chunk: true
[Server Config] using 1 of 1 api key
[Server Config] using 1 of 1 api key
[Server Config] using 1 of 1 api key
[Server Config] using 1 of 1 api key
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
[Server Config] using 1 of 1 api key
[Server Config] using 1 of 1 api key -
wait compiling /api/config/route (client and server)...
-
event compiled successfully in 528 ms (105 modules)
[Server Config] using 1 of 1 api key -
wait compiling /api/langchain/tool/agent/edge/route (client and server)...
-
event compiled successfully in 3s (1239 modules)
[Server Config] using 1 of 1 api key
[Auth] allowed hashed codes: []
[Auth] got access code:
[Auth] hashed access code: d41d8cd98f00b204e9800998ecf8427e
[User IP] ::1
[Time] 13.03.2024, 18:22:55
[Server Config] using 1 of 1 api key
[Auth] admin did not provide an api key
[Server Config] using 1 of 1 api key
[Server Config] using 1 of 1 api key
[baseUrl] https://api.openai.com/v1
from chatgpt-next-web-langchain.
You need to choose the GigaChat model.
There is no need to modify any configuration on the interface after startup, except for the model.
from chatgpt-next-web-langchain.
There is also a problem with your baseUrl, is the .env file not created?
BaseUrl should not be https://api.openai.com/v1
from chatgpt-next-web-langchain.
Вам нужно выбрать модель GigaChat. После запуска нет необходимости изменять какую-либо конфигурацию интерфейса, за исключением модели.
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
You need to select the GigaChat model. Once launched, there is no need to change any interface configuration, except for the model.
from chatgpt-next-web-langchain.
Также существует проблема с вашим baseUrl: файл .env не создан? BaseUrl не должен быть https://api.openai.com/v1.
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
There is also a problem with your baseUrl: the .env file is not created? BaseUrl should not be https://api.openai.com/v1.
from chatgpt-next-web-langchain.
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
from chatgpt-next-web-langchain.
The name of the configuration file must be .env
.
In addition, your key is exposed, remember to modify it.
from chatgpt-next-web-langchain.
The name of the configuration file must be
.env
. In addition, your key is exposed, remember to modify it.
не помогло, файл переименовал добавил модель
from chatgpt-next-web-langchain.
вы можете сами все сделать проверить и мне выслать:) я уже запутался
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
you can check everything yourself and send it to me :) I’m already confused
from chatgpt-next-web-langchain.
Системе не удается найти указанный путь.
yarn run v1.22.22
$ next dev
- ready started server on 0.0.0.0:3000, url: http://localhost:3000
- info Loaded env from C:\Users\DEN\Desktop\ChatGPT-Next-Web-LangChain-2.11.2.env
[Next] build mode standalone
[Next] build with chunk: true - warn You have enabled experimental feature (forceSwcTransforms) in next.config.mjs.
- warn Experimental features are not covered by semver, and may cause unexpected or broken application behavior. Use at your own risk.
(node:4184) Warning: Setting the NODE_TLS_REJECT_UNAUTHORIZED environment variable to '0' makes TLS connections and HTTPS requests insecure by disabling certificate verification.
(Use node --trace-warnings ...
to show where the warning was created)
- event compiled client and server successfully in 1353 ms (20 modules)
- wait compiling...
- event compiled client and server successfully in 427 ms (20 modules)
[Next] build mode standalone
[Next] build with chunk: true - wait compiling /page (client and server)...
- event compiled client and server successfully in 9.5s (4483 modules)
[Next] build mode standalone
[Next] build with chunk: true
[Server Config] using 1 of 1 api key
[Server Config] using 1 of 1 api key
[Server Config] using 1 of 1 api key
[Server Config] using 1 of 1 api key
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
[Server Config] using 1 of 1 api key
[Server Config] using 1 of 1 api key - wait compiling /api/config/route (client and server)...
- event compiled successfully in 941 ms (105 modules)
[Server Config] using 1 of 1 api key - wait compiling /api/openai/[...path]/route (client and server)...
- event compiled successfully in 453 ms (113 modules)
[Server Config] using 1 of 1 api key
[OpenAI Route] params { path: [ 'v1', 'chat', 'completions' ] }
[Server Config] using 1 of 1 api key
[Auth] allowed hashed codes: []
[Auth] got access code:
[Auth] hashed access code: d41d8cd98f00b204e9800998ecf8427e
[User IP] ::1
[Time] 13.03.2024, 19:07:19
[Server Config] using 1 of 1 api key
[Auth] use system api key
[Proxy] v1/chat/completions?path=v1&path=chat&path=completions
[Base Url] https://gigachat.devices.sberbank.ru/api
(node:4364) Warning: Setting the NODE_TLS_REJECT_UNAUTHORIZED environment variable to '0' makes TLS connections and HTTPS requests insecure by disabling certificate verification.
(Usenode --trace-warnings ...
to show where the warning was created)
[OpenAI Route] params { path: [ 'v1', 'chat', 'completions' ] }
[Server Config] using 1 of 1 api key
[Auth] allowed hashed codes: []
[Auth] got access code:
[Auth] hashed access code: d41d8cd98f00b204e9800998ecf8427e
[User IP] ::1
[Time] 13.03.2024, 19:07:20
[Server Config] using 1 of 1 api key
[Auth] use system api key
[Proxy] v1/chat/completions?path=v1&path=chat&path=completions
[Base Url] https://gigachat.devices.sberbank.ru/api
from chatgpt-next-web-langchain.
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
from chatgpt-next-web-langchain.
Бот обнаружил, что язык сообщения о проблеме не английский, и переведет его автоматически.
Не помогло
PS C:\Users\DEN\Desktop\ChatGPT-Next-Web-LangChain-2.11.2> yarn dev
yarn run v1.22.22
$ next dev
- ready started server on 0.0.0.0:3000, url: http://localhost:3000
- info Loaded env from C:\Users\DEN\Desktop\ChatGPT-Next-Web-LangChain-2.11.2.env
[Next] build mode standalone
[Next] build with chunk: true - warn You have enabled experimental feature (forceSwcTransforms) in next.config.mjs.
- warn Experimental features are not covered by semver, and may cause unexpected or broken application behavior. Use at your own risk.
(node:5300) Warning: Setting the NODE_TLS_REJECT_UNAUTHORIZED environment variable to '0' makes TLS connections and HTTPS requests insecure by disabling certificate verification.
(Use node --trace-warnings ...
to show where the warning was created)
- event compiled client and server successfully in 798 ms (20 modules)
- wait compiling...
- event compiled client and server successfully in 289 ms (20 modules)
[Next] build mode standalone
[Next] build with chunk: true - wait compiling /page (client and server)...
- event compiled client and server successfully in 8.7s (4483 modules)
[Next] build mode standalone
[Next] build with chunk: true
[Server Config] using 1 of 1 api key
[Server Config] using 1 of 1 api key
[Server Config] using 1 of 1 api key
[Server Config] using 1 of 1 api key
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
[Server Config] using 1 of 1 api key
[Server Config] using 1 of 1 api key - wait compiling /api/config/route (client and server)...
- event compiled successfully in 644 ms (105 modules)
[Server Config] using 1 of 1 api key - wait compiling /api/openai/[...path]/route (client and server)...
- event compiled successfully in 714 ms (113 modules)
[Server Config] using 1 of 1 api key
[OpenAI Route] params { path: [ 'v1', 'chat', 'completions' ] }
[Server Config] using 1 of 1 api key
[Auth] allowed hashed codes: []
[Auth] got access code:
[Auth] hashed access code: d41d8cd98f00b204e9800998ecf8427e
[User IP] ::1
[Time] 13.03.2024, 20:17:55
[Server Config] using 1 of 1 api key
[Auth] use system api key
[Proxy] v1/chat/completions?path=v1&path=chat&path=completions
[Base Url] https://gigachat.devices.sberbank.ru/api
(node:5152) Warning: Setting the NODE_TLS_REJECT_UNAUTHORIZED environment variable to '0' makes TLS connections and HTTPS requests insecure by disabling certificate verification.
(Usenode --trace-warnings ...
to show where the warning was created)
[OpenAI Route] params { path: [ 'v1', 'chat', 'completions' ] }
[Server Config] using 1 of 1 api key
[Auth] allowed hashed codes: []
[Auth] got access code:
[Auth] hashed access code: d41d8cd98f00b204e9800998ecf8427e
[User IP] ::1
[Time] 13.03.2024, 20:17:56
[Server Config] using 1 of 1 api key
[Auth] use system api key
[Proxy] v1/chat/completions?path=v1&path=chat&path=completions
[Base Url] https://gigachat.devices.sberbank.ru/api
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
The bot has detected that the language of the problem message is not English and will translate it automatically.
![image](https://private-user-images.githubusercontent.com/14031260/312488161-9082049b-4866-43d8-b603-dcbeefb5cc96.png?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9..1j 0SSV6Q4JFftY3a-q5OYNf6qkcMXDEbXDUgTAi1kiM) ![image]( https://private-user-images.githubusercontent.com/14031260/312488577-b068fa8b-7a74-4c29-89fa-64613186c9c9.png?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9..4fnLibS6qWKy Dj7IBT6q-xUOqQRlHQmVTTbGCbML_zc)
Did not help
PS C:\Users\DEN\Desktop\ChatGPT-Next-Web-LangChain-2.11.2> yarn dev
yarn run v1.22.22
$nextdev
- ready started server on 0.0.0.0:3000, url: http://localhost:3000
- info Loaded env from C:\Users\DEN\Desktop\ChatGPT-Next-Web-LangChain-2.11.2.env
[Next] build mode standalone
[Next] build with chunk: true - warn You have enabled experimental feature (forceSwcTransforms) in next.config.mjs.
- warn Experimental features are not covered by semver, and may cause unexpected or broken application behavior. Use at your own risk.
(node:5300) Warning: Setting the NODE_TLS_REJECT_UNAUTHORIZED environment variable to '0' makes TLS connections and HTTPS requests insecure by disabling certificate verification.
(Use node --trace-warnings ...
to show where the warning was created)
- event compiled client and server successfully in 798 ms (20 modules)
- wait compiling...
- event compiled client and server successfully in 289 ms (20 modules)
[Next] build mode standalone
[Next] build with chunk: true - wait compiling /page (client and server)...
- event compiled client and server successfully in 8.7s (4483 modules)
[Next] build mode standalone
[Next] build with chunk: true
[Server Config] using 1 of 1 api key
[Server Config] using 1 of 1 api key
[Server Config] using 1 of 1 api key
[Server Config] using 1 of 1 api key
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
fatal: your current branch 'master' does not have any commits yet
[Build Config] No git or not from git repo.
[Server Config] using 1 of 1 api key
[Server Config] using 1 of 1 api key - wait compiling /api/config/route (client and server)...
- event compiled successfully in 644 ms (105 modules)
[Server Config] using 1 of 1 api key - wait compiling /api/openai/[...path]/route (client and server)...
- event compiled successfully in 714 ms (113 modules)
[Server Config] using 1 of 1 api key
[OpenAI Route] params { path: [ 'v1', 'chat', 'completions' ] }
[Server Config] using 1 of 1 api key
[Auth] allowed hashed codes: []
[Auth] got access code:
[Auth] hashed access code: d41d8cd98f00b204e9800998ecf8427e
[User IP] ::1
[Time] 03/13/2024, 20:17:55
[Server Config] using 1 of 1 api key
[Auth] use system api key
[Proxy] v1/chat/completions?path=v1&path=chat&path=completions
[Base Url] https://gigachat.devices.sberbank.ru/api
(node:5152) Warning: Setting the NODE_TLS_REJECT_UNAUTHORIZED environment variable to '0' makes TLS connections and HTTPS requests insecure by disabling certificate verification.
(Usenode --trace-warnings ...
to show where the warning was created)
[OpenAI Route] params { path: [ 'v1', 'chat', 'completions' ] }
[Server Config] using 1 of 1 api key
[Auth] allowed hashed codes: []
[Auth] got access code:
[Auth] hashed access code: d41d8cd98f00b204e9800998ecf8427e
[User IP] ::1
[Time] 03/13/2024, 20:17:56
[Server Config] using 1 of 1 api key
[Auth] use system api key
[Proxy] v1/chat/completions?path=v1&path=chat&path=completions
[Base Url] https://gigachat.devices.sberbank.ru/api
from chatgpt-next-web-langchain.
Maybe I’ll give you my key and you can check for yourself whether it will start or not? write where I can send you the key to check? this key works in single thread
from chatgpt-next-web-langchain.
Please send the key to [email protected]
from chatgpt-next-web-langchain.
Sent
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
Sent
from chatgpt-next-web-langchain.
I didn't receive the email, please check if it was sent successfully?
from chatgpt-next-web-langchain.
Письмо успешно доставлено
[email protected]
[email protected]
Сегодня в 6:06
Я
no_name
**********
Ваше письмо было успешно доставлено указанному адресату (или адресатам,
если было указано несколько).
В случае возникновения проблемы на принимающей стороне Вы получите
отдельное уведомление от другой почтовой системы.
Это уведомление автоматически отправлено почтовой системой Яндекса.
**********
This is the mail system at host mail.yandex.net.
Your message was successfully delivered to the destination(s)
listed below. If the message was delivered to mailbox you will
receive no further notifications. Otherwise you may still receive
notifications of mail delivery errors from other systems.
<[email protected]>: delivery via route3.mx.cloudflare.net[162.159.205.25]:25:
250 2.0.0 Ok
Reporting-MTA: dns; forward101b.mail.yandex.net
X-Yandex-Queue-ID: 70A94608E5
X-Yandex-Sender: rfc822; [email protected]
Arrival-Date: Thu, 14 Mar 2024 04:06:13 +0300 (MSK)
Final-Recipient: rfc822; [email protected]
Original-Recipient: rfc822;[email protected]
Action: relayed
Status: 2.0.0
Remote-MTA: dns; route3.mx.cloudflare.net
Diagnostic-Code: smtp; 250 2.0.0 Ok
from chatgpt-next-web-langchain.
duplicated from another Google mail
from chatgpt-next-web-langchain.
did you receive the letter?
from chatgpt-next-web-langchain.
Received, trying to verify.
from chatgpt-next-web-langchain.
I found the problem, and a targeted branch will be posted in the evening.
from chatgpt-next-web-langchain.
Thank you :) can you write me a message so I can find out?
from chatgpt-next-web-langchain.
I'll let you know here when it's released.
from chatgpt-next-web-langchain.
thanks, I'll wait.
from chatgpt-next-web-langchain.
Please use the code from the branch feat-gigachat at https://github.com/Hk-Gosuto/ChatGPT-Next-Web-LangChain/tree/feat-gigachat for verification. This part of the business is quite niche, so it is unlikely to be merged into the main branch.
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
To check out the code, use the feat-gigachat threads at https://github.com/Hk-Gosuto/ChatGPT-Next-Web-LangChain/tree/feat-gigachat . This part of the business is quite niche, so it is unlikely to be explained with the main directions.
I downloaded the code
I made sure there is gigachat
from chatgpt-next-web-langchain.
все я разобрался
скажите если будут обновления в части настроек как мне перенести этот чат в новую папку? или пользоваться только этой? а еще как отключить запрос сертификатов?
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
I figured it all out
tell me if there are updates regarding the settings, how can I move this chat to a new folder? or just use this one? How can I disable the certificate request?
from chatgpt-next-web-langchain.
еще подскажите как добавить несколько моделей BASE_URL=https://gigachat.devices.sberbank.ru/api
CUSTOM_MODELS=GigaChat-Plus?
еще подскажите что означают эти настройки
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
also tell me how to add several models BASE_URL=https://gigachat.devices.sberbank.ru/api
CUSTOM_MODELS=GigaChat-Plus?
from chatgpt-next-web-langchain.
подскажите чат так же может рисовать по запросу но у меня не отображает картинку как можно настроить? в старом коде который я вам скидывал чат отображает картинки
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
Tell me, the chat can also draw on request, but it doesn’t display the picture for me, how can I configure it? in the old code that I sent you, the chat displays pictures
from chatgpt-next-web-langchain.
You need to ask him to display it with markdown image tags instead of using html.
from chatgpt-next-web-langchain.
а как добавить еще одну модель GigaChat-Pro что бы было 2 модели? еще скажите вы письмо от меня получили с какой почты?
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
how to add another GigaChat-Pro model so that there are 2 models? Also, tell me, did you receive a letter from me from which post office?
from chatgpt-next-web-langchain.
не получилось
может быть нужно добавить https://developers.sber.ru/docs/ru/gigachat/api/images-generation?lang=py
Get image by ID
Updated March 13, 2024
GET
https://gigachat.devices.sberbank.ru/api/v1/files/:file_id/content
Authorization: http
Name: Access Token
Type: http
Description :Authentication using an access token. Used in all requests to the GigaChat API, except for the request to obtain an access token .
Scheme :bearer
Token Format :JWT
Returns an image file in binary representation, in JPG format.
Images are created using a POST request /chat/completions .
Read more in the Image Generation section .
The request console is disabled due to the binary response format.
Request
Path Parameters
file_id stringrequired
ID of the created image, received in response to a user request. Contained in the model response, in the `<img>` tag, in the `src` attribute.
More details in the Image Generation section .
Answers
200
400
401
404
OK
application/jpg
import requests

# Example: fetch the binary content of a generated image by its file id.
# Replace :file_id in the URL with the id taken from the model response.
url = "https://gigachat.devices.sberbank.ru/api/v1/files/:file_id/content"
payload={}
headers = {
    'Accept': 'application/jpg',
    'Authorization': 'Bearer '  # append your access token after "Bearer "
}
response = requests.request("GET", url, headers=headers, data=payload)
print(response.text)
Creating Images
Updated February 13, 2024
In response to prompts composed in a certain way, the GigaChat API can return images. Images are generated in binary form in JPG format.
Images can be created either with a system prompt ("role": "system") or without one.
In response, GigaChat returns the ID of the created image, which can be downloaded using the GET /files/:file_id/content request.
Functionality not available on GigaChain.
Request to create an image
Below are examples of requests to create images both with and without a system prompt.
Without a system prompt
To create an image without a system prompt, it is enough to send a POST /chat/completions request containing a user message ("role": "user") whose prompt indicates that you need to create an image ("draw ..."):
# Image generation without a system prompt: a single user message whose
# prompt asks the model to draw something.
# NOTE(review): the backslash line continuations were stripped by the
# markdown export, making the multi-line command unrunnable; restored here.
curl -L -X POST 'https://gigachat.devices.sberbank.ru/api/v1/chat/completions' \
  -H 'Content-Type: application/json' \
  -H 'Accept: application/json' \
  -H 'Authorization: Bearer <токен_доступа>' \
  --data-raw '{
    "model": "GigaChat",
    "messages": [
      {
        "role": "user",
        "content": "Нарисуй логотип моей компании, которая занимается производством корма для котов."
      }
    ],
    "temperature": 1,
    "top_p": 0.1,
    "n": 1,
    "stream": false,
    "max_tokens": 512,
    "repetition_penalty": 1,
    "update_interval": 0
  }'
With a system prompt
If, when creating an image, you need to pass a system prompt ( "role": "system"), for example, to create a context, then it must contain the phrase:
Если тебя просят создать изображение, ты должен сгенерировать специальный блок: text2image(query: str, style: str),\nгде query — текстовое описание желаемого изображения, style — необязательный параметр, задающий стиль изображения.
Example:
# Image generation with a system prompt ("role": "system") that instructs
# the model to emit a text2image(...) block.
# NOTE(review): the backslash line continuations were stripped by the
# markdown export, making the multi-line command unrunnable; restored here.
curl -L -X POST 'https://gigachat.devices.sberbank.ru/api/v1/chat/completions' \
  -H 'Content-Type: application/json' \
  -H 'Accept: application/json' \
  -H 'Authorization: Bearer <токен_доступа>' \
  --data-raw '{
    "model": "GigaChat",
    "messages": [
      {
        "role": "system",
        "content": "Ты профессиональный художник. Если тебя просят создать изображение, ты должен сгенерировать специальный блок: text2image(query: str, style: str),\nгде query — логотип моей компании, которая занимается производством корма для котов, style — абстракция."
      }
    ],
    "temperature": 1,
    "top_p": 0.1,
    "n": 1,
    "stream": false,
    "max_tokens": 512,
    "repetition_penalty": 1,
    "update_interval": 0
  }'
Downloading an image
The model's response will contain the identifier of the resulting image in the <img> tag, in the src attribute:
{
"choices": [
{
"message": {
"content": "<img src=\"1d72amtvbcc2jj2dbcdqwk2t00d2tj0r5pxze88t0r0kynrg00c2211em740pqabbwc26njsa9a7jngz11a741tp0na2m0rzb1a2a10da8djt08wbgd2enq2a5gpr\" fuse=\"true\"/>",
"role": "assistant"
},
"index": 0,
"finish_reason": "stop"
}
],
"created": 1707384246,
"model": "GigaChat:3.1.24.3",
"object": "chat.completion",
"usage": {
"prompt_tokens": 100,
"completion_tokens": 38,
"total_tokens": 138,
"system_tokens": 0
}
}
To download the image, pass the received identifier in the request GET /files/{file_id}/content:
import requests
import shutil

# Example: download the generated image and stream it straight to a file.
url = "https://gigachat.devices.sberbank.ru/api/v1/files/<идентификатор_изображения>/content"
headers = {
    'Accept': 'application/jpg',
    'Authorization': 'Bearer <токен_доступа>'
}
# stream=True leaves the body unread so it can be copied from response.raw.
response = requests.request("GET", url, headers=headers, stream=True)
with open('<имя_файла>.jpg', 'wb') as out_file:
    shutil.copyfileobj(response.raw, out_file)
del response
The response to the request will contain a binary representation of the file in JPG format.
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
This feature of voice input requires https support. If you don't need it, you can edit package.json and change 'next dev --experimental-https' to 'next dev'.
Or you can also select Yes when the window pops up.
from chatgpt-next-web-langchain.
еще подскажите как добавить несколько моделей BASE_URL=https://gigachat.devices.sberbank.ru/api CUSTOM_MODELS=GigaChat-Plus? еще подскажите что означают эти настройки
You can edit CUSTOM_MODELS to add models.
For example
CUSTOM_MODELS=-all,GigaChat,GigaChat:latest,GigaChat-Plus,GigaChat-Pro
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
also tell me how to add several models BASE_URL=https://gigachat.devices.sberbank.ru/api CUSTOM_MODELS=GigaChat-Plus? also tell me what these settings mean![Screenshot_6](https://private-user-images.githubusercontent.com/118296790/312825360-8d5c32c0-3342-48f7-a8e3-9cbf6924d3c1.png?jwt=eyJhbGciOiJIUzI1NiIsInR5 cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3MTA0NjU0MDgsIm5iZiI6MTcxMDQ2NTEwOCwicGF0a CI6Ii8xMTgyOTY3OTAvMzEyODI1MzYwLThkNWMzMmMwLTMzNDItNDhmNy1hOGUzLTljYmY2OTI0ZDNjMS5wbmc_WC1BbXotQWxnb3JpdGhtPUFXUzQtSE1BQy1TSEEyNTYmWC1BbXotQ3JlZGVudGlhb D1BS0lBVkNPRFlMU0E1M1BRSzRaQSUyRjIwMjQwMzE1JTJGdXMtZWFzdC0xJTJGczMlMkZhd3M0X3JlcXVlc3QmWC1BbXotRGF0ZT0yMDI0MDMxNVQwMTExNDhaJlgtQW16LUV4cGlyZXM9M zAwJlgtQW16LVNpZ25hdHVyZT02NDNhOWRhMTE2OGVhMWExOWYyNjE0YjkwMTYzOTg4NjEwMzk0MzI2Yzk1MGRjNGU3MjhiNjNjYmY2MzdhMzU1JlgtQW16LVNpZ25lZEhlYWRlcnM9aG 9zdCZhY3Rvcl9pZD0wJmtleV9pZD0wJnJlcG9faWQ9MCJ9.4xzAVUTEHYTCw1NhiHFV06Vk_kd0jyPc_9Emv9DE8h0)
You can edit CUSTOM_MODELS to add models.
For example
CUSTOM_MODELS=-all,GigaChat,GigaChat:latest,GigaChat-Plus,GigaChat-Pro
from chatgpt-next-web-langchain.
Скажите как удалить не нужные мне подсказки?
Как удалить не нужные мне модели из списка?
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
Tell me how to remove hints that I don’t need?
How can I remove models I don’t need from the list?
from chatgpt-next-web-langchain.
а как добавить еще одну модель GigaChat-Pro что бы было 2 модели? еще скажите вы письмо от меня получили с какой почты?
Sorry, I just saw the email, it was from your gmail mailbox.
There is currently no donation channel for this project, or you can give this project one star.
from chatgpt-next-web-langchain.
Скажите как удалить не нужные мне подсказки? Как удалить не нужные мне модели из списка?
-all
to delete the default model
CUSTOM_MODELS=-all,GigaChat,GigaChat:latest,GigaChat-Plus,GigaChat-Pro
The mask is the default feature, you need to edit the code if you want to delete it.
app\masks\cn.ts
and app\masks\en.ts
from chatgpt-next-web-langchain.
а как добавить еще одну модель GigaChat-Pro что бы было 2 модели? еще скажите вы письмо от меня получили с какой почты?
Извините, я только что увидел письмо, оно было из вашего почтового ящика Gmail. В настоящее время для этого проекта нет канала пожертвований, или вы можете поставить этому проекту одну звезду.
добавил +
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
how to add another GigaChat-Pro model so that there are 2 models? Also, tell me, did you receive a letter from me from which post office?
Sorry, I just saw the email, it was from your Gmail inbox. There is currently no donation channel for this project, or you can give this project one star.
added +
from chatgpt-next-web-langchain.
Скажите как удалить не нужные мне подсказки? Как удалить не нужные мне модели из списка?
-all
удалить модель по умолчаниюCUSTOM_MODELS=-all,GigaChat,GigaChat:latest,GigaChat-Plus,GigaChat-Pro
Маска является функцией по умолчанию. Если вы хотите удалить ее, вам необходимо отредактировать код.
app\masks\cn.ts
иapp\masks\en.ts
я хочу удалить не нужные маски и оставить только свои созданные
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
Tell me how to remove hints that I don’t need? ![Screenshot_2](https://private-user-images.githubusercontent.com/118296790/313038292-4bf7da1c-db74-410f-9f38-67b19bf19606.png?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVC J9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3MTA0NjYzNjcsIm5iZiI6MTcxMDQ2NjA2NywicGF0aCI6Ii 8xMTgyOTY3OTAvMzEzMDM4MjkyLTRiZjdkYTFjLWRiNzQtNDEwZi05ZjM4LTY3YjE5YmYxOTYwNi5wbmc_WC1BbXotQWxnb3JpdGhtPUFXUzQtSE1BQy1TSEEyNTYmWC1BbXotQ3JlZGVudGlhbD 1BS0lBVkNPRFlMU0E1M1BRSzRaQSUyRjIwMjQwMzE1JTJGdXMtZWFzdC0xJTJGczMlMkZhd3M0X3JlcXVlc3QmWC1BbXotRGF0ZT0yMDI0MDMxNVQwMTI3NDdaJlgtQW16LUV4cGlyZXM9M zAwJlgtQW16LVNpZ25hdHVyZT04MmJmNjAzODZjNTI4MWUzMWQ3OWY5MDBkOWY1NTkzNTE1NDFkNDU1ZjVjYzlkNDI3OGUyNjBkZjkxMDNmZjEzJlgtQW16LVNpZ25lZEhlYWRlcnM9aG9 zdCZhY3Rvcl9pZD0wJmtleV9pZD0wJnJlcG9faWQ9MCJ9.YqEPI62GDOi4yFDWPZOQvvvGeW4p208apvozkqMWr7g) How to remove models I don’t need from the list? ![Screenshot_3](https://private-user-images.githubusercontent.com/118296790/313038462-3302b1b0-6138-4d8c-8651-d8140d367207.png?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpX VCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3MTA0NjYzNjcsIm5iZiI6MTcxMDQ2NjA2NywicGF0aCI6I i8xMTgyOTY3OTAvMzEzMDM4NDYyLTMzMDJiMWIwLTYxMzgtNGQ4Yy04NjUxLWQ4MTQwZDM2NzIwNy5wbmc_WC1BbXotQWxnb3JpdGhtPUFXUzQtSE1BQy1TSEEyNTYmWC1BbXotQ3JlZGVudGlhbD 1BS0lBVkNPRFlMU0E1M1BRSzRaQSUyRjIwMjQwMzE1JTJGdXMtZWFzdC0xJTJGczMlMkZhd3M0X3JlcXVlc3QmWC1BbXotRGF0ZT0yMDI0MDMxNVQwMTI3NDdaJlgtQW16LUV4cGlyZXM9M zAwJlgtQW16LVNpZ25hdHVyZT0wODBlN2FkNGEzMGM2ZDVjMzI2ZTNiYWY1Zjg1NmY0YjljZTNhYmVkZDhiZjM3Mzc2OGMyOTU5NzlkNjk3ZDFkJlgtQW16LVNpZ25lZEhlYWRlcnM9aG 9zdCZhY3Rvcl9pZD0wJmtleV9pZD0wJnJlcG9faWQ9MCJ9.IdN3ARrb_v1rPtKcVItwaeAemKBvp9dbjQ3N1ZZSk0A)
-all
remove default modelCUSTOM_MODELS=-all,GigaChat,GigaChat:latest,GigaChat-Plus,GigaChat-Pro
Mask is the default function. If you want to remove it, you need to edit the code.
app\masks\cn.ts
andapp\masks\en.ts
I want to remove unnecessary masks and leave only my created ones
from chatgpt-next-web-langchain.
Скажите как удалить не нужные мне подсказки? Как удалить не нужные мне модели из списка?
-all
удалить модель по умолчаниюCUSTOM_MODELS=-all,GigaChat,GigaChat:latest,GigaChat-Plus,GigaChat-Pro
Маска является функцией по умолчанию. Если вы хотите удалить ее, вам необходимо отредактировать код.
app\masks\cn.ts
иapp\masks\en.ts
я хочу удалить не нужные маски и оставить только свои созданные
You need to modify the two files above.
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
Tell me how to remove hints that I don’t need? ![Screenshot_2](https://private-user-images.githubusercontent.com/118296790/313038292-4bf7da1c-db74-410f-9f38-67b19bf19606.png?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVC J9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3MTA0NjYzNjcsIm5iZiI6MTcxMDQ2NjA2NywicGF0aCI6Ii 8xMTgyOTY3OTAvMzEzMDM4MjkyLTRiZjdkYTFjLWRiNzQtNDEwZi05ZjM4LTY3YjE5YmYxOTYwNi5wbmc_WC1BbXotQWxnb3JpdGhtPUFXUzQtSE1BQy1TSEEyNTYmWC1BbXotQ3JlZGVudGlhbD 1BS0lBVkNPRFlMU0E1M1BRSzRaQSUyRjIwMjQwMzE1JTJGdXMtZWFzdC0xJTJGczMlMkZhd3M0X3JlcXVlc3QmWC1BbXotRGF0ZT0yMDI0MDMxNVQwMTI3NDdaJlgtQW16LUV4cGlyZXM9M zAwJlgtQW16LVNpZ25hdHVyZT04MmJmNjAzODZjNTI4MWUzMWQ3OWY5MDBkOWY1NTkzNTE1NDFkNDU1ZjVjYzlkNDI3OGUyNjBkZjkxMDNmZjEzJlgtQW16LVNpZ25lZEhlYWRlcnM9aG9 zdCZhY3Rvcl9pZD0wJmtleV9pZD0wJnJlcG9faWQ9MCJ9.YqEPI62GDOi4yFDWPZOQvvvGeW4p208apvozkqMWr7g) How to remove models I don’t need from the list? ![Screenshot_3](https://private-user-images.githubusercontent.com/118296790/313038462-3302b1b0-6138-4d8c-8651-d8140d367207.png?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpX VCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3MTA0NjYzNjcsIm5iZiI6MTcxMDQ2NjA2NywicGF0aCI6I i8xMTgyOTY3OTAvMzEzMDM4NDYyLTMzMDJiMWIwLTYxMzgtNGQ4Yy04NjUxLWQ4MTQwZDM2NzIwNy5wbmc_WC1BbXotQWxnb3JpdGhtPUFXUzQtSE1BQy1TSEEyNTYmWC1BbXotQ3JlZGVudGlhbD 1BS0lBVkNPRFlMU0E1M1BRSzRaQSUyRjIwMjQwMzE1JTJGdXMtZWFzdC0xJTJGczMlMkZhd3M0X3JlcXVlc3QmWC1BbXotRGF0ZT0yMDI0MDMxNVQwMTI3NDdaJlgtQW16LUV4cGlyZXM9M zAwJlgtQW16LVNpZ25hdHVyZT0wODBlN2FkNGEzMGM2ZDVjMzI2ZTNiYWY1Zjg1NmY0YjljZTNhYmVkZDhiZjM3Mzc2OGMyOTU5NzlkNjk3ZDFkJlgtQW16LVNpZ25lZEhlYWRlcnM9aG 9zdCZhY3Rvcl9pZD0wJmtleV9pZD0wJnJlcG9faWQ9MCJ9.IdN3ARrb_v1rPtKcVItwaeAemKBvp9dbjQ3N1ZZSk0A)
-all
remove default modelCUSTOM_MODELS=-all,GigaChat,GigaChat:latest,GigaChat-Plus,GigaChat-Pro
Mask is the default feature. If you want to remove it, you need to edit the code.
app\masks\cn.ts
andapp\masks\en.ts
I want to remove unnecessary masks and leave only my created ones
You need to modify the two files above.
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
Tell me how to remove hints that I don’t need? ![Screenshot_2](https://private-user-images.githubusercontent.com/118296790/313038292-4bf7da1c-db74-410f-9f38-67b19bf19606.png?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVC J9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3MTA0NjYzNjcsIm5iZiI6MTcxMDQ2NjA2NywicGF0aCI6Ii 8xMTgyOTY3OTAvMzEzMDM4MjkyLTRiZjdkYTFjLWRiNzQtNDEwZi05ZjM4LTY3YjE5YmYxOTYwNi5wbmc_WC1BbXotQWxnb3JpdGhtPUFXUzQtSE1BQy1TSEEyNTYmWC1BbXotQ3JlZGVudGlhbD 1BS0lBVkNPRFlMU0E1M1BRSzRaQSUyRjIwMjQwMzE1JTJGdXMtZWFzdC0xJTJGczMlMkZhd3M0X3JlcXVlc3QmWC1BbXotRGF0ZT0yMDI0MDMxNVQwMTI3NDdaJlgtQW16LUV4cGlyZXM9M zAwJlgtQW16LVNpZ25hdHVyZT04MmJmNjAzODZjNTI4MWUzMWQ3OWY5MDBkOWY1NTkzNTE1NDFkNDU1ZjVjYzlkNDI3OGUyNjBkZjkxMDNmZjEzJlgtQW16LVNpZ25lZEhlYWRlcnM9aG9 zdCZhY3Rvcl9pZD0wJmtleV9pZD0wJnJlcG9faWQ9MCJ9.YqEPI62GDOi4yFDWPZOQvvvGeW4p208apvozkqMWr7g) How to remove models I don’t need from the list? ![Screenshot_3](https://private-user-images.githubusercontent.com/118296790/313038462-3302b1b0-6138-4d8c-8651-d8140d367207.png?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpX VCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3MTA0NjYzNjcsIm5iZiI6MTcxMDQ2NjA2NywicGF0aCI6I i8xMTgyOTY3OTAvMzEzMDM4NDYyLTMzMDJiMWIwLTYxMzgtNGQ4Yy04NjUxLWQ4MTQwZDM2NzIwNy5wbmc_WC1BbXotQWxnb3JpdGhtPUFXUzQtSE1BQy1TSEEyNTYmWC1BbXotQ3JlZGVudGlhbD 1BS0lBVkNPRFlMU0E1M1BRSzRaQSUyRjIwMjQwMzE1JTJGdXMtZWFzdC0xJTJGczMlMkZhd3M0X3JlcXVlc3QmWC1BbXotRGF0ZT0yMDI0MDMxNVQwMTI3NDdaJlgtQW16LUV4cGlyZXM9M zAwJlgtQW16LVNpZ25hdHVyZT0wODBlN2FkNGEzMGM2ZDVjMzI2ZTNiYWY1Zjg1NmY0YjljZTNhYmVkZDhiZjM3Mzc2OGMyOTU5NzlkNjk3ZDFkJlgtQW16LVNpZ25lZEhlYWRlcnM9aG 9zdCZhY3Rvcl9pZD0wJmtleV9pZD0wJnJlcG9faWQ9MCJ9.IdN3ARrb_v1rPtKcVItwaeAemKBvp9dbjQ3N1ZZSk0A)
-all
remove default modelCUSTOM_MODELS=-all,GigaChat,GigaChat:latest,GigaChat-Plus,GigaChat-Pro
Mask is the default function. If you want to remove it, you need to edit the code.
app\masks\cn.ts
andapp\masks\en.ts
I want to remove unnecessary masks and leave only my created ones
можете на примере показать какие строки удалить что бы не сломать весь код на примере 1 маски?
from chatgpt-next-web-langchain.
If you don't need the existing mask, you can modify the content of the two files to look like the following
import { BuiltinMask } from "./typing";
export const CN_MASKS: BuiltinMask[] = [];
from chatgpt-next-web-langchain.
I need to go to work. If I have any other questions, I will reply to you later.
from chatgpt-next-web-langchain.
Если вам не нужна существующая маска, вы можете изменить содержимое двух файлов, чтобы оно выглядело следующим образом.
import { BuiltinMask } from "./typing"; export const CN_MASKS: BuiltinMask[] = [];
from chatgpt-next-web-langchain.
app\masks\cn.ts
import { BuiltinMask } from "./typing";
export const CN_MASKS: BuiltinMask[] = [];
app\masks\en.ts
import { BuiltinMask } from "./typing";
export const EN_MASKS: BuiltinMask[] = [];
from chatgpt-next-web-langchain.
приложение\маски\cn.ts
import { BuiltinMask } from "./typing"; export const CN_MASKS: BuiltinMask[] = [];
приложение\маски\en.ts
import { BuiltinMask } from "./typing"; export const EN_MASKS: BuiltinMask[] = [];
спасибо получилось:)
from chatgpt-next-web-langchain.
Подскажите по поводу создания фотографий что они не отображаются я выше писал? можете у себя проверить откроется картинка? запрос я писал "Вам нужно попросить его отобразить его с помощью тегов изображений уценки вместо использования html." не помогло.
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
Tell me about creating photos that they are not displayed as I wrote above? Can you check if the picture will open? request I wrote "You need to ask it to display it using markdown image tags instead of using html." did not help.
from chatgpt-next-web-langchain.
I re-read the Python code above. Image generation requires a special interface call, which is not specifically implemented in this project.
from chatgpt-next-web-langchain.
а можно настроить тогда что бы не отображал в чате а сразу скачивал в папку GigaPhoto на рабочий стол если папки нет то она создается?
как в этом коде:
import webbrowser
import requests
import json
import uuid
import os
import logging
from flask import Flask, request, jsonify, render_template, send_from_directory
# GigaChat API endpoints and OAuth configuration.
API_URL = "https://gigachat.devices.sberbank.ru/api/v1/chat/completions"
TOKEN_COUNT_URL = "https://gigachat.devices.sberbank.ru/api/v1/tokens/count"
OAUTH_URL = "https://ngw.devices.sberbank.ru:9443/api/v2/oauth"
SCOPE = "GIGACHAT_API_PERS"
FILE_URL = "https://gigachat.devices.sberbank.ru/api/v1/files/{file_id}/content"

# Generated images are saved into a "GigaPhoto" folder on the user's desktop.
DESKTOP_PATH = os.path.join(os.path.expanduser("~"), "Desktop")
IMAGES_FOLDER = os.path.join(DESKTOP_PATH, "GigaPhoto")
# exist_ok avoids a race between the existence check and the creation.
os.makedirs(IMAGES_FOLDER, exist_ok=True)

# BUG FIX: the original read "Flask(name)" (the markdown export stripped the
# double underscores); Flask needs the module's __name__ to locate templates
# and static files.
app = Flask(__name__)

# Log every API request/response to a file on the desktop.
logging.basicConfig(filename=os.path.expanduser('~/Desktop/api_logs.txt'), level=logging.INFO)
def log_request_response(url, headers, data, response):
    """Write one request/response exchange to the log, one field per line."""
    entries = (
        f"URL: {url}",
        f"Request Headers: {headers}",
        f"Request Body: {data}",
        f"Response Status Code: {response.status_code}",
        f"Response Headers: {response.headers}",
        f"Response Body: {response.text}",
        "\n\n",  # blank separator between logged exchanges
    )
    for entry in entries:
        logging.info(entry)
def get_token(CLIENT_SECRET):
    """Exchange the base64 client secret for a GigaChat OAuth access token.

    Returns the access token string, or None when the OAuth endpoint returns
    an empty or non-JSON body.
    """
    headers = {
        'Content-Type': 'application/x-www-form-urlencoded',
        'Authorization': 'Basic ' + CLIENT_SECRET,
        # Every Sber API call must carry a unique request id.
        'RqUID': str(uuid.uuid4())
    }
    data = {
        'scope': SCOPE,
        'grant_type': 'client_credentials',
    }
    # verify=False skips TLS verification — presumably because the Sber
    # gateway uses a CA root absent from common trust stores; confirm.
    response = requests.post(OAUTH_URL, headers=headers, data=data, verify=False)
    log_request_response(OAUTH_URL, headers, data, response)
    if not response.text:
        return None
    try:
        return response.json().get("access_token")
    except ValueError:
        # BUG FIX: a non-JSON error body (e.g. an HTML gateway page) made the
        # original .json() call raise instead of returning None.
        return None
def send_request(question, token, temperature, model, chat_history, max_tokens=32000):
    """POST a chat-completion request; return the parsed JSON or None."""
    if not token or not question:
        return None
    request_headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer ' + token,
        'RqUID': str(uuid.uuid4()),
    }
    payload = {
        "model": model,
        "messages": chat_history + [{"role": "user", "content": question}],
        "temperature": temperature,
        "max_tokens": max_tokens,
        "top_p": 0.1,
        "n": 1,
        "stream": False,
        "repetition_penalty": 1,
        "update_interval": 0,
    }
    try:
        # Generous timeout: image generation can take a long time.
        response = requests.post(API_URL, headers=request_headers, json=payload,
                                 verify=False, timeout=600)
        log_request_response(API_URL, request_headers, payload, response)
        return response.json() if response.text else None
    except Exception as exc:
        logging.error(f"Error occurred: {str(exc)}")
        return None
def get_image(file_id, token):
    """Download the binary content of a generated image by its file id."""
    if not token:
        return None
    auth_headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer ' + token,
        'RqUID': str(uuid.uuid4()),
    }
    url = FILE_URL.format(file_id=file_id)
    response = requests.get(url, headers=auth_headers, verify=False)
    log_request_response(url, auth_headers, None, response)
    # Empty body is treated as failure so callers can report an error.
    return response.content if response.content else None
def count_tokens(text, model, token):
    """Ask the API how many tokens *text* uses for *model*.

    Returns the parsed JSON response, or None when there is no access token
    or the response body is empty.
    """
    if not token:
        return None
    response = requests.post(
        TOKEN_COUNT_URL,
        headers={
            'Content-Type': 'application/json',
            'Authorization': 'Bearer ' + token,
        },
        json={"model": model, "input": [text]},
        verify=False,
    )
    return response.json() if response.text else None
@app.route('/', methods=['GET'])
def home():
    # Serve the chat UI (templates/index.html).
    return render_template('index.html')
@app.route('/favicon.ico')
def favicon():
    # Serve the favicon from the static/ directory with an explicit MIME type.
    return send_from_directory(os.path.join(app.root_path, 'static'),
                               'favicon.ico', mimetype='image/vnd.microsoft.icon')
@app.route('/', methods=['POST'])
def process():
    """Handle one chat request: authenticate, query GigaChat, save any image.

    Expects JSON with: question, client_secret, and optional prompt,
    temperature, model, chat_history.  Returns the answer text plus token
    counts, or an error payload with HTTP 500.
    """
    data = request.get_json()
    question = data.get('question')
    prompt = data.get('prompt')  # optional prefix prepended to the question
    CLIENT_SECRET = data.get('client_secret')
    temperature = float(data.get('temperature', 0.2))
    model = data.get('model', "GigaChat-Plus")
    token = get_token(CLIENT_SECRET)
    chat_history = data.get('chat_history', [])
    if not token:
        return jsonify({'error': 'Failed to get token'}), 500
    try:
        full_question = prompt + question if prompt else question
        response = send_request(full_question, token, temperature, model,
                                chat_history, max_tokens=32000)
        token_count = count_tokens(full_question, model, token)
        if not (response and "choices" in response):
            return jsonify({'error': 'Server returned unexpected response'}), 500
        answer = response["choices"][0]["message"]["content"]
        answer_token_count = count_tokens(answer, model, token)
        if "<img src=" in answer:
            # The model embeds the image id in an <img src="..."> tag; the
            # first quoted segment is the file id.
            file_id = answer.split('"')[1]
            image_data = get_image(file_id, token)
            if image_data:
                file_name = os.path.join(IMAGES_FOLDER, file_id + ".jpg")
                with open(file_name, "wb") as f:
                    f.write(image_data)
                answer += f"\nИзображение сохранено в файле {file_name}"
            else:
                answer += "\nОшибка: не удалось получить изображение"
        # BUG FIX: count_tokens can return None (empty body / HTTP error);
        # the original then crashed on token_count[0] and answered 500 even
        # though the chat reply itself succeeded.
        return jsonify({
            'text': answer,
            'token_count': token_count[0] if token_count else None,
            'answer_token_count': answer_token_count[0] if answer_token_count else None,
        })
    except Exception as e:
        return jsonify({'error': str(e)}), 500
@app.route('/continue', methods=['POST'])  # route for continuing a message
def continue_message():
    # Echo the message text back so the client can request a continuation.
    data = request.get_json()
    message_text = data.get('message_text')
    return jsonify({'message_continued': message_text})
# BUG FIX: the original guard read `if name == 'main':` (the markdown export
# stripped the double underscores), so the server could never start.
if __name__ == '__main__':
    # Open the UI in the default browser, then serve on all interfaces.
    webbrowser.open("http://localhost:5000")
    app.run(host='0.0.0.0', port=5000)
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
Is it possible to set it up so that it is not displayed in the chat, but is immediately downloaded to the GigaPhoto folder on the desktop? If the folder does not exist, then it is created?
like in this code:
import webbrowser
import requests
import json
import uuid
import os
import logging
from flask import Flask, request, jsonify, render_template, send_from_directory
# GigaChat API endpoints and OAuth configuration.
API_URL = "https://gigachat.devices.sberbank.ru/api/v1/chat/completions"
TOKEN_COUNT_URL = "https://gigachat.devices.sberbank.ru/api/v1/tokens/count"
OAUTH_URL = "https://ngw.devices.sberbank.ru:9443/api/v2/oauth"
SCOPE = "GIGACHAT_API_PERS"
FILE_URL = "https://gigachat.devices.sberbank.ru/api/v1/files/{file_id}/content"

# Generated images are saved into a "GigaPhoto" folder on the user's desktop.
DESKTOP_PATH = os.path.join(os.path.expanduser("~"), "Desktop")
IMAGES_FOLDER = os.path.join(DESKTOP_PATH, "GigaPhoto")
if not os.path.exists(IMAGES_FOLDER):
    os.makedirs(IMAGES_FOLDER)

# BUG FIX: the original read "Flask(name)" (the markdown export stripped the
# double underscores); Flask needs the module's __name__ to locate templates
# and static files.
app = Flask(__name__)

# Setting up logging: every API exchange goes to a file on the desktop.
logging.basicConfig(filename=os.path.expanduser('~/Desktop/api_logs.txt'), level=logging.INFO)
def log_request_response(url, headers, data, response):
    """Append the full request/response pair to the log file for debugging."""
    logging.info(f"URL: {url}")
    logging.info(f"Request Headers: {headers}")
    logging.info(f"Request Body: {data}")
    logging.info(f"Response Status Code: {response.status_code}")
    logging.info(f"Response Headers: {response.headers}")
    logging.info(f"Response Body: {response.text}")
    # Blank separator between logged exchanges.
    logging.info("\n\n")
def get_token(CLIENT_SECRET):
    """Exchange the base64 client secret for a GigaChat OAuth access token.

    Returns the access token string, or None when the response body is empty.
    """
    headers = {
        'Content-Type': 'application/x-www-form-urlencoded',
        # BUG FIX: the scheme and the credential must be separated by a space
        # ("Basic <secret>"); the original 'Basic' + CLIENT_SECRET produced
        # an invalid Authorization header ("Basicabc...") and a guaranteed
        # authentication failure.
        'Authorization': 'Basic ' + CLIENT_SECRET,
        'RqUID': str(uuid.uuid4())
    }
    data = {
        'scope': SCOPE,
        'grant_type': 'client_credentials',
    }
    response = requests.post(OAUTH_URL, headers=headers, data=data, verify=False)
    log_request_response(OAUTH_URL, headers, data, response)
    if response.text:
        return response.json().get("access_token")
    else:
        return None
def send_request(question, token, temperature, model, chat_history, max_tokens=32000):
    """POST a chat-completion request; return the parsed JSON or None."""
    if not token or not question:
        return None
    headers = {
        'Content-Type': 'application/json',
        # BUG FIX: restored the space in "Bearer <token>"; the original
        # 'Bearer' + token yielded an invalid Authorization header.
        'Authorization': 'Bearer ' + token,
        'RqUID': str(uuid.uuid4())
    }
    data = {
        "model": model,
        "messages": chat_history + [{"role": "user", "content": question}],
        "temperature": temperature,
        "max_tokens": max_tokens,
        "top_p": 0.1,
        "n": 1,
        "stream": False,
        "repetition_penalty": 1,
        "update_interval": 0
    }
    try:
        response = requests.post(API_URL, headers=headers, json=data, verify=False, timeout=600)  # increased allowed timeout
        log_request_response(API_URL, headers, data, response)
        if response.text:
            return response.json()
        else:
            return None
    except Exception as e:
        logging.error(f"Error occurred: {str(e)}")  # log errors
        return None
def get_image(file_id, token):
    """Download the binary content of a generated image by its file id."""
    if not token:
        return None
    headers = {
        'Content-Type': 'application/json',
        # BUG FIX: restored the space in "Bearer <token>"; the original
        # 'Bearer' + token yielded an invalid Authorization header.
        'Authorization': 'Bearer ' + token,
        'RqUID': str(uuid.uuid4())
    }
    response = requests.get(FILE_URL.format(file_id=file_id), headers=headers, verify=False)
    log_request_response(FILE_URL.format(file_id=file_id), headers, None, response)
    if response.content:
        return response.content
    else:
        return None
def count_tokens(text, model, token):
    """Ask the API how many tokens *text* uses for *model*; None on failure."""
    if not token:
        return None
    headers = {
        'Content-Type': 'application/json',
        # BUG FIX: restored the space in "Bearer <token>"; the original
        # 'Bearer' + token yielded an invalid Authorization header.
        'Authorization': 'Bearer ' + token,
    }
    data = {
        "model": model,
        "input": [text]
    }
    response = requests.post(TOKEN_COUNT_URL, headers=headers, json=data, verify=False)
    if response.text:
        return response.json()
    else:
        return None
@app.route('/', methods=['GET'])
def home():
    # Serve the chat UI (templates/index.html).
    return render_template('index.html')
@app.route('/favicon.ico')
def favicon():
    # Serve the favicon from the static/ directory with an explicit MIME type.
    return send_from_directory(os.path.join(app.root_path, 'static'),
                               'favicon.ico', mimetype='image/vnd.microsoft.icon')
@app.route('/', methods=['POST'])
def process():
    """Handle one chat request: authenticate, query GigaChat, save any image.

    Expects JSON with: question, client_secret, and optional prompt,
    temperature, model, chat_history.  Returns the answer text plus token
    counts, or an error payload with HTTP 500.
    """
    data = request.get_json()
    question = data.get('question')
    prompt = data.get('prompt')  # getting prompt from request
    CLIENT_SECRET = data.get('client_secret')
    temperature = float(data.get('temperature', 0.2))
    model = data.get('model', "GigaChat-Plus")  # retrieving the model from the request
    token = get_token(CLIENT_SECRET)
    chat_history = data.get('chat_history', [])  # get chat history from request
    if token:
        # Adding a prompt to the question before sending.
        try:
            response = send_request(prompt + question if prompt else question, token, temperature, model, chat_history,
                                    max_tokens=32000)
            token_count = count_tokens(prompt + question if prompt else question, model, token)
            if response and "choices" in response:
                answer = response["choices"][0]["message"]["content"]
                answer_token_count = count_tokens(answer, model, token)  # counting tokens in the answer
                if "<img src=" in answer:
                    # The model embeds the image id in an <img src="..."> tag;
                    # the first quoted segment is the file id.
                    file_id = answer.split('"')[1]
                    image_data = get_image(file_id, token)
                    if image_data:
                        file_name = os.path.join(IMAGES_FOLDER, file_id + ".jpg")
                        with open(file_name, "wb") as f:
                            f.write(image_data)
                        answer += f"\nThe image is saved in the file {file_name}"
                    else:
                        answer += "\nError: failed to get image"
                # NOTE(review): count_tokens can return None; token_count[0]
                # would then raise and this request answers 500 — confirm.
                return jsonify({'text': answer, 'token_count': token_count[0], 'answer_token_count': answer_token_count[0]})
            else:
                return jsonify({'error': 'Server returned unexpected response'}), 500
        except Exception as e:
            return jsonify({'error': str(e)}), 500
    else:
        return jsonify({'error': 'Failed to get token'}), 500
@app.route('/continue', methods=['POST'])  # added new routing for message continuation
def continue_message():
    # Echo the message text back so the client can request a continuation.
    data = request.get_json()
    message_text = data.get('message_text')
    return jsonify({'message_continued': message_text})
# BUG FIX: the original guard read `if name == 'main':` (the markdown export
# stripped the double underscores), so the server could never start.
if __name__ == '__main__':
    # Open the UI in the default browser, then serve on all interfaces.
    webbrowser.open("http://localhost:5000")
    app.run(host='0.0.0.0', port=5000)
from chatgpt-next-web-langchain.
Retrieve the latest code
The image will be saved to the uploads
directory
from chatgpt-next-web-langchain.
Получить последний код. Изображение будет сохранено в
uploads
каталоге.
я не понял что нужно сделать и в какой папке? можете более подробно сказать?
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
Get the latest code. The image will be saved in the
uploads
directory. (image) icmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3MTA0ODkwNDAsIm5iZiI6MTcxMDQ4ODc0MCwicGF0aCI6Ii8xNDAzMTI2MC8zMTMxMDY3MzQtNWRjYjdiYzAtZmQyYS00YjYxLWEyNTUtY2Uy ODY2M2U3NWRmLnBuZz9YLUFtei1BbGdvcml0aG09QVdTNC1ITUFDLVNIQTI1NiZYLUFtei1DcmVkZW50aWFsPUFLSUFWQ09EWUxTQTUzUFFLNFpBJTJGMjAyNDAzMTUlMkZ1cy1lYXN0LTElMkZzMyUyRmF3czRfcmVxdWVzdCZYLUFtei1EYXR lPTIwMjQwMzE1VDA3NDU0MFomWC1BbXotRXhwaXJlcz0zMDAmWC1BbXotU2lnbmF0dXJlPWQxMDkzMWQxMTA3NjYyNTZmMDAxYWVlN2RmOWQ1NGI1YjQzYmIxZDQzMyMGE5NThiMGEzZTc4YTYyNWYzOTQmWC1BbXot U2lnbmVkSGVhZGVycz1ob3N0JmFjdG9yX2lkPTAma2V5X2lkPTAmcmVwb19pZD0wIn0.S8fff_TmsUX0ZLILHIa56pHgFNaW8p9b9wnItJNT2ik)
I didn't understand what needs to be done and in which folder. Can you tell me more details?
from chatgpt-next-web-langchain.
You just need to retrieve the code from the feat-gigachat
branch and rerun the program.
When the returned content contains an img tag, the image is automatically downloaded to the uploads
folder of your program directory.
from chatgpt-next-web-langchain.
img,
не появилась папка uploadsкаталог вашей программы.
можно настроить что бы картинка сохранялась в папку Giga pfoto на рабочем столе? если папки Giga pfoto нет то мы ее создаем. я выше выкладывал код как у меня сохранялось.
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
img,
The uploads folder in your program directory has not appeared.
Is it possible to configure the image to be saved in the Giga pfoto folder on the desktop? if the Giga pfoto folder does not exist, then we create it. I posted the code above as I saved it.
from chatgpt-next-web-langchain.
Добрый день:) не смотрели еще что я выше написал что фото у меня не сохраняются? если сложно то тогда не нужно:) хотел спросить у меня в файле common.ts есть настройки GigachatToken есть возможность что бы отображалось при ответе в формате 0/0 где 0/ это сколько токенов отправлено, /0 это сколько токенов получено? инструкция есть на https://developers.sber.ru/docs/ru/gigachat/api/reference/rest/post-tokens-count
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
Good afternoon:) haven’t you seen what I wrote above that my photos are not saved? if it’s difficult, then there’s no need :) I wanted to ask, in my common.ts file there are settings for GigachatToken, is it possible for it to be displayed when answering in the format 0/0 where 0/ is how many tokens were sent, /0 is how many tokens were received? instructions are available at https://developers.sber.ru/docs/ru/gigachat/api/reference/rest/post-tokens-count
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
The bot has detected that the language of the problem message is not English and will translate it automatically.
Good afternoon:) haven’t you seen what I wrote above that I don’t have photos saved? if it’s difficult, then don’t :) I wanted to ask, I have GigachatToken settings in the common.ts file, is it possible for it to be displayed when responding in the format 0/0 where 0/ is how many tokens were sent, /0 is how many tokens was received? instructions are available at https://developers.sber.ru/docs/ru/gigachat/api/reference/rest/post-tokens-count ![Screenshot_1](https://private-user-images.githubusercontent.com/ 118296790/313388797-ac172537-2b4b-4a42-a6a3-670abbf02f68.png?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoic mF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3MTA1NzgxMDIsIm5iZiI6MTcxMDU3NzgwMiwicGF0aCI6Ii8xMTgyOTY3OTAvMzEzMzg4Nzk3LWFjMTcyNTM3LTJi NGItNGE0Mi1hNmEzLTY3MGFiYmYwMmY2OC5wbmc_WC1BbXotQWxnb3JpdGhtPUFXUzQtSE1BQy1TSEEyNTYmWC1BbXotQ3JlZGVudGlhbD1BS0lBVkNPRFlMU0E1M1BRSzRaQSUyRjIwMjQwMzE2JTJ GdXMtZWFzdC0xJTJGczMlMkZhd3M0X3JlcXVlc3QmWC1BbXotRGF0ZT0yMDI0MDMxNlQwODMwMDJaJlgtQW16LUV4cGlyZXM9MzAwJlgtQW16LVNpZ25hdHVyZT1mMTAwY2ZlYmE4 ZDVmNGMzN2Y0MDc5ZDM5ODgxYzEyZjQ5ODI5ZjdhOTc3M2NlMjdiZTk2NjNkYmVlOGEyMDYzJlgtQW16LVNpZ25lZEhlYWRlcnM9aG9zdCZhY3Rvcl9pZD0wJmtleV9pZD0wJnJlcG9faWQ 9MCJ9.Bv-eufUtQX8K4ZS5G-1kDGVFJGMuA8K5NfxnLDWCNns)
from chatgpt-next-web-langchain.
Bot detected the issue body's language is not English, translate it automatically.
下午好:)你没看到我上面写的我的照片没有保存吗?如果这很困难,那就没有必要:)我想问一下,在我的common.ts文件中有 GigachatToken 的设置,是否可以在以 0/0 格式回答时显示它,其中 0/ 是发送了多少个令牌,/0 是收到了多少个令牌?有关说明,请访问 https://developers.sber.ru/docs/ru/gigachat/api/reference/rest/post-tokens-count
Sorry, displaying the number of tokens used is not supported by this project.
If you want to modify the path where image files are saved, you can modify the following files:
https://github.dev/Hk-Gosuto/ChatGPT-Next-Web-LangChain/blob/0af8e7fc8731fe3403f6d72d44c1dc5ea4b8209a/app/utils/local_file_storage.ts#L6
https://github.dev/Hk-Gosuto/ChatGPT-Next-Web-LangChain/blob/0af8e7fc8731fe3403f6d72d44c1dc5ea4b8209a/app/store/chat.ts#L472
from chatgpt-next-web-langchain.
Related Issues (20)
- [Bug] 通过修改 base_url 转发的gpt(非官方直连接口),所有插件都无法使用 HOT 5
- [Feature] 语音输入和输出支持 HOT 19
- [Bug] LaTeX 渲染异常 HOT 3
- [联想问题自动发送功能] HOT 3
- 关于跨域问题 HOT 3
- [Bug] 升级2.10.3版本之后,如果在环境变量中同时设置了code 和 google_api_key,则gemini-pro必须填code才能使用,而OpenAI的模型不需要,只要在自定义接口中填入API Key即可使用。 HOT 12
- [Feature] 希望模型中加入claude3的支持 HOT 5
- [Feature] 支持smart-slides插件做ppt HOT 3
- [Feature] 期望 [对象储存] 可以增加对于Azure Storage Account(Blob储存)的接入支持 HOT 1
- [Feature]作者大大有机会上传多张图片吗 HOT 3
- 作者大大,可以考虑用oss上传不同文件吗,而不使用压缩 HOT 5
- [Bug] 点击清除聊天记录,gpt-vision-preview模型提示“无法查看图片内容”,但新建聊天后同样请求就可以 HOT 10
- [Bug] 语音朗读功能手机端经常不work HOT 5
- [Feature] 支持 OpenAI Whisper HOT 21
- [Feature Request]: 希望增加支持Claude模型调用 HOT 7
- [Bug] 移动端聚焦文本框时窗口会被放大且不会复原 HOT 5
- [Bug] 拉取了最新代码部署之后,首次加载网址巨慢,之前几秒钟,现在每次都要几十秒甚至分钟级 HOT 12
- [Feature Request]: gpts支持 HOT 5
- 作者大大什么时候能上传多个不同类型的文件呀🥹 HOT 7
Recommend Projects
-
React
A declarative, efficient, and flexible JavaScript library for building user interfaces.
-
Vue.js
🖖 Vue.js is a progressive, incrementally-adoptable JavaScript framework for building UI on the web.
-
Typescript
TypeScript is a superset of JavaScript that compiles to clean JavaScript output.
-
TensorFlow
An Open Source Machine Learning Framework for Everyone
-
Django
The Web framework for perfectionists with deadlines.
-
Laravel
A PHP framework for web artisans
-
D3
Bring data to life with SVG, Canvas and HTML. 📊📈🎉
-
Recommend Topics
-
javascript
JavaScript (JS) is a lightweight interpreted programming language with first-class functions.
-
web
Some thing interesting about web. New door for the world.
-
server
A server is a program made to process requests and deliver data to clients.
-
Machine learning
Machine learning is a way of modeling and interpreting data that allows a piece of software to respond intelligently.
-
Visualization
Some thing interesting about visualization, use data art
-
Game
Some thing interesting about game, make everyone happy.
Recommend Org
-
Facebook
We are working to build community through open source technology. NB: members must have two-factor auth.
-
Microsoft
Open source projects and samples from Microsoft.
-
Google
Google ❤️ Open Source for everyone.
-
Alibaba
Alibaba Open Source for everyone
-
D3
Data-Driven Documents codes.
-
Tencent
China tencent open source team.
from chatgpt-next-web-langchain.