Mirror of https://github.com/TabbyML/tabby, synced 2024-11-22 00:08:06 +00:00
fix: when connecting to localhost endpoint, do not use proxy settings (#2736)
* fix: when connecting to localhost endpoint, do not use proxy settings
* update
* update
This commit is contained in:
parent
53c028ec9b
commit
21b4ded487
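
The fix centers on reqwest's ClientBuilder::no_proxy(), which makes a client ignore system proxy settings and proxy environment variables. A minimal standalone sketch of the pattern the diff applies (illustration only, not Tabby code):

use reqwest::Client;

// Build an HTTP client that bypasses HTTP(S)_PROXY and system proxy settings,
// so requests to a locally running model server always go out directly.
fn direct_client() -> Client {
    Client::builder()
        .no_proxy()
        .build()
        .expect("failed to build reqwest client")
}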
@@ -0,0 +1,3 @@
+kind: Fixed and Improvements
+body: When connecting to localhost model servers, skip the proxy settings
+time: 2024-07-26T20:29:12.300644-07:00
@@ -4,6 +4,8 @@ use async_openai::config::OpenAIConfig;
 use tabby_common::config::HttpModelConfig;
 use tabby_inference::{ChatCompletionStream, ExtendedOpenAIConfig};
 
+use crate::create_reqwest_client;
+
 pub async fn create(model: &HttpModelConfig) -> Arc<dyn ChatCompletionStream> {
     let config = OpenAIConfig::default()
         .with_api_base(model.api_endpoint.clone())
@@ -24,5 +26,8 @@ pub async fn create(model: &HttpModelConfig) -> Arc<dyn ChatCompletionStream> {
 
     let config = builder.build().expect("Failed to build config");
 
-    Arc::new(async_openai::Client::with_config(config))
+    Arc::new(
+        async_openai::Client::with_config(config)
+            .with_http_client(create_reqwest_client(&model.api_endpoint)),
+    )
 }
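
For the OpenAI-compatible chat client, the same pattern composes with async_openai: the config sets the API base, and the custom reqwest client replaces the default HTTP transport. A standalone sketch under those assumptions (the endpoint URL is illustrative):

use async_openai::{config::OpenAIConfig, Client};

// Hypothetical example: an OpenAI-compatible client pointed at a local server,
// using an HTTP transport that skips proxy settings.
fn localhost_chat_client() -> Client<OpenAIConfig> {
    let http = reqwest::Client::builder()
        .no_proxy() // do not route localhost traffic through a proxy
        .build()
        .expect("failed to build reqwest client");

    let config = OpenAIConfig::default().with_api_base("http://localhost:8080/v1");
    Client::with_config(config).with_http_client(http)
}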
@@ -5,6 +5,8 @@ use reqwest_eventsource::{Event, EventSource};
 use serde::{Deserialize, Serialize};
 use tabby_inference::{CompletionOptions, CompletionStream};
 
+use crate::create_reqwest_client;
+
 pub struct LlamaCppEngine {
     client: reqwest::Client,
     api_endpoint: String,
@@ -13,7 +15,7 @@ pub struct LlamaCppEngine {
 
 impl LlamaCppEngine {
     pub fn create(api_endpoint: &str, api_key: Option<String>) -> Self {
-        let client = reqwest::Client::new();
+        let client = create_reqwest_client(api_endpoint);
 
         Self {
             client,
@@ -2,6 +2,8 @@ use async_trait::async_trait;
 use serde::{Deserialize, Serialize};
 use tabby_inference::Embedding;
 
+use crate::create_reqwest_client;
+
 pub struct LlamaCppEngine {
     client: reqwest::Client,
     api_endpoint: String,
@@ -10,7 +12,7 @@ pub struct LlamaCppEngine {
 
 impl LlamaCppEngine {
     pub fn create(api_endpoint: &str, api_key: Option<String>) -> Self {
-        let client = reqwest::Client::new();
+        let client = create_reqwest_client(api_endpoint);
 
         Self {
             client,
@@ -5,3 +5,17 @@ mod embedding;
 pub use chat::create as create_chat;
 pub use completion::{build_completion_prompt, create};
 pub use embedding::create as create_embedding;
+
+fn create_reqwest_client(api_endpoint: &str) -> reqwest::Client {
+    let builder = reqwest::Client::builder();
+
+    let is_localhost = api_endpoint.starts_with("http://localhost")
+        || api_endpoint.starts_with("http://127.0.0.1");
+    let builder = if is_localhost {
+        builder.no_proxy()
+    } else {
+        builder
+    };
+
+    builder.build().unwrap()
+}
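
A rough way to sanity-check the intent of this helper (not part of this commit; it assumes a local server is listening on port 8080 and relies on reqwest's default proxy-from-environment behavior):

// Point the proxy env var at an address where nothing listens; a client built
// with no_proxy() should still reach the local server directly, while a default
// client would try (and fail) to go through the dead proxy.
#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    std::env::set_var("HTTP_PROXY", "http://127.0.0.1:9"); // assumed-dead proxy address
    let direct = reqwest::Client::builder().no_proxy().build()?;
    let resp = direct.get("http://localhost:8080/health").send().await?;
    println!("direct request succeeded: {}", resp.status());
    Ok(())
}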
@@ -122,7 +122,7 @@ impl LlamaCppSupervisor {
 
     pub async fn start(&self) {
         debug!("Waiting for llama-server <{}> to start...", self.name);
-        let client = reqwest::Client::new();
+        let client = reqwest::Client::builder().no_proxy().build().unwrap();
         loop {
             let Ok(resp) = client.get(api_endpoint(self.port) + "/health").send().await else {
                 continue;