Mirror of https://github.com/TabbyML/tabby, synced 2024-11-22 00:08:06 +00:00
fix: when an error happens in the background inference loop, exit the process (#713)
parent c5cfba403f
commit 9344c32b31
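Context for the change (not part of the commit): the inference loop runs on a background thread, and a panic there unwinds only that thread by default, which would leave the rest of the server running with no working engine behind it. Replacing panic! with a macro that logs and then calls std::process::exit(1) makes the failure terminate the whole process, where a supervisor can restart it. A minimal standalone sketch of that difference, using a plain std thread as a stand-in for Tabby's loop:

// Sketch only: a panic in a spawned thread does not stop main(),
// but std::process::exit(1) tears down the entire process.
use std::{thread, time::Duration};

fn main() {
    thread::spawn(|| {
        // With panic!("inference loop failed"), only this worker thread would
        // die and the loop below would keep running, oblivious.
        eprintln!("inference loop failed");
        // The fix's approach: make the whole process exit with a non-zero code.
        std::process::exit(1);
    });

    loop {
        // Stand-in for the server that would otherwise keep serving requests.
        thread::sleep(Duration::from_secs(1));
    }
}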
Cargo.lock (generated): 1 line changed
@@ -2138,6 +2138,7 @@ dependencies = [
  "futures",
  "tabby-inference",
  "tokio",
+ "tracing",
 ]

 [[package]]
@@ -18,3 +18,4 @@ tabby-inference = { path = "../tabby-inference" }
 derive_builder = { workspace = true }
 futures.workspace = true
 async-stream.workspace = true
+tracing.workspace = true
@@ -1,3 +1,5 @@
+mod utils;
+
 use std::{collections::HashMap, sync::Arc};

 use async_stream::stream;
@@ -77,7 +79,7 @@ impl AsyncTextInferenceEngine {
         let result = match engine.as_mut().unwrap().step() {
             Ok(result) => result,
             Err(err) => {
-                panic!("Failed to step: {}", err)
+                fatal!("Failed to step: {}", err)
             }
         };
@@ -161,7 +163,7 @@ impl LlamaTextGeneration {
     pub fn create(options: LlamaTextGenerationOptions) -> Self {
         let engine = create_engine(options.use_gpu, &options.model_path);
         if engine.is_null() {
-            panic!("Unable to load model: {}", options.model_path);
+            fatal!("Unable to load model: {}", options.model_path);
         }
         let ret = LlamaTextGeneration {
             engine: Arc::new(AsyncTextInferenceEngine::create(engine)),
crates/llama-cpp-bindings/src/utils.rs (new file): 16 lines added
@@ -0,0 +1,16 @@
+#[macro_export]
+macro_rules! fatal {
+    ($msg:expr) => {
+        ({
+            tracing::error!($msg);
+            std::process::exit(1);
+        })
+    };
+
+    ($fmt:expr, $($arg:tt)*) => {
+        ({
+            tracing::error!($fmt, $($arg)*);
+            std::process::exit(1);
+        })
+    };
+}
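A hedged usage sketch (not from the commit): because the macro is marked #[macro_export], call sites inside the crate can reach it as crate::fatal! (the llama.rs hunks above invoke it unqualified), and it accepts the same message/format-argument shapes as panic!. The helper function and model path below are made up for illustration.

// Hypothetical call site, assuming the fatal! macro above and a tracing
// subscriber already installed (e.g. via tracing_subscriber::fmt::init()).
fn require_model(path: &str) {
    if !std::path::Path::new(path).exists() {
        // Logs via tracing::error! and then exits the whole process with
        // status 1, instead of unwinding the current thread like panic!.
        crate::fatal!("Unable to load model: {}", path);
    }
}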