#371 Add llama.cpp inference server support

Cohee
2023-12-18 22:38:28 +02:00
parent 6e8104873e
commit edd737e8bd
9 changed files with 136 additions and 37 deletions


@@ -39,6 +39,7 @@ router.post('/status', jsonParser, async function (request, response) {
         case TEXTGEN_TYPES.OOBA:
         case TEXTGEN_TYPES.APHRODITE:
         case TEXTGEN_TYPES.KOBOLDCPP:
+        case TEXTGEN_TYPES.LLAMACPP:
             url += '/v1/models';
             break;
         case TEXTGEN_TYPES.MANCER:
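For connection checks, the llama.cpp backend is grouped with the other local servers: probing the OpenAI-compatible /v1/models route is enough to confirm the server is reachable. The snippet below is a minimal sketch of such a probe; the base URL, default port 8080, and function name are illustrative assumptions, not part of this commit.

// Hypothetical status probe (illustrative only): the LLAMACPP case above
// routes status checks to the OpenAI-compatible /v1/models listing.
const LLAMACPP_BASE_URL = 'http://127.0.0.1:8080'; // assumed default llama.cpp server address

async function probeLlamaCpp(baseUrl = LLAMACPP_BASE_URL) {
    const response = await fetch(baseUrl + '/v1/models');
    if (!response.ok) {
        throw new Error(`llama.cpp server not reachable: HTTP ${response.status}`);
    }
    return response.json(); // model list in OpenAI-compatible shape
}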
@@ -160,6 +161,9 @@ router.post('/generate', jsonParser, async function (request, response_generate)
         case TEXTGEN_TYPES.MANCER:
             url += '/oai/v1/completions';
             break;
+        case TEXTGEN_TYPES.LLAMACPP:
+            url += '/completion';
+            break;
     }
 }
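Generation, by contrast, is routed to llama.cpp's native /completion endpoint rather than an OpenAI-compatible route. The following is a rough sketch of a direct request to that endpoint; the prompt, sampling values, and helper name are illustrative assumptions, and the actual request body is assembled elsewhere in this file.

// Hypothetical direct call to the llama.cpp /completion endpoint that the hunk above routes to.
async function generateWithLlamaCpp(baseUrl, prompt) {
    const response = await fetch(baseUrl + '/completion', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
            prompt: prompt,     // raw prompt string
            n_predict: 200,     // assumed sampling settings for illustration
            temperature: 0.7,
        }),
    });

    if (!response.ok) {
        throw new Error(`llama.cpp generation failed: HTTP ${response.status}`);
    }

    const data = await response.json();
    return data.content; // llama.cpp returns the generated text in the "content" field
}

// Example usage:
// const text = await generateWithLlamaCpp('http://127.0.0.1:8080', 'Hello,');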