mirror of
https://github.com/KoboldAI/KoboldAI-Client.git
synced 2024-12-29 01:30:29 +01:00
Fixing GPU support had broken CPU support. Now testing for CUDA capability before creating the pipeline
This commit is contained in:
parent
97ad42efe6
commit
ace2b2db12
11
aiserver.py
11
aiserver.py
@@ -53,6 +53,7 @@ class vars:
     url      = "https://api.inferkit.com/v1/models/standard/generate" # InferKit API URL
     apikey   = ""
     savedir  = getcwd()+"\stories\\newstory.json"
+    hascuda  = False

 #==================================================================#
 # Startup
@@ -107,8 +108,16 @@ if(vars.model != "InferKit"):
 if(not vars.noai):
     print("{0}Initializing transformers, please wait...{1}".format(colors.HEADER, colors.ENDC))
     from transformers import pipeline, GPT2Tokenizer
     import torch
 
+    # Is CUDA available? If so, use GPU, otherwise fall back to CPU
+    vars.hascuda = torch.cuda.is_available()
+
-    generator = pipeline('text-generation', model=vars.model, device=0)
+    if(vars.hascuda):
+        generator = pipeline('text-generation', model=vars.model, device=0)
+    else:
+        generator = pipeline('text-generation', model=vars.model)
+
     tokenizer = GPT2Tokenizer.from_pretrained(vars.model)
     print("{0}OK! {1} pipeline created!{2}".format(colors.OKGREEN, vars.model, colors.ENDC))
 else:
Loading…
Reference in New Issue
Block a user