Mirror of https://github.com/KoboldAI/KoboldAI-Client.git
@@ -23,7 +23,7 @@ dependencies:
   - Pillow
   - psutil
   - pip:
-    - -f https://developer.intel.com/ipex-whl-stable-xpu
+    - --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
     - torch==2.0.1a0; sys_platform == 'linux'
     - torch==2.0.0a0; sys_platform == 'win32'
     - intel_extension_for_pytorch==2.0.110+xpu; sys_platform == 'linux'
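Note on the index switch: `-f`/`--find-links` points pip at a flat HTML page of wheel links, while `--extra-index-url` registers Intel's wheel server as a proper additional package index, which resolves pinned versions more reliably. A minimal post-install sanity check, assuming the pinned wheels above installed cleanly (the printed values are illustrative, not guaranteed):

import torch
import intel_extension_for_pytorch as ipex  # registers the torch.xpu backend

print(torch.__version__)         # expected: 2.0.1a0 on Linux, 2.0.0a0 on Windows
print(ipex.__version__)          # expected: 2.0.110+xpu
print(torch.xpu.is_available())  # True when an Intel XPU device is visible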
@@ -35,7 +35,7 @@ dependencies:
     - lupa==1.10
     - transformers[sentencepiece]==4.34.0
     - huggingface_hub==0.16.4
-    - optimum[openvino,nncf,neural-compressor]==1.12.0
+    - optimum[onnxruntime,openvino,nncf,neural-compressor]==1.13.2
     - safetensors==0.3.3
     - accelerate==0.21.0
     - git+https://github.com/VE-FORBRYDERNE/mkultra
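The `optimum` bump from 1.12.0 to 1.13.2 also pulls in the `onnxruntime` extra. A hedged sketch of what that extra enables; "gpt2" is a hypothetical stand-in for whatever model is actually loaded, and `export=True` assumes optimum 1.13's on-the-fly ONNX export path:

from optimum.onnxruntime import ORTModelForCausalLM
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = ORTModelForCausalLM.from_pretrained("gpt2", export=True)  # export to ONNX on the fly
inputs = tokenizer("Hello", return_tensors="pt")
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=8)[0]))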
@@ -16,7 +16,6 @@ def ipex_init(): # pylint: disable=too-many-statements
     torch.cuda.device = torch.xpu.device
     torch.cuda.device_count = torch.xpu.device_count
     torch.cuda.device_of = torch.xpu.device_of
-    torch.cuda.getDeviceIdListForCard = torch.xpu.getDeviceIdListForCard
     torch.cuda.get_device_name = torch.xpu.get_device_name
     torch.cuda.get_device_properties = torch.xpu.get_device_properties
     torch.cuda.init = torch.xpu.init
@@ -145,7 +144,7 @@ def ipex_init(): # pylint: disable=too-many-statements
     ipex._C._DeviceProperties.minor = 2

     #Fix functions with ipex:
-    torch.cuda.mem_get_info = lambda device=None: [(torch.xpu.get_device_properties(device).total_memory - torch.xpu.memory_allocated(device)), torch.xpu.get_device_properties(device).total_memory]
+    torch.cuda.mem_get_info = lambda device=None: [(torch.xpu.get_device_properties(device).total_memory - torch.xpu.memory_reserved(device)), torch.xpu.get_device_properties(device).total_memory]
     torch._utils._get_available_device_type = lambda: "xpu"
     torch.has_cuda = True
     torch.cuda.has_half = True
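Swapping `memory_allocated` for `memory_reserved` makes the shim subtract everything the caching allocator holds, not just live tensors, so the reported free memory no longer overstates what a fresh allocation could actually get. A standalone sketch of the shimmed behavior, assuming IPEX is installed and device 0 exists:

import torch
import intel_extension_for_pytorch  # noqa: F401  (registers torch.xpu)

def xpu_mem_get_info(device=None):
    total = torch.xpu.get_device_properties(device).total_memory
    # memory_reserved() counts the allocator's whole pool, including cached
    # blocks that are not currently backing live tensors.
    return [total - torch.xpu.memory_reserved(device), total]

free, total = xpu_mem_get_info(0)
print(f"{free / 1024**3:.2f} GiB free of {total / 1024**3:.2f} GiB")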
@@ -157,6 +156,12 @@ def ipex_init(): # pylint: disable=too-many-statements
     torch.cuda.get_device_properties.minor = 7
     torch.cuda.ipc_collect = lambda *args, **kwargs: None
     torch.cuda.utilization = lambda *args, **kwargs: 0
+    if hasattr(torch.xpu, 'getDeviceIdListForCard'):
+        torch.cuda.getDeviceIdListForCard = torch.xpu.getDeviceIdListForCard
+        torch.cuda.get_device_id_list_per_card = torch.xpu.getDeviceIdListForCard
+    else:
+        torch.cuda.getDeviceIdListForCard = torch.xpu.get_device_id_list_per_card
+        torch.cuda.get_device_id_list_per_card = torch.xpu.get_device_id_list_per_card

     ipex_hijacks()
     attention_init()
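This block reinstates the `getDeviceIdListForCard` alias that the `-16,7` hunk removed: the camelCase `getDeviceIdListForCard` was renamed to `get_device_id_list_per_card` in newer IPEX releases, so the shim probes for the old spelling and maps both names onto whichever one this build provides. A generic sketch of the probe-and-alias pattern, with module and names as stand-ins:

def alias_renamed_api(module, old_name, new_name):
    """Point both spellings at whichever attribute this build provides."""
    impl = getattr(module, old_name, None) or getattr(module, new_name)
    setattr(module, old_name, impl)
    setattr(module, new_name, impl)

# Usage mirroring the hunk, assuming intel_extension_for_pytorch is loaded:
# alias_renamed_api(torch.xpu, "getDeviceIdListForCard", "get_device_id_list_per_card")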
@@ -10,13 +10,15 @@ def torch_bmm(input, mat2, *, out=None):

     #ARC GPUs can't allocate more than 4GB to a single block, Slice it:
     batch_size_attention, input_tokens, mat2_shape = input.shape[0], input.shape[1], mat2.shape[2]
-    block_multiply = 2.4 if input.dtype == torch.float32 else 1.2
-    block_size = (batch_size_attention * input_tokens * mat2_shape) / 1024 * block_multiply #MB
+    block_multiply = input.element_size()
+    slice_block_size = input_tokens * mat2_shape / 1024 / 1024 * block_multiply
+    block_size = batch_size_attention * slice_block_size

     split_slice_size = batch_size_attention
-    if block_size >= 4000:
+    if block_size > 4:
         do_split = True
         #Find something divisible with the input_tokens
-        while ((split_slice_size * input_tokens * mat2_shape) / 1024 * block_multiply) > 4000:
+        while (split_slice_size * slice_block_size) > 4:
             split_slice_size = split_slice_size // 2
             if split_slice_size <= 1:
                 split_slice_size = 1
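The rewrite replaces the tuned fudge factors (2.4 for fp32, 1.2 otherwise) with the exact byte size from `element_size()`, hoists the per-slice cost into `slice_block_size` so the `while` loop no longer recomputes the full product, and rescales the threshold from 4000 to 4 to match the extra division by 1024. A worked example of the new arithmetic, assuming fp16 inputs and an illustrative shape:

batch_size_attention, input_tokens, mat2_shape = 12, 4096, 512  # illustrative
block_multiply = 2  # input.element_size() for fp16
slice_block_size = input_tokens * mat2_shape / 1024 / 1024 * block_multiply  # 4.0
block_size = batch_size_attention * slice_block_size                         # 48.0

split_slice_size = batch_size_attention
if block_size > 4:  # 48.0 > 4, so the bmm must be sliced
    while split_slice_size * slice_block_size > 4:
        split_slice_size //= 2  # 12 -> 6 -> 3 -> 1
print(split_slice_size)  # 1: even a single batch row sits right at the cap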
@@ -24,12 +26,12 @@ def torch_bmm(input, mat2, *, out=None):
     else:
         do_split = False

-    split_block_size = (split_slice_size * input_tokens * mat2_shape) / 1024 * block_multiply #MB
     split_2_slice_size = input_tokens
-    if split_block_size >= 4000:
+    if split_slice_size * slice_block_size > 4:
+        slice_block_size2 = split_slice_size * mat2_shape / 1024 / 1024 * block_multiply
         do_split_2 = True
         #Find something divisible with the input_tokens
-        while ((split_slice_size * split_2_slice_size * mat2_shape) / 1024 * block_multiply) > 4000:
+        while (split_2_slice_size * slice_block_size2) > 4:
             split_2_slice_size = split_2_slice_size // 2
             if split_2_slice_size <= 1:
                 split_2_slice_size = 1
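Neither hunk shows the loop that consumes these split sizes. A hedged sketch of the usual consumption pattern (not this repo's exact code), assuming the batch divides evenly by `split_slice_size`:

import torch

def sliced_bmm(input, mat2, split_slice_size):
    # Run the bmm one batch slice at a time and stitch the results together,
    # so no single kernel launch touches more than one slice's worth of memory.
    out = torch.empty(input.shape[0], input.shape[1], mat2.shape[2],
                      device=input.device, dtype=input.dtype)
    for i in range(input.shape[0] // split_slice_size):
        start, end = i * split_slice_size, (i + 1) * split_slice_size
        out[start:end] = torch.bmm(input[start:end], mat2[start:end])
    return out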
@@ -71,13 +73,16 @@ def scaled_dot_product_attention(query, key, value, attn_mask=None, dropout_p=0.
     else:
         shape_one, batch_size_attention, query_tokens, shape_four = query.shape
         no_shape_one = False
-    block_multiply = 3.6 if query.dtype == torch.float32 else 1.8
-    block_size = (shape_one * batch_size_attention * query_tokens * shape_four) / 1024 * block_multiply #MB
+    block_multiply = query.element_size()
+    slice_block_size = shape_one * query_tokens * shape_four / 1024 / 1024 * block_multiply
+    block_size = batch_size_attention * slice_block_size

     split_slice_size = batch_size_attention
-    if block_size >= 4000:
+    if block_size > 4:
         do_split = True
         #Find something divisible with the shape_one
-        while ((shape_one * split_slice_size * query_tokens * shape_four) / 1024 * block_multiply) > 4000:
+        while (split_slice_size * slice_block_size) > 4:
             split_slice_size = split_slice_size // 2
             if split_slice_size <= 1:
                 split_slice_size = 1
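The same refactor applied to the 4D attention path. A worked example, assuming an fp16 query of shape (shape_one, batch_size_attention, query_tokens, shape_four) = (2, 10, 4096, 64):

shape_one, batch_size_attention, query_tokens, shape_four = 2, 10, 4096, 64
block_multiply = 2  # query.element_size() for fp16
slice_block_size = shape_one * query_tokens * shape_four / 1024 / 1024 * block_multiply  # 1.0
block_size = batch_size_attention * slice_block_size  # 10.0 > 4, so slice

split_slice_size = batch_size_attention
while split_slice_size * slice_block_size > 4:
    split_slice_size //= 2  # 10 -> 5 -> 2
print(split_slice_size)  # slices of size 2 along dim 1 stay under the cap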
@@ -85,12 +90,12 @@ def scaled_dot_product_attention(query, key, value, attn_mask=None, dropout_p=0.
     else:
         do_split = False

-    split_block_size = (shape_one * split_slice_size * query_tokens * shape_four) / 1024 * block_multiply #MB
     split_2_slice_size = query_tokens
-    if split_block_size >= 4000:
+    if split_slice_size * slice_block_size > 4:
+        slice_block_size2 = shape_one * split_slice_size * shape_four / 1024 / 1024 * block_multiply
         do_split_2 = True
         #Find something divisible with the batch_size_attention
-        while ((shape_one * split_slice_size * split_2_slice_size * shape_four) / 1024 * block_multiply) > 4000:
+        while (split_2_slice_size * slice_block_size2) > 4:
             split_2_slice_size = split_2_slice_size // 2
             if split_2_slice_size <= 1:
                 split_2_slice_size = 1
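When a first-level slice still exceeds the cap, a second split over `query_tokens` kicks in, with `slice_block_size2` as the cost of one token row within a first-level slice. A hedged sketch (not this repo's exact code) of how the two split sizes combine, assuming non-causal attention, matching head dims, and evenly dividing sizes:

import torch
import torch.nn.functional as F

def sliced_sdpa(query, key, value, split_slice_size, split_2_slice_size):
    out = torch.empty_like(query)  # assumes value's head dim matches query's
    for i in range(query.shape[1] // split_slice_size):
        s1 = slice(i * split_slice_size, (i + 1) * split_slice_size)
        for j in range(query.shape[2] // split_2_slice_size):
            s2 = slice(j * split_2_slice_size, (j + 1) * split_2_slice_size)
            # Query tokens can be sliced freely; key/value stay whole per slice.
            out[:, s1, s2] = F.scaled_dot_product_attention(
                query[:, s1, s2], key[:, s1], value[:, s1])
    return out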
@@ -55,13 +55,14 @@ class SlicedAttnProcessor: # pylint: disable=too-few-public-methods
         )

         #ARC GPUs can't allocate more than 4GB to a single block, Slice it:
-        block_multiply = 2.4 if query.dtype == torch.float32 else 1.2
-        block_size = (batch_size_attention * query_tokens * shape_three) / 1024 * block_multiply #MB
+        block_multiply = query.element_size()
+        slice_block_size = self.slice_size * shape_three / 1024 / 1024 * block_multiply
+        block_size = query_tokens * slice_block_size
         split_2_slice_size = query_tokens
-        if block_size >= 4000:
+        if block_size > 4:
             do_split_2 = True
             #Find something divisible with the query_tokens
-            while ((self.slice_size * split_2_slice_size * shape_three) / 1024 * block_multiply) > 4000:
+            while (split_2_slice_size * slice_block_size) > 4:
                 split_2_slice_size = split_2_slice_size // 2
                 if split_2_slice_size <= 1:
                     split_2_slice_size = 1
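Same heuristic in the diffusers processor: this processor already iterates the batch in `self.slice_size` chunks, so the per-slice cost is fixed by `self.slice_size` and only `query_tokens` needs splitting. A worked example, assuming fp32 and illustrative sizes:

slice_size, shape_three, query_tokens = 2, 4096, 4096  # illustrative
block_multiply = 4  # query.element_size() for fp32
slice_block_size = slice_size * shape_three / 1024 / 1024 * block_multiply  # 0.03125
block_size = query_tokens * slice_block_size  # 128.0 > 4, so split the tokens

split_2_slice_size = query_tokens
while split_2_slice_size * slice_block_size > 4:
    split_2_slice_size //= 2  # 4096 -> 2048 -> ... -> 128
print(split_2_slice_size)  # 128-token slices land right at the cap (4.0)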