Mirror of https://github.com/KoboldAI/KoboldAI-Client.git (synced 2025-06-05 21:59:24 +02:00)

Commit: IPEX fixes
@@ -2,22 +2,12 @@ import os
 import sys
 import contextlib
 import torch
-import intel_extension_for_pytorch as ipex
-from .diffusers import ipex_diffusers
+import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import
 from .hijacks import ipex_hijacks
-from logger import logger
 
-#ControlNet depth_leres++
-class DummyDataParallel(torch.nn.Module):
-    def __new__(cls, module, device_ids=None, output_device=None, dim=0):
-        if type(device_ids) is list and len(device_ids) > 1:
-            logger.info("IPEX backend doesn't support DataParallel on multiple XPU devices")
-        return module.to("xpu")
+# pylint: disable=protected-access, missing-function-docstring, line-too-long
 
-def return_null_context(*args, **kwargs):
-    return contextlib.nullcontext()
-
-def ipex_init():
+def ipex_init(): # pylint: disable=too-many-statements
     #Replace cuda with xpu:
     torch.cuda.current_device = torch.xpu.current_device
     torch.cuda.current_stream = torch.xpu.current_stream
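In short, this hunk trims the initializer module down to the import shims and ipex_init(); DummyDataParallel and return_null_context reappear later in this commit inside the hijacks module. A rough usage sketch of the resulting API, assuming a package import path that is not shown in this diff:

    import torch
    import intel_extension_for_pytorch as ipex  # registers the XPU backend  # pylint: disable=unused-import
    from modeling.ipex import ipex_init  # assumed module path, not confirmed by this diff

    ipex_init()                      # alias the torch.cuda namespace onto torch.xpu
    if torch.cuda.is_available():    # now resolves via torch.xpu.is_available
        x = torch.ones(2, 2, device="xpu")
        torch.cuda.synchronize()     # now resolves via torch.xpu.synchronize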
@@ -30,6 +20,7 @@ def ipex_init():
     torch.cuda.init = torch.xpu.init
     torch.cuda.is_available = torch.xpu.is_available
     torch.cuda.is_initialized = torch.xpu.is_initialized
+    torch.cuda.is_current_stream_capturing = lambda: False
     torch.cuda.set_device = torch.xpu.set_device
     torch.cuda.stream = torch.xpu.stream
     torch.cuda.synchronize = torch.xpu.synchronize
@@ -138,8 +129,13 @@ def ipex_init():
     torch.cuda.amp.common.amp_definitely_not_available = lambda: False
     try:
         torch.cuda.amp.GradScaler = torch.xpu.amp.GradScaler
-    except Exception:
-        torch.cuda.amp.GradScaler = ipex.cpu.autocast._grad_scaler.GradScaler
+    except Exception: # pylint: disable=broad-exception-caught
+        try:
+            from .gradscaler import gradscaler_init # pylint: disable=import-outside-toplevel, import-error
+            gradscaler_init()
+            torch.cuda.amp.GradScaler = torch.xpu.amp.GradScaler
+        except Exception: # pylint: disable=broad-exception-caught
+            torch.cuda.amp.GradScaler = ipex.cpu.autocast._grad_scaler.GradScaler
 
     #C
     torch._C._cuda_getCurrentRawStream = ipex._C._getCurrentStream
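The GradScaler hookup becomes a layered fallback: prefer the native torch.xpu.amp.GradScaler, then try the bundled gradscaler_init() shim, and only then fall back to IPEX's CPU scaler. A minimal sketch of the same selection order, with a helper name that is mine rather than from the commit:

    import torch
    import intel_extension_for_pytorch as ipex  # pylint: disable=import-error

    def pick_grad_scaler():
        if hasattr(torch.xpu.amp, "GradScaler"):
            return torch.xpu.amp.GradScaler               # preferred: scaler shipped with the XPU build
        # the real code additionally runs the bundled gradscaler_init() shim at this point
        return ipex.cpu.autocast._grad_scaler.GradScaler  # last resort: IPEX CPU implementation

    torch.cuda.amp.GradScaler = pick_grad_scaler()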
@@ -156,10 +152,12 @@ def ipex_init():
     torch.cuda.get_device_capability = lambda: [11,7]
     torch.cuda.get_device_properties.major = 11
     torch.cuda.get_device_properties.minor = 7
-    torch.backends.cuda.sdp_kernel = return_null_context
-    torch.nn.DataParallel = DummyDataParallel
     torch.cuda.ipc_collect = lambda: None
     torch.cuda.utilization = lambda: 0
 
     ipex_hijacks()
-    ipex_diffusers()
+    try:
+        from .diffusers import ipex_diffusers # pylint: disable=import-outside-toplevel, import-error
+        ipex_diffusers()
+    except Exception: # pylint: disable=broad-exception-caught
+        pass
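ipex_diffusers is now imported lazily inside ipex_init() and skipped on failure, so the core CUDA-to-XPU aliasing no longer hard-depends on diffusers being installed; the hunks that follow appear to come from that diffusers hijack module itself. The guard boils down to the usual optional-dependency pattern, sketched here with a stand-in module name:

    import contextlib

    def load_optional_hijack():
        with contextlib.suppress(Exception):  # swallow ImportError and friends, as the hunk above does
            from some_optional_module import apply_hijack  # stand-in for .diffusers / ipex_diffusers
            apply_hijack()

    load_optional_hijack()  # core initialization continues whether or not the module exists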
@@ -1,11 +1,13 @@
 import torch
-import intel_extension_for_pytorch as ipex
-import torch.nn.functional as F
-import diffusers #0.20.2
+import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import
+import torch.nn.functional as F # pylint: disable=ungrouped-imports
+import diffusers #0.20.2 # pylint: disable=import-error
 
+# pylint: disable=protected-access, missing-function-docstring, line-too-long
+
 Attention = diffusers.models.attention_processor.Attention
 
-class SlicedAttnProcessor:
+class SlicedAttnProcessor: # pylint: disable=too-few-public-methods
     r"""
     Processor for implementing sliced attention.
 
@@ -18,7 +20,7 @@ class SlicedAttnProcessor:
     def __init__(self, slice_size):
         self.slice_size = slice_size
 
-    def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None):
+    def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None): # pylint: disable=too-many-statements, too-many-locals, too-many-branches
         residual = hidden_states
 
         input_ndim = hidden_states.ndim
@@ -74,7 +76,7 @@ class SlicedAttnProcessor:
             end_idx = (i + 1) * self.slice_size
 
             if do_split_2:
-                for i2 in range(query_tokens // split_2_slice_size):
+                for i2 in range(query_tokens // split_2_slice_size): # pylint: disable=invalid-name
                     start_idx_2 = i2 * split_2_slice_size
                     end_idx_2 = (i2 + 1) * split_2_slice_size
 
@@ -114,7 +116,7 @@ class SlicedAttnProcessor:
 
         return hidden_states
 
-class AttnProcessor2_0:
+class AttnProcessor2_0: # pylint: disable=too-few-public-methods, invalid-name
     r"""
     Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0).
     """
@@ -123,7 +125,7 @@ class AttnProcessor2_0:
         if not hasattr(F, "scaled_dot_product_attention"):
             raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
 
-    def __call__(
+    def __call__( # pylint: disable=too-many-arguments, too-many-statements, too-many-locals, too-many-branches
         self,
         attn: Attention,
         hidden_states,
@@ -208,7 +210,7 @@ class AttnProcessor2_0:
                 start_idx = i * split_slice_size
                 end_idx = (i + 1) * split_slice_size
                 if do_split_2:
-                    for i2 in range(query_tokens // split_2_slice_size):
+                    for i2 in range(query_tokens // split_2_slice_size): # pylint: disable=invalid-name
                         start_idx_2 = i2 * split_2_slice_size
                         end_idx_2 = (i2 + 1) * split_2_slice_size
 
@@ -1,8 +1,11 @@
-import torch
-import intel_extension_for_pytorch as ipex
+import contextlib
 import importlib
+import torch
+import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import
 
-class CondFunc:
+# pylint: disable=protected-access, missing-function-docstring, line-too-long, unnecessary-lambda, no-else-return
+
+class CondFunc: # pylint: disable=missing-class-docstring
     def __new__(cls, orig_func, sub_func, cond_func):
         self = super(CondFunc, cls).__new__(cls)
         if isinstance(orig_func, str):
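CondFunc is the conditional monkeypatch helper used throughout this file: it takes a dotted target name, a substitute callable, and a predicate, and only reroutes a call when the predicate returns True. An illustrative registration, modeled on the torch.tensor and torch.linspace calls later in this diff (torch.zeros is my example target, and return_xpu / check_device are helpers defined in this same module):

    CondFunc('torch.zeros',
        # substitute: rewrite a requested CUDA device to XPU, then call the original
        lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs),
        # predicate: only fire when the caller actually asked for a CUDA device
        lambda orig_func, *args, device=None, **kwargs: check_device(device))

    x = torch.zeros(3, device="cuda")  # with the hook in place, this lands on the XPU device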
@@ -29,6 +32,45 @@ class CondFunc:
         else:
             return self.__orig_func(*args, **kwargs)
 
+_utils = torch.utils.data._utils
+def _shutdown_workers(self):
+    if _utils is None or _utils.python_exit_status is True or _utils.python_exit_status is None:
+        return
+    if hasattr(self, "_shutdown") and not self._shutdown:
+        self._shutdown = True
+        try:
+            if hasattr(self, '_pin_memory_thread'):
+                self._pin_memory_thread_done_event.set()
+                self._worker_result_queue.put((None, None))
+                self._pin_memory_thread.join()
+                self._worker_result_queue.cancel_join_thread()
+                self._worker_result_queue.close()
+            self._workers_done_event.set()
+            for worker_id in range(len(self._workers)):
+                if self._persistent_workers or self._workers_status[worker_id]:
+                    self._mark_worker_as_unavailable(worker_id, shutdown=True)
+            for w in self._workers: # pylint: disable=invalid-name
+                w.join(timeout=_utils.MP_STATUS_CHECK_INTERVAL)
+            for q in self._index_queues: # pylint: disable=invalid-name
+                q.cancel_join_thread()
+                q.close()
+        finally:
+            if self._worker_pids_set:
+                _utils.signal_handling._remove_worker_pids(id(self))
+                self._worker_pids_set = False
+            for w in self._workers: # pylint: disable=invalid-name
+                if w.is_alive():
+                    w.terminate()
+
+class DummyDataParallel(torch.nn.Module): # pylint: disable=missing-class-docstring, unused-argument, too-few-public-methods
+    def __new__(cls, module, device_ids=None, output_device=None, dim=0): # pylint: disable=unused-argument
+        if isinstance(device_ids, list) and len(device_ids) > 1:
+            print("IPEX backend doesn't support DataParallel on multiple XPU devices")
+        return module.to("xpu")
+
+def return_null_context(*args, **kwargs): # pylint: disable=unused-argument
+    return contextlib.nullcontext()
+
 def check_device(device):
     return bool((isinstance(device, torch.device) and device.type == "cuda") or (isinstance(device, str) and "cuda" in device) or isinstance(device, int))
 
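Three pieces land here: a copy of the DataLoader worker-shutdown routine (installed later in this diff to replace the stock teardown), DummyDataParallel, which degrades DataParallel to a single-XPU move, and the null-context stand-in for sdp_kernel. Once ipex_hijacks() installs them, wrapping a model behaves roughly like this sketch:

    import torch

    model = torch.nn.Linear(8, 8)
    wrapped = torch.nn.DataParallel(model, device_ids=[0, 1])
    # With the hijack active this prints
    #   "IPEX backend doesn't support DataParallel on multiple XPU devices"
    # and returns model.to("xpu") instead of a real DataParallel wrapper.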
@@ -51,25 +93,25 @@ def ipex_autocast(*args, **kwargs):
         return original_autocast(*args, **kwargs)
 
 original_torch_cat = torch.cat
-def torch_cat(input, *args, **kwargs):
-    if len(input) == 3 and (input[0].dtype != input[1].dtype or input[2].dtype != input[1].dtype):
-        return original_torch_cat([input[0].to(input[1].dtype), input[1], input[2].to(input[1].dtype)], *args, **kwargs)
+def torch_cat(tensor, *args, **kwargs):
+    if len(tensor) == 3 and (tensor[0].dtype != tensor[1].dtype or tensor[2].dtype != tensor[1].dtype):
+        return original_torch_cat([tensor[0].to(tensor[1].dtype), tensor[1], tensor[2].to(tensor[1].dtype)], *args, **kwargs)
     else:
-        return original_torch_cat(input, *args, **kwargs)
+        return original_torch_cat(tensor, *args, **kwargs)
 
 original_interpolate = torch.nn.functional.interpolate
-def interpolate(input, size=None, scale_factor=None, mode='nearest', align_corners=None, recompute_scale_factor=None, antialias=False):
-    if antialias:
-        return_device = input.device
-        return_dtype = input.dtype
-        return original_interpolate(input.to("cpu", dtype=torch.float32), size=size, scale_factor=scale_factor, mode=mode,
+def interpolate(tensor, size=None, scale_factor=None, mode='nearest', align_corners=None, recompute_scale_factor=None, antialias=False): # pylint: disable=too-many-arguments
+    if antialias or align_corners is not None:
+        return_device = tensor.device
+        return_dtype = tensor.dtype
+        return original_interpolate(tensor.to("cpu", dtype=torch.float32), size=size, scale_factor=scale_factor, mode=mode,
         align_corners=align_corners, recompute_scale_factor=recompute_scale_factor, antialias=antialias).to(return_device, dtype=return_dtype)
     else:
-        return original_interpolate(input, size=size, scale_factor=scale_factor, mode=mode,
+        return original_interpolate(tensor, size=size, scale_factor=scale_factor, mode=mode,
         align_corners=align_corners, recompute_scale_factor=recompute_scale_factor, antialias=antialias)
 
 original_linalg_solve = torch.linalg.solve
-def linalg_solve(orig_func, A, B, *args, **kwargs):
+def linalg_solve(A, B, *args, **kwargs): # pylint: disable=invalid-name
     if A.device != torch.device("cpu") or B.device != torch.device("cpu"):
         return_device = A.device
         original_linalg_solve(A.to("cpu"), B.to("cpu"), *args, **kwargs).to(return_device)
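The wrapper functions stop shadowing the builtin name `input` by taking `tensor` instead, and interpolate now takes the CPU float32 detour when align_corners is set, not only for antialiasing. The torch.cat wrapper targets three-way concatenations with mismatched dtypes; with the hijack installed the behaviour is roughly (devices and dtypes here are illustrative):

    import torch

    a = torch.ones(4, dtype=torch.float16, device="xpu")
    b = torch.ones(4, dtype=torch.float32, device="xpu")
    c = torch.ones(4, dtype=torch.float16, device="xpu")
    out = torch.cat((a, b, c))  # a and c are cast to b.dtype before the original cat runs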
@@ -101,10 +143,13 @@ def ipex_hijacks():
     CondFunc('torch.tensor',
         lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs),
         lambda orig_func, *args, device=None, **kwargs: check_device(device))
+    CondFunc('torch.linspace',
+        lambda orig_func, *args, device=None, **kwargs: orig_func(*args, device=return_xpu(device), **kwargs),
+        lambda orig_func, *args, device=None, **kwargs: check_device(device))
 
     CondFunc('torch.Generator',
-        lambda orig_func, device: torch.xpu.Generator(device),
-        lambda orig_func, device: device != torch.device("cpu") and device != "cpu")
+        lambda orig_func, device=None: torch.xpu.Generator(device),
+        lambda orig_func, device=None: device is not None and device != torch.device("cpu") and device != "cpu")
 
     CondFunc('torch.batch_norm',
         lambda orig_func, input, weight, bias, *args, **kwargs: orig_func(input,
@@ -115,12 +160,15 @@ def ipex_hijacks():
         lambda orig_func, input, weight, bias, *args, **kwargs: orig_func(input,
             weight if weight is not None else torch.ones(input.size()[1], device=input.device),
             bias if bias is not None else torch.zeros(input.size()[1], device=input.device), *args, **kwargs),
         lambda orig_func, input, *args, **kwargs: input.device != torch.device("cpu"))
 
     #Functions with dtype errors:
     CondFunc('torch.nn.modules.GroupNorm.forward',
         lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)),
         lambda orig_func, self, input: input.dtype != self.weight.data.dtype)
+    CondFunc('torch.nn.modules.linear.Linear.forward',
+        lambda orig_func, self, input: orig_func(self, input.to(self.weight.data.dtype)),
+        lambda orig_func, self, input: input.dtype != self.weight.data.dtype)
     CondFunc('torch.bmm',
         lambda orig_func, input, mat2, *args, **kwargs: orig_func(input, mat2.to(input.dtype), *args, **kwargs),
         lambda orig_func, input, mat2, *args, **kwargs: input.dtype != mat2.dtype)
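The new Linear.forward hook mirrors the existing GroupNorm one: whenever the incoming activation dtype differs from the layer's weight dtype, the input is cast to match instead of raising. Roughly, with the hijacks active (values here are illustrative):

    import torch

    layer = torch.nn.Linear(4, 4).to("xpu", dtype=torch.float16)
    x = torch.randn(1, 4, device="xpu")   # float32 activations
    y = layer(x)                          # hook casts x to float16 to match layer.weight before forward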
@@ -142,7 +190,10 @@ def ipex_hijacks():
         lambda orig_func, *args, **kwargs: True)
 
     #Functions that make compile mad with CondFunc:
+    torch.utils.data.dataloader._MultiProcessingDataLoaderIter._shutdown_workers = _shutdown_workers
+    torch.nn.DataParallel = DummyDataParallel
     torch.autocast = ipex_autocast
     torch.cat = torch_cat
     torch.linalg.solve = linalg_solve
     torch.nn.functional.interpolate = interpolate
+    torch.backends.cuda.sdp_kernel = return_null_context
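The patches that torch.compile cannot tolerate as CondFunc wrappers are applied as plain attribute assignments instead, now including the DataLoader shutdown replacement and the DataParallel and sdp_kernel stubs. Stubbing sdp_kernel with a null context keeps callers that use it as a context manager working unchanged; a sketch with illustrative shapes:

    import torch

    q = k = v = torch.randn(1, 8, 16, 64, device="xpu", dtype=torch.float16)
    with torch.backends.cuda.sdp_kernel(enable_flash=False):  # now just returns contextlib.nullcontext()
        out = torch.nn.functional.scaled_dot_product_attention(q, k, v)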