import collections.abc as cabc
import typing as typ

import numpy as np
import pyxu.info.ptype as pyct
import pyxu.operator.linop.diff as pydiff
import pyxu.operator.linop.filter as pyfilt
import pyxu.util as pycu

from pyxu_diffops.operator.diffusion._diffusion import _Diffusion
from pyxu_diffops.operator.diffusion._diffusion_coeff import (
    DiffusionCoeffAnisoCoherenceEnhancing,
    DiffusionCoeffAnisoEdgeEnhancing,
    DiffusionCoeffAnisotropic,
    DiffusionCoeffIsotropic,
    _DiffusionCoefficient,
)
from pyxu_diffops.operator.diffusion._diffusivity import (
    MfiDiffusivity,
    PeronaMalikDiffusivity,
    TotalVariationDiffusivity,
)
from pyxu_diffops.operator.diffusion._extra_diffusion_term import (
    CurvaturePreservingTerm,
    MfiExtraTerm,
)

__all__ = [
    "MfiDiffusion",
    "PeronaMalikDiffusion",
    "TikhonovDiffusion",
    "TotalVariationDiffusion",
    "CurvaturePreservingDiffusionOp",
    "AnisEdgeEnhancingDiffusionOp",
    "AnisCoherenceEnhancingDiffusionOp",
    "AnisDiffusionOp",
]
# "AnisMfiDiffusionOp"]

# pxa.LinOp

class MfiDiffusion(_Diffusion):
    r"""
    Minimum Fisher Information (MFI) diffusion operator, featuring an inhomogeneous isotropic diffusion tensor.
    Inspired by minimally informative prior principles, it is popular in the plasma physics tomography community.
    The diffusivity decreases as the local intensity increases, so that bright features are preserved.

    The MFI diffusion tensor is defined, for the :math:`i`-th pixel, as

    .. math::
        \big(\mathbf{D}(\mathbf{f})\big)_i = (g(\mathbf{f}))_i \;\mathbf{I} = \begin{pmatrix}(g(\mathbf{f}))_i & 0\\ 0 & (g(\mathbf{f}))_i\end{pmatrix},

    where the diffusivity :math:`g` is defined in two possible ways:

    * in the *tame* case as

      .. math::
          (g(\mathbf{f}))_i = \frac{1}{1+\max\{\delta, f_i\}/\beta},\quad\delta\ll 1,

      where :math:`\beta` is a contrast parameter;

    * in the *untame* case as

      .. math::
          (g(\mathbf{f}))_i = \frac{1}{\max\{\delta, f_i\}},\quad\delta\ll 1.

    The gradient of the operator reads

    .. math::
        -\mathrm{div}(\mathbf{D}(\mathbf{f})\boldsymbol{\nabla}\mathbf{f})-\sum_{i=0}^{N_{tot}-1}\frac{\vert(\boldsymbol{\nabla}\mathbf{f})_i\vert^2}{(g(\mathbf{f}))_i^2}\;,

    where the sum is an optional balloon force `extra_term`, which can be included or left out.

    We recommend using the *tame* version, since it is better behaved: the *untame* version requires very small
    time steps to remain stable. Furthermore, we recommend including the extra term, since only then does the
    operator derive from a potential :math:`\phi`.

    Parameters
    ----------
    dim_shape: NDArrayShape
        Shape of the input array.
    extra_term: bool
        Whether the extra term arising from the differentiation of the MFI functional should be
        included. Defaults to `True` (recommended). If set to `False` (*linearized MFI*), the MFI diffusion operator
        does not admit a potential and the ``apply()`` method is not defined.
    beta: Real
        Contrast parameter, determines the magnitude above which image gradients are preserved. Defaults to 1.
    clipping_value: Real
        Clipping value :math:`\delta` in the expression of the MFI diffusivity. Defaults to 1e-5.
    tame: bool
        Whether the tame or untame version should be used. Defaults to `True` (tame, recommended).
    sampling: Real, list[Real]
        Sampling step (i.e., the distance between two consecutive elements of an array).
        Defaults to 1.

    Returns
    -------
    op: OpT
        MFI diffusion operator.
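
    Example
    -------

    A minimal gradient-flow sketch, mirroring the examples of the other operators in this module
    (the contrast parameter ``beta`` below is an illustrative choice, not a recommended default):

    .. plot::

        import numpy as np
        import matplotlib.pyplot as plt
        import pyxu.opt.solver as pysol
        import pyxu.abc.solver as pysolver
        import pyxu.opt.stop as pystop
        import pyxu_diffops.operator as pyxop
        import skimage as skim

        # Import RGB image (nonnegative intensities, as assumed by the MFI diffusivity)
        image = skim.data.cat().astype(float)

        # Move color-stacking axis to front (needed for pyxu stacking convention)
        image = np.moveaxis(image, 2, 0)  # (3, 300, 451)

        # Instantiate diffusion operator (tame version, with extra term)
        mfi_diffop = pyxop.MfiDiffusion(dim_shape=(3, 300, 451), beta=100)

        # Define stopping criterion
        stop_crit = pystop.MaxIter(n=100)

        # Perform 100 gradient flow iterations starting from image
        PGD = pysol.PGD(f=mfi_diffop, show_progress=False, verbosity=100)
        PGD.fit(**dict(mode=pysolver.SolverMode.BLOCK, x0=image, stop_crit=stop_crit,
                       tau=2 / mfi_diffop.diff_lipschitz))
        mfi_smoothed_image = PGD.solution()

        # Reshape images for plotting
        image = np.moveaxis(image, 0, 2)
        mfi_smoothed_image = np.moveaxis(mfi_smoothed_image, 0, 2)

        # Plot
        fig, ax = plt.subplots(1, 2, figsize=(10, 5))
        ax[0].imshow(image.astype(int))
        ax[0].set_title("Image", fontsize=15)
        ax[0].axis('off')
        ax[1].imshow(mfi_smoothed_image.astype(int))
        ax[1].set_title("100 iterations MFI diffusion", fontsize=15)
        ax[1].axis('off')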
    """

    def __init__(
        self,
        dim_shape: pyct.NDArrayShape,
        extra_term: bool = True,
        beta: pyct.Real = 1,
        clipping_value: pyct.Real = 1e-5,
        tame: bool = True,
        sampling: typ.Union[pyct.Real, cabc.Sequence[pyct.Real]] = 1,
    ):
        gradient = pydiff.Gradient(
            dim_shape=dim_shape,
            directions=(1, 2),
            diff_method="fd",
            scheme="forward",
            mode="symmetric",
            sampling=sampling,
        )
        mfi_diffusion_coeff = DiffusionCoeffIsotropic(
            dim_shape=dim_shape,
            diffusivity=MfiDiffusivity(dim_shape=dim_shape, beta=beta, clipping_value=clipping_value, tame=tame),
        )
        if extra_term:
            mfi_extra_term = MfiExtraTerm(
                dim_shape=dim_shape, gradient=gradient, beta=beta, clipping_value=clipping_value, tame=tame
            )
            super().__init__(
                dim_shape,
                gradient=gradient,
                diffusion_coefficient=mfi_diffusion_coeff,
                extra_diffusion_term=mfi_extra_term,
            )
        else:
            super().__init__(dim_shape, gradient=gradient, diffusion_coefficient=mfi_diffusion_coeff)
        # Initialize Lipschitz constant of the gradient. Should this be done inside _DiffusionOp.__init__ instead?
        if tame:
            self.diff_lipschitz = gradient.lipschitz**2
        else:
            self.diff_lipschitz = gradient.lipschitz**2 / clipping_value
        # REMARK: with the extra term, Lipschitz continuity of the gradient is likely lost (only Hoelder
        # continuity of order 2 holds). No instability was observed in practice, presumably because the
        # denominator is of order norm(x); under that assumption, doubling diff_lipschitz is a reasonable estimate.
        if extra_term:
            self.diff_lipschitz *= 2

    def apply(self, arr: pyct.NDArray) -> pyct.Real:
        if self.extra_diffusion_term is not None:
            xp = pycu.get_array_module(arr)  # arr: (batch,nchannels,nx,ny)
            y = self._compute_divergence_term(arr)  # (batch,nchannels,nx,ny)
            # potential = <arr, divergence_term(arr)> / 2, batch-wise inner product
            return xp.einsum("...jkl,...jkl->...", y, arr) / 2  # (batch,)
        else:
            raise RuntimeError(
                "'apply()' method not defined for MfiDiffusion with no MfiExtraTerm: no underlying variational interpretation"
            )


class PeronaMalikDiffusion(_Diffusion):
    r"""
    Perona-Malik diffusion operator, featuring an inhomogeneous isotropic diffusion tensor with Perona-Malik
    diffusivity. It can be used for edge-preserving smoothing: the diffusion intensity is reduced at locations
    characterised by large gradients, so that edges are preserved.

    The gradient of the operator reads :math:`\nabla\phi(\mathbf{f})=-\mathrm{div}(\mathbf{D}(\mathbf{f})\boldsymbol{\nabla}\mathbf{f})`,
    where :math:`\phi` is the potential.

    The Perona-Malik diffusion tensor is defined, for the :math:`i`-th pixel, as

    .. math::

        \big(\mathbf{D}(\mathbf{f})\big)_i = (g(\mathbf{f}))_i \;\mathbf{I} = \begin{pmatrix}(g(\mathbf{f}))_i & 0\\ 0 & (g(\mathbf{f}))_i\end{pmatrix},

    where the diffusivity with contrast parameter :math:`\beta` is defined as:

    * :math:`(g(\mathbf{f}))_i = \exp(-\vert (\nabla_\sigma \mathbf{f})_i \vert ^2 / \beta^2)` in the exponential case,

    * :math:`(g(\mathbf{f}))_i = 1/\big( 1+\vert (\nabla_\sigma \mathbf{f})_i \vert ^2 / \beta^2\big)` in the rational case.

    Gaussian derivatives of width :math:`\sigma` are used for the gradient inside the diffusivity, as is customary
    to regularise the otherwise ill-posed Perona-Malik diffusion process.

    In both cases, the corresponding divergence-based diffusion term admits a potential
    (see [Tschumperle]_ for the exponential case): the ``apply()`` method is well-defined.
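
    Explicitly, the potentials evaluated by ``apply()`` read (matching the in-place computations in
    ``_apply_exponential`` and ``_apply_rational`` below; note that ``apply()`` uses the plain
    finite-difference gradient, while the diffusivity argument uses the Gaussian one):

    .. math::

        \phi_{\mathrm{exp}}(\mathbf{f}) = \frac{\beta^2}{2}\sum_{i=0}^{N_{tot}-1}\Big(1-\exp\big(-\vert(\boldsymbol{\nabla}\mathbf{f})_i\vert^2/\beta^2\big)\Big),
        \qquad
        \phi_{\mathrm{rat}}(\mathbf{f}) = \frac{\beta^2}{2}\sum_{i=0}^{N_{tot}-1}\log\Big(1+\vert(\boldsymbol{\nabla}\mathbf{f})_i\vert^2/\beta^2\Big).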

    Parameters
    ----------
    dim_shape: NDArrayShape
        Shape of the input array.
    beta: Real
        Contrast parameter, determines the magnitude above which image gradients are preserved. Defaults to 1.
    pm_fct: str
        Perona-Malik diffusivity function. Must be either 'exponential' or 'rational'.
    sigma_gd: Real
        Standard deviation of the Gaussian-derivative kernel used for the gradient computation. Defaults to 1.
    sampling: Real, list[Real]
        Sampling step (i.e., the distance between two consecutive elements of an array).
        Defaults to 1.

    Returns
    -------
    op: OpT
        Perona-Malik diffusion operator.

    Example
    -------

    .. plot::

        import numpy as np
        import matplotlib.pyplot as plt
        import pyxu.opt.solver as pysol
        import pyxu.abc.solver as pysolver
        import pyxu.opt.stop as pystop
        import pyxu_diffops.operator as pyxop
        import skimage as skim

        # Import RGB image
        image = skim.data.cat().astype(float)
        print(image.shape)  # (300, 451, 3)

        # Move color-stacking axis to front (needed for pyxu stacking convention)
        image = np.moveaxis(image, 2, 0)
        print(image.shape)  # (3, 300, 451)

        # Instantiate diffusion operator
        pm_diffop = pyxop.PeronaMalikDiffusion(dim_shape=(3, 300, 451), beta=5)

        # Define PGD solver, with stopping criterion and starting point x0
        stop_crit = pystop.MaxIter(n=100)

        # Perform 100 gradient flow iterations starting from x0
        PGD = pysol.PGD(f=pm_diffop, show_progress=False, verbosity=100)
        PGD.fit(**dict(mode=pysolver.SolverMode.BLOCK, x0=image, stop_crit=stop_crit,
                       tau=2 / pm_diffop.diff_lipschitz))
        pm_smoothed_image = PGD.solution()

        # Reshape images for plotting
        image = np.moveaxis(image, 0, 2)
        pm_smoothed_image = np.moveaxis(pm_smoothed_image, 0, 2)

        # Plot
        fig, ax = plt.subplots(1, 2, figsize=(10, 5))
        ax[0].imshow(image.astype(int))
        ax[0].set_title("Image", fontsize=15)
        ax[0].axis('off')
        ax[1].imshow(pm_smoothed_image.astype(int))
        ax[1].set_title("100 iterations Perona-Malik diffusion", fontsize=15)
        ax[1].axis('off')

    """

    def __init__(
        self,
        dim_shape: pyct.NDArrayShape,
        beta: pyct.Real = 1,
        pm_fct: str = "exponential",
        sigma_gd: pyct.Real = 1,
        sampling: typ.Union[pyct.Real, cabc.Sequence[pyct.Real]] = 1,
    ):
        gradient = pydiff.Gradient(
            dim_shape=dim_shape,
            directions=(1, 2),
            diff_method="fd",
            scheme="forward",
            mode="symmetric",
            sampling=sampling,
        )
        gaussian_gradient = pydiff.Gradient(
            dim_shape=dim_shape, directions=(1, 2), diff_method="gd", sigma=sigma_gd, sampling=sampling
        )
        pm_diffusion_coeff = DiffusionCoeffIsotropic(
            dim_shape=dim_shape,
            diffusivity=PeronaMalikDiffusivity(
                dim_shape=dim_shape, gradient=gaussian_gradient, beta=beta, pm_fct=pm_fct
            ),
        )
        super().__init__(dim_shape, gradient=gradient, diffusion_coefficient=pm_diffusion_coeff)
        self.beta = beta
        self.pm_fct = pm_fct
        # Initialize Lipschitz constant of the gradient. Should this be done inside _DiffusionOp.__init__ instead?
        self.diff_lipschitz = gradient.lipschitz**2

    def _apply_exponential(self, arr: pyct.NDArray) -> pyct.NDArray:
        xp = pycu.get_array_module(arr)  # arr: (batch,nchannels,nx,ny)
        # In-place implementation of
        # 0.5 * (beta**2) * sum(1 - xp.exp(-grad_norm_sq / beta**2))
        y = -self.diffusion_coefficient.diffusivity._compute_grad_norm_sq(arr, self.gradient)  # (batch,nchannels,nx,ny)
        y /= self.beta**2
        y = 1 - xp.exp(y)
        z = xp.sum(y, axis=(-3, -2, -1))  # (batch,)
        z *= self.beta**2
        return 0.5 * z  # (batch,)

    def _apply_rational(self, arr: pyct.NDArray) -> pyct.NDArray:
        xp = pycu.get_array_module(arr)  # arr: (batch,nchannels,nx,ny)
        # In-place implementation of
        # 0.5 * (beta**2) * sum(xp.log(1 + grad_norm_sq / beta**2))
        y = self.diffusion_coefficient.diffusivity._compute_grad_norm_sq(arr, self.gradient)  # (batch,nchannels,nx,ny)
        y /= self.beta**2
        y += 1
        y = xp.log(y)
        z = xp.sum(y, axis=(-3, -2, -1))  # (batch,)
        z *= self.beta**2
        return 0.5 * z  # (batch,)

    def apply(self, arr: pyct.NDArray) -> pyct.NDArray:
        f = dict(
            exponential=self._apply_exponential,
            rational=self._apply_rational,
        ).get(self.pm_fct)
        out = f(arr)
        return out


class TikhonovDiffusion(_Diffusion):
    r"""
    Tikhonov diffusion operator, featuring a homogeneous isotropic diffusion tensor with constant diffusivity. It
    achieves Gaussian smoothing, acting isotropically in all directions with identical intensity at all
    locations. Smoothing with this diffusion operator blurs the original image.

    The gradient of the operator reads :math:`\nabla\phi(\mathbf{f})=-\mathrm{div}(\boldsymbol{\nabla}\mathbf{f})=-\boldsymbol{\Delta}\mathbf{f}`;
    it derives from the potential :math:`\phi(\mathbf{f})=\frac{1}{2}\Vert\boldsymbol{\nabla}\mathbf{f}\Vert_2^2`.

    The Tikhonov diffusion tensor is defined, for the :math:`i`-th pixel, as

    .. math::

        \big(\mathbf{D}(\mathbf{f})\big)_i = \mathbf{I} = \begin{pmatrix}1 & 0\\ 0 & 1\end{pmatrix}.

    Parameters
    ----------
    dim_shape: NDArrayShape
        Shape of the input array.
    sampling: Real, list[Real]
        Sampling step (i.e., the distance between two consecutive elements of an array).
        Defaults to 1.

    Returns
    -------
    op: OpT
        Tikhonov diffusion operator.

    Example
    -------

    .. plot::

        import numpy as np
        import matplotlib.pyplot as plt
        import pyxu.opt.solver as pysol
        import pyxu.abc.solver as pysolver
        import pyxu.opt.stop as pystop
        import pyxu_diffops.operator as pyxop
        import skimage as skim

        # Import RGB image
        image = skim.data.cat().astype(float)
        print(image.shape)  # (300, 451, 3)

        # Move color-stacking axis to front (needed for pyxu stacking convention)
        image = np.moveaxis(image, 2, 0)
        print(image.shape)  # (3, 300, 451)

        # Instantiate diffusion operator
        tikh_diffop = pyxop.TikhonovDiffusion(dim_shape=(3, 300, 451))

        # Define PGD solver, with stopping criterion and starting point x0
        stop_crit = pystop.MaxIter(n=100)

        # Perform 100 gradient flow iterations starting from x0
        PGD = pysol.PGD(f=tikh_diffop, show_progress=False, verbosity=100)
        PGD.fit(**dict(mode=pysolver.SolverMode.BLOCK, x0=image, stop_crit=stop_crit,
                       tau=2 / tikh_diffop.diff_lipschitz))
        tikh_smoothed_image = PGD.solution()

        # Reshape images for plotting
        image = np.moveaxis(image, 0, 2)
        tikh_smoothed_image = np.moveaxis(tikh_smoothed_image, 0, 2)

        # Plot
        fig, ax = plt.subplots(1, 2, figsize=(10, 5))
        ax[0].imshow(image.astype(int))
        ax[0].set_title("Image", fontsize=15)
        ax[0].axis('off')
        ax[1].imshow(tikh_smoothed_image.astype(int))
        ax[1].set_title("100 iterations Tikhonov smoothing", fontsize=15)
        ax[1].axis('off')

    """

    def __init__(
        self,
        dim_shape: pyct.NDArrayShape,
        sampling: typ.Union[pyct.Real, cabc.Sequence[pyct.Real]] = 1,
    ):
        gradient = pydiff.Gradient(
            dim_shape=dim_shape,
            directions=(1, 2),
            diff_method="fd",
            scheme="forward",
            mode="symmetric",
            sampling=sampling,
        )
        tikhonov_diffusion_coeff = DiffusionCoeffIsotropic(dim_shape=dim_shape)
        super().__init__(dim_shape, gradient=gradient, diffusion_coefficient=tikhonov_diffusion_coeff)
        # Initialize Lipschitz constant of the gradient. Should this be done inside _DiffusionOp.__init__ instead?
        self.diff_lipschitz = gradient.lipschitz**2

    def apply(self, arr: pyct.NDArray) -> pyct.Real:
        xp = pycu.get_array_module(arr)  # arr: (batch,nchannels,nx,ny)
        y = self._compute_divergence_term(arr)  # (batch,nchannels,nx,ny)
        # potential = <arr, -Laplacian(arr)> / 2 (= ||grad arr||^2 / 2 up to boundary effects), batch-wise
        return xp.einsum("...jkl,...jkl->...", y, arr) / 2  # (batch,)


class TotalVariationDiffusion(_Diffusion):
    r"""
    Total variation (TV) diffusion operator, featuring an inhomogeneous isotropic diffusion tensor. It can be used
    for edge-preserving smoothing: the diffusion intensity is reduced at locations characterised by large
    gradients, so that edges are preserved. The diffusion-operator formulation of total variation regularization
    stems from the Euler-Lagrange equations associated with the problem of minimizing the TV regularization
    functional.

    The gradient of the operator reads :math:`\nabla\phi(\mathbf{f})=-\mathrm{div}(\mathbf{D}(\mathbf{f})\boldsymbol{\nabla}\mathbf{f})`,
    deriving from the total variation potential :math:`\phi`.
    In the classical (*untame*) TV formulation, it holds that
    :math:`\phi(\mathbf{f})=\Vert \boldsymbol{\nabla}\mathbf{f}\Vert_{2,1}=\sum_{i=0}^{N_{tot}-1}\vert(\boldsymbol{\nabla}\mathbf{f})_i\vert`.
    The *tame* version (recommended) replaces this with the smooth approximation given below.
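
    Concretely, in the *tame* case the potential evaluated by ``apply()`` reads (matching the in-place
    computation in ``_apply_tame`` below):

    .. math::

        \phi(\mathbf{f}) = \beta^2\sum_{i=0}^{N_{tot}-1}\sqrt{1+\vert(\boldsymbol{\nabla}\mathbf{f})_i\vert^2/\beta^2},

    which, wherever :math:`\vert(\boldsymbol{\nabla}\mathbf{f})_i\vert\gg\beta`, behaves like
    :math:`\beta\,\Vert\boldsymbol{\nabla}\mathbf{f}\Vert_{2,1}`, i.e., a rescaled smooth TV functional.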

    The TV diffusion tensor is defined, for the :math:`i`-th pixel, as

    .. math::

        \big(\mathbf{D}(\mathbf{f})\big)_i = (g(\mathbf{f}))_i \;\mathbf{I} = \begin{pmatrix}(g(\mathbf{f}))_i & 0\\ 0 & (g(\mathbf{f}))_i\end{pmatrix}.

    The diffusivity can be defined in two possible ways:

    * in the *tame* case as

      .. math::
          (g(\mathbf{f}))_i = \frac{\beta}{\sqrt{\beta^2+ \vert (\boldsymbol{\nabla} \mathbf{f})_i \vert ^2}}, \quad \forall i,

    * in the *untame* case as

      .. math::
          (g(\mathbf{f}))_i = \frac{1}{\vert (\boldsymbol{\nabla} \mathbf{f})_i \vert}, \quad \forall i.

    The *tame* formulation amounts to an approximation of the TV functional very similar to the Huber-loss
    approach. The parameter :math:`\beta` controls the quality of the smooth approximation of the :math:`L^2` norm
    :math:`\vert (\boldsymbol{\nabla} \mathbf{f})_i \vert` involved in the TV approach. Lower values correspond to
    better approximations but typically lead to larger computational cost.

    We recommend using the *tame* version; the *untame* one is unstable.

    Parameters
    ----------
    dim_shape: NDArrayShape
        Shape of the input array.
    beta: Real
        Contrast parameter, determines the magnitude above which image gradients are preserved. Defaults to 1.
    tame: bool
        Whether the tame or untame version should be used. Defaults to `True` (tame, recommended).
    sampling: Real, list[Real]
        Sampling step (i.e., the distance between two consecutive elements of an array).
        Defaults to 1.

    Returns
    -------
    op: OpT
        Total variation diffusion operator.

    Example
    -------

    .. plot::

        import numpy as np
        import matplotlib.pyplot as plt
        import pyxu.opt.solver as pysol
        import pyxu.abc.solver as pysolver
        import pyxu.opt.stop as pystop
        import pyxu_diffops.operator as pyxop
        import skimage as skim

        # Import RGB image
        image = skim.data.cat().astype(float)
        print(image.shape)  # (300, 451, 3)

        # Move color-stacking axis to front (needed for pyxu stacking convention)
        image = np.moveaxis(image, 2, 0)
        print(image.shape)  # (3, 300, 451)

        # Instantiate diffusion operator
        tv_diffop = pyxop.TotalVariationDiffusion(dim_shape=(3, 300, 451), beta=2)

        # Define PGD solver, with stopping criterion and starting point x0
        stop_crit = pystop.MaxIter(n=100)

        # Perform 100 gradient flow iterations starting from x0
        PGD = pysol.PGD(f=tv_diffop, show_progress=False, verbosity=100)
        PGD.fit(**dict(mode=pysolver.SolverMode.BLOCK, x0=image, stop_crit=stop_crit,
                       tau=2 / tv_diffop.diff_lipschitz))
        tv_smoothed_image = PGD.solution()

        # Reshape images for plotting
        image = np.moveaxis(image, 0, 2)
        tv_smoothed_image = np.moveaxis(tv_smoothed_image, 0, 2)

        # Plot
        fig, ax = plt.subplots(1, 2, figsize=(10, 5))
        ax[0].imshow(image.astype(int))
        ax[0].set_title("Image", fontsize=15)
        ax[0].axis('off')
        ax[1].imshow(tv_smoothed_image.astype(int))
        ax[1].set_title("100 iterations Total Variation smoothing", fontsize=15)
        ax[1].axis('off')

    """

    def __init__(
        self,
        dim_shape: pyct.NDArrayShape,
        beta: pyct.Real = 1,
        tame: bool = True,
        # sigma_gd: pyct.Real = 1,
        sampling: typ.Union[pyct.Real, cabc.Sequence[pyct.Real]] = 1,
    ):
        gradient = pydiff.Gradient(
            dim_shape=dim_shape,
            directions=(1, 2),
            diff_method="fd",
            scheme="forward",
            mode="symmetric",
            sampling=sampling,
        )
        tv_diffusion_coeff = DiffusionCoeffIsotropic(
            dim_shape=dim_shape,
            diffusivity=TotalVariationDiffusivity(dim_shape=dim_shape, gradient=gradient, beta=beta, tame=tame),
        )
        super().__init__(dim_shape, gradient=gradient, diffusion_coefficient=tv_diffusion_coeff)
        self.beta = beta
        self.tame = tame
        # Initialize Lipschitz constant of the gradient. Should this be done inside _DiffusionOp.__init__ instead?
        self.diff_lipschitz = gradient.lipschitz**2
        if not self.tame:
            self.diff_lipschitz = np.inf

    # @pycrt.enforce_precision(i="arr")
    def _apply_tame(self, arr: pyct.NDArray) -> pyct.NDArray:
        # In-place implementation of
        # beta**2 * sum(xp.sqrt(1 + grad_norm_sq / beta**2))
        xp = pycu.get_array_module(arr)
        y = self.diffusion_coefficient.diffusivity._compute_grad_norm_sq(arr, self.gradient)  # (batch,nchannels,nx,ny)
        y = y[..., 0, :, :]  # (batch,nx,ny), all channels carry the same (duplicated) information
        y /= self.beta**2
        y += 1
        y = xp.sqrt(y)
        z = xp.sum(y, axis=(-2, -1))  # (batch,)
        return self.beta**2 * z

    # @pycrt.enforce_precision(i="arr")
    def _apply_untamed(self, arr: pyct.NDArray) -> pyct.NDArray:
        xp = pycu.get_array_module(arr)
        y = self.diffusion_coefficient.diffusivity._compute_grad_norm_sq(arr, self.gradient)  # (batch,nchannels,nx,ny)
        y = xp.sqrt(y[..., 0, :, :])  # (batch,nx,ny)
        return xp.sum(y, axis=(-2, -1))  # (batch,)

    # @pycrt.enforce_precision(i="arr")
    # @pycu.vectorize("arr")
    def apply(self, arr: pyct.NDArray) -> pyct.NDArray:
        f = self._apply_tame if self.tame else self._apply_untamed
        out = f(arr)
        return out


class CurvaturePreservingDiffusionOp(_Diffusion):
    r"""
    Curvature-preserving diffusion operator [Tschumperle]_. This trace-based operator promotes curvature
    preservation along a given vector field :math:`\mathbf{w}`, defined by a :math:`2`-dimensional
    vector :math:`\mathbf{w}_i\in\mathbb{R}^2` for each pixel :math:`i`.

    The gradient of the operator reads

    .. math::
        -\mathrm{trace}\big(\mathbf{T}(\mathbf{w})\mathbf{H}(\mathbf{f})\big) - (\boldsymbol{\nabla}\mathbf{f})^T \mathbf{J}_{\mathbf{w}}\mathbf{w},

    where :math:`\mathbf{H}(\mathbf{f})` is the Hessian of the image, :math:`\mathbf{J}_{\mathbf{w}}` is the
    Jacobian of the vector field, and the trace diffusion tensor :math:`\mathbf{T}(\mathbf{w})` is defined,
    for the :math:`i`-th pixel, as

    .. math::
        \big(\mathbf{T}(\mathbf{w})\big)_i=\mathbf{w}_i\mathbf{w}_i^T.

    The curvature-preserving diffusion operator does not admit a potential.

    Parameters
    ----------
    dim_shape: NDArrayShape
        Shape of the input array.
    curvature_preservation_field: NDArray
        Vector field of shape :math:`(2, N_0, N_1)` along which curvature should be preserved.
    sampling: Real, list[Real]
        Sampling step (i.e., the distance between two consecutive elements of an array).
        Defaults to 1.

    Returns
    -------
    op: OpT
        Curvature-preserving diffusion operator.

    Example
    -------

    .. plot::

        import numpy as np
        import matplotlib.pyplot as plt
        import pyxu.operator.linop.diff as pydiff
        import pyxu.opt.solver as pysol
        import pyxu.abc.solver as pysolver
        import pyxu.opt.stop as pystop
        import pyxu_diffops.operator as pyxop
        import skimage as skim

        # Define random image
        image = 255 * np.random.rand(3, 300, 451)

        # Define vector field; the diffusion process will preserve curvature along it
        image_center = np.array(image.shape[1:]) / 2 + [0.25, 0.25]
        curvature_preservation_field = np.zeros((2, np.prod(image.shape[1:])))
        curv_pres_1 = np.zeros(image.shape[1:])
        curv_pres_2 = np.zeros(image.shape[1:])
        for i in range(image.shape[1]):
            for j in range(image.shape[2]):
                theta = np.arctan2(-i + image_center[0], j - image_center[1])
                curv_pres_1[i, j] = np.cos(theta)
                curv_pres_2[i, j] = np.sin(theta)
        curvature_preservation_field[0, :] = curv_pres_1.reshape(1, -1)
        curvature_preservation_field[1, :] = curv_pres_2.reshape(1, -1)
        curvature_preservation_field = curvature_preservation_field.reshape(2, *image.shape[1:])

        # Define curvature-preserving diffusion operator
        CurvPresDiffusionOp = pyxop.CurvaturePreservingDiffusionOp(dim_shape=(3, 300, 451),
                                                                   curvature_preservation_field=curvature_preservation_field)

        # Perform 200 gradient flow iterations
        stop_crit = pystop.MaxIter(n=200)
        PGD_curve = pysol.PGD(f=CurvPresDiffusionOp, g=None, show_progress=False, verbosity=100)

        PGD_curve.fit(**dict(mode=pysolver.SolverMode.BLOCK, x0=image, stop_crit=stop_crit, acceleration=False,
                             tau=1 / CurvPresDiffusionOp.diff_lipschitz))
        curv_smooth_image = PGD_curve.solution()

        # Reshape images for plotting
        image = np.moveaxis(image, 0, 2)
        curv_smooth_image = np.moveaxis(curv_smooth_image, 0, 2)
        # Rescale for better contrast
        curv_smooth_image -= np.min(curv_smooth_image)
        curv_smooth_image /= np.max(curv_smooth_image)
        curv_smooth_image *= 255

        # Plot
        fig, ax = plt.subplots(1, 3, figsize=(30, 6))
        ax[0].imshow(image.astype(int), cmap="gray", aspect="auto")
        ax[0].set_title("Image", fontsize=30)
        ax[0].axis('off')
        ax[1].quiver(curv_pres_2[::40, ::60], curv_pres_1[::40, ::60])
        ax[1].set_title("Vector field", fontsize=30)
        ax[1].axis('off')
        ax[2].imshow(curv_smooth_image.astype(int), cmap="gray", aspect="auto")
        ax[2].set_title("200 iterations Curvature Preserving", fontsize=30)
        ax[2].axis('off')

    """

    def __init__(
        self,
        dim_shape: pyct.NDArrayShape,
        curvature_preservation_field: pyct.NDArray = None,  # (2dim,nx,ny)
        sampling: typ.Union[pyct.Real, cabc.Sequence[pyct.Real]] = 1,
    ):
        xp = pycu.get_array_module(curvature_preservation_field)
        self.nchannels = dim_shape[0]
        gradient = pydiff.Gradient(
            dim_shape=dim_shape, directions=(1, 2), diff_method="fd", scheme="central", mode="edge", sampling=sampling
        )
        hessian = pydiff.Hessian(
            dim_shape=dim_shape[1:], diff_method="fd", mode="symmetric", sampling=sampling, scheme="central", accuracy=2
        )
        curvature_preserving_term = CurvaturePreservingTerm(
            dim_shape=dim_shape, gradient=gradient, curvature_preservation_field=curvature_preservation_field
        )
        # Assemble the trace diffusion tensor: per-pixel outer products w_i w_i^T via broadcasting.
        cf_sh = curvature_preservation_field.shape
        tensors = curvature_preservation_field.reshape(1, *cf_sh) * curvature_preservation_field.reshape(
            cf_sh[0], 1, *cf_sh[1:]
        )  # (2dim,2dim,nx,ny)
        tensors = xp.stack([tensors] * self.nchannels, -3)  # (2dim,2dim,nchannels,nx,ny)
        trace_diffusion_coefficient = _DiffusionCoefficient(dim_shape=dim_shape, isotropic=False, trace_term=True)
        trace_diffusion_coefficient.set_frozen_coeff(tensors)  # (2dim,2dim,nchannels,nx,ny)

        super().__init__(
            dim_shape,
            hessian=hessian,
            trace_diffusion_coefficient=trace_diffusion_coefficient,
            extra_diffusion_term=curvature_preserving_term,
        )
        # Initialize Lipschitz constant of the gradient. Should this be done inside _DiffusionOp.__init__? It seems fine here.
        self.diff_lipschitz = (
            hessian.lipschitz * curvature_preserving_term.max_norm + curvature_preserving_term.lipschitz
        )


class AnisEdgeEnhancingDiffusionOp(_Diffusion):
    r"""
    Anisotropic edge-enhancing diffusion operator, featuring an inhomogeneous anisotropic diffusion tensor
    defined as in [Weickert]_. The diffusion tensor is a function of the
    :py:class:`~pyxu.operator.linop.filter.StructureTensor`,
    a tensor describing the properties of the image in the neighbourhood of each pixel.
    This diffusion operator achieves edge enhancement by reducing the flux in the direction of the
    eigenvector of the structure tensor with largest eigenvalue. Essentially, smoothing across
    the detected edges is inhibited in a more sophisticated way than in isotropic approaches,
    by considering not only the local gradient but also its behaviour in a neighbourhood of the pixel.

    The gradient of the operator reads :math:`-\mathrm{div}(\mathbf{D}(\mathbf{f})\boldsymbol{\nabla}\mathbf{f})`.

    Let :math:`\mathbf{v}_0^i,\mathbf{v}_1^i` be the eigenvectors of the :math:`i`-th pixel structure tensor
    :math:`\mathbf{S}_i`, with eigenvalues :math:`e_0^i, e_1^i` sorted in decreasing order. The diffusion tensor
    is defined, for the :math:`i`-th pixel, as the symmetric positive definite tensor

    .. math::
        \big(\mathbf{D}(\mathbf{f})\big)_i = \lambda_0^i\mathbf{v}_0^i(\mathbf{v}_0^i)^T+\lambda_1^i\mathbf{v}_1^i(\mathbf{v}_1^i)^T,

    where the smoothing intensities along the two eigendirections are defined, to achieve edge enhancement, as

    .. math::
        \lambda_0^i &= g(e_0^i) :=
        \begin{cases}
        1 & \text{if } e_0^i = 0 \\
        1 - \exp\big(\frac{-C}{(e_0^i/\beta)^m}\big) & \text{if } e_0^i >0,
        \end{cases}\\
        \lambda_1^i &= 1,

    where :math:`\beta` is a contrast parameter, :math:`m` controls the decay rate of :math:`\lambda_0^i` as a function
    of :math:`e_0^i`, and :math:`C\in\mathbb{R}` is a normalization constant (see [Weickert]_).

    Since :math:`\lambda_1^i \geq \lambda_0^i`, the smoothing intensity is stronger in the direction of the second
    eigenvector of :math:`\mathbf{S}_i`, and therefore perpendicular to the (locally averaged) gradient.
    Moreover, :math:`\lambda_0^i` is a decreasing function of :math:`e_0^i`, which indicates that when the gradient
    magnitude is high (sharp edges), smoothing in the direction of the gradient is inhibited, i.e., edges are preserved.

    In general, the diffusion operator does not admit a potential. However, if the diffusion tensor is evaluated
    at some image :math:`\tilde{\mathbf{f}}` and kept fixed at :math:`\mathbf{D}(\tilde{\mathbf{f}})`, the operator
    derives from the potential :math:`\frac{1}{2}\Vert\sqrt{\mathbf{D}(\tilde{\mathbf{f}})}\boldsymbol{\nabla}\mathbf{f}\Vert_2^2`.
    In this way, the smoothing directions and intensities are fixed according to the features of an image of interest.
    This can be achieved by passing a *freezing array* at initialization, as sketched below. In such cases, a
    *matrix-based implementation* is recommended for efficiency.
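
    A minimal freezing sketch (parameter values are illustrative; with a ``freezing_arr`` the ``apply()``
    method is well-defined and evaluates the quadratic potential above):

    .. code-block:: python

        import numpy as np
        import skimage as skim
        import pyxu_diffops.operator as pyxop

        image = np.moveaxis(skim.data.cat().astype(float), 2, 0)  # (3, 300, 451)

        # Freeze the diffusion tensor at the image itself; use the matrix-based implementation.
        frozen_diffop = pyxop.AnisEdgeEnhancingDiffusionOp(
            dim_shape=(3, 300, 451),
            beta=10,
            freezing_arr=image,
            matrix_based_impl=True,
        )
        energy = frozen_diffop.apply(image)  # potential evaluated at the image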

    Parameters
    ----------
    dim_shape: NDArrayShape
        Shape of the input array.
    beta: Real
        Contrast parameter, determines the gradient magnitude for edge enhancement. Defaults to 1.
    m: Real
        Decay parameter, determines how quickly the smoothing effect changes as a function of :math:`e_0^i/\beta`. Defaults to 4.
    sigma_gd_st: Real
        Width of the Gaussian derivative involved in the structure tensor computation. Defaults to 2.
    smooth_sigma_st: Real
        Width of the Gaussian filter smoothing the structure tensor (local averaging). Defaults to 0 (recommended).
    sampling: Real, list[Real]
        Sampling step (i.e., the distance between two consecutive elements of an array).
        Defaults to 1.
    freezing_arr: NDArray
        Array at which the diffusion tensor is evaluated and then frozen.
    matrix_based_impl: bool
        Whether to use the matrix-based implementation. Defaults to `False`. Recommended to set `True` if
        a ``freezing_arr`` is passed.
    gpu: bool
        Whether the operator should be instantiated for GPU arrays (passed to the underlying pyxu operators).
        Defaults to `False`.
    dtype: DType
        Working precision of the operator (passed to the underlying pyxu operators).

    Returns
    -------
    op: OpT
        Anisotropic edge-enhancing diffusion operator.

    Example
    -------

    .. plot::

        import numpy as np
        import matplotlib.pyplot as plt
        import pyxu.opt.solver as pysol
        import pyxu.abc.solver as pysolver
        import pyxu.opt.stop as pystop
        import pyxu_diffops.operator as pyxop
        import skimage as skim

        # Import RGB image
        image = skim.data.cat().astype(float)
        print(image.shape)  # (300, 451, 3)

        # Move color-stacking axis to front (needed for pyxu stacking convention)
        image = np.moveaxis(image, 2, 0)
        print(image.shape)  # (3, 300, 451)

        # Instantiate diffusion operator
        edge_enh_diffop = pyxop.AnisEdgeEnhancingDiffusionOp(dim_shape=(3, 300, 451), beta=10)

        # Define PGD solver, with stopping criterion and starting point x0
        stop_crit = pystop.MaxIter(n=100)

        # Perform 100 gradient flow iterations starting from x0
        PGD = pysol.PGD(f=edge_enh_diffop, show_progress=False, verbosity=100)
        PGD.fit(**dict(mode=pysolver.SolverMode.BLOCK, x0=image, stop_crit=stop_crit,
                       tau=2 / edge_enh_diffop.diff_lipschitz))
        edge_enh_image = PGD.solution()

        # Reshape images for plotting
        image = np.moveaxis(image, 0, 2)
        edge_enh_image = np.moveaxis(edge_enh_image, 0, 2)

        # Plot
        fig, ax = plt.subplots(1, 2, figsize=(10, 5))
        ax[0].imshow(image.astype(int))
        ax[0].set_title("Image", fontsize=15)
        ax[0].axis('off')
        ax[1].imshow(edge_enh_image.astype(int))
        ax[1].set_title("100 iterations Anis. Edge Enhancing", fontsize=15)
        ax[1].axis('off')

    """

    def __init__(
        self,
        dim_shape: pyct.NDArrayShape,
        beta: pyct.Real = 1.0,
        m: pyct.Real = 4.0,
        sigma_gd_st: pyct.Real = 2,
        smooth_sigma_st: pyct.Real = 0,
        sampling: typ.Union[pyct.Real, cabc.Sequence[pyct.Real]] = 1,
        freezing_arr: pyct.NDArray = None,
        matrix_based_impl: bool = False,
        gpu: bool = False,
        dtype: typ.Optional[pyct.DType] = None,
    ):
        gradient = pydiff.Gradient(
            dim_shape=dim_shape,
            directions=(1, 2),
            diff_method="fd",
            scheme="forward",
            mode="symmetric",
            sampling=sampling,
            gpu=gpu,
            dtype=dtype,
        )
        structure_tensor = pyfilt.StructureTensor(
            dim_shape=dim_shape[1:],
            diff_method="gd",
            smooth_sigma=smooth_sigma_st,
            sigma=sigma_gd_st,
            mode="symmetric",
            sampling=sampling,
            gpu=gpu,
            dtype=dtype,
        )
        edge_enh_diffusion_coeff = DiffusionCoeffAnisoEdgeEnhancing(
            dim_shape=dim_shape, structure_tensor=structure_tensor, beta=beta, m=m
        )
        self.freezing_arr = freezing_arr
        if freezing_arr is not None:
            # Add a leading channel axis if the freezing array is given without one.
            if len(freezing_arr.shape) == len(dim_shape) - 1:
                xp = pycu.get_array_module(freezing_arr)
                self.freezing_arr = xp.expand_dims(freezing_arr, axis=0)
            edge_enh_diffusion_coeff.freeze(self.freezing_arr)
        super().__init__(
            dim_shape,
            gradient=gradient,
            diffusion_coefficient=edge_enh_diffusion_coeff,
            matrix_based_impl=matrix_based_impl,
            gpu=gpu,
            dtype=dtype,
        )
        # Initialize Lipschitz constant of the gradient. Should this be done inside _DiffusionOp.__init__ instead?
        self.diff_lipschitz = gradient.lipschitz**2

    # @pycrt.enforce_precision(i="arr")
    # @pycu.vectorize("arr")
    def apply(self, arr: pyct.NDArray) -> pyct.Real:
        if self.freezing_arr is not None:
            xp = pycu.get_array_module(arr)
            y = self._compute_divergence_term(arr)  # (batch,nchannels,nx,ny)
            # potential = <arr, divergence_term(arr)> / 2, batch-wise inner product
            return xp.einsum("...jkl,...jkl->...", y, arr) / 2  # (batch,)
        else:
            raise RuntimeError(
                "'apply()' method not defined for AnisEdgeEnhancingDiffusionOp if no `freezing_arr` is passed."
            )


class AnisCoherenceEnhancingDiffusionOp(_Diffusion):
    r"""
    Anisotropic coherence-enhancing diffusion operator, featuring an inhomogeneous anisotropic diffusion tensor
    defined as in [Weickert]_. The diffusion tensor is a function of the
    :py:class:`~pyxu.operator.linop.filter.StructureTensor`,
    a tensor describing the properties of the image in the neighbourhood of each pixel.
    This diffusion operator achieves coherence enhancement by increasing the flux in the direction of the
    eigenvector of the structure tensor with smallest eigenvalue, with an intensity that depends on
    the image coherence, measured as :math:`(e_0-e_1)^2`. Stronger coherence leads to stronger smoothing.

    The gradient of the operator reads :math:`-\mathrm{div}(\mathbf{D}(\mathbf{f})\boldsymbol{\nabla}\mathbf{f})`.

    Let :math:`\mathbf{v}_0^i,\mathbf{v}_1^i` be the eigenvectors of the :math:`i`-th pixel structure tensor
    :math:`\mathbf{S}_i`, with eigenvalues :math:`e_0^i, e_1^i` sorted in decreasing order. The diffusion tensor
    is defined, for the :math:`i`-th pixel, as the symmetric positive definite tensor

    .. math::

        \big(\mathbf{D}(\mathbf{f})\big)_i = \lambda_0^i\mathbf{v}_0^i(\mathbf{v}_0^i)^T+\lambda_1^i\mathbf{v}_1^i(\mathbf{v}_1^i)^T,

    where the smoothing intensities along the two eigendirections are defined, to achieve coherence enhancement, as

    .. math::
        \lambda_0^i &= \alpha,\\
        \lambda_1^i &= h(e_0^i, e_1^i) :=
        \begin{cases}
        \alpha & \text{if } e_0^i=e_1^i \\
        \alpha + (1-\alpha) \exp \big(\frac{-C}{(e_0^i-e_1^i)^{2m}}\big) & \text{otherwise},
        \end{cases}

    where :math:`\alpha \in (0, 1)` controls the smoothing intensity along the first eigendirection, :math:`m` controls
    how quickly :math:`\lambda_1^i` grows as a function of the coherence :math:`(e_0^i-e_1^i)^2`, and
    :math:`C\in\mathbb{R}` is a constant.

    For regions with low coherence, smoothing is performed uniformly along all
    directions with intensity :math:`\lambda_1^i \approx \lambda_0^i = \alpha`. For regions with high coherence, we have
    :math:`\lambda_1^i \approx 1 > \lambda_0^i = \alpha`, hence the smoothing intensity is higher in the direction
    orthogonal to the (locally averaged) gradient.

    In general, the diffusion operator does not admit a potential. However, if the diffusion tensor is evaluated
    at some image :math:`\tilde{\mathbf{f}}` and kept fixed at :math:`\mathbf{D}(\tilde{\mathbf{f}})`, the operator
    derives from the potential :math:`\frac{1}{2}\Vert\sqrt{\mathbf{D}(\tilde{\mathbf{f}})}\boldsymbol{\nabla}\mathbf{f}\Vert_2^2`.
    In this way, the smoothing directions and intensities are fixed according to the features of an image of interest.
    This can be achieved by passing a *freezing array* at initialization. In such cases, a *matrix-based implementation*
    is recommended for efficiency.

    Parameters
    ----------
    dim_shape: NDArrayShape
        Shape of the input array.
    alpha: Real
        Anisotropy parameter, determining the intensity of the smoothing inhibition along the (locally averaged)
        image gradient. Defaults to 1e-1.
    m: Real
        Decay parameter, determines how quickly the smoothing effect changes as a function of the coherence
        :math:`(e_0^i-e_1^i)^2`. Defaults to 1.
    sigma_gd_st: Real
        Width of the Gaussian derivative involved in the structure tensor computation
        (if `diff_method_struct_tens="gd"`). Defaults to 2.
    smooth_sigma_st: Real
        Width of the Gaussian filter smoothing the structure tensor (local averaging). Defaults to 4.
    sampling: Real, list[Real]
        Sampling step (i.e., the distance between two consecutive elements of an array).
        Defaults to 1.
    diff_method_struct_tens: str
        Differentiation method for the structure tensor computation. Must be either 'gd' or 'fd'.
        Defaults to 'gd'.
    freezing_arr: NDArray
        Array at which the diffusion tensor is evaluated and then frozen.
    matrix_based_impl: bool
        Whether to use the matrix-based implementation. Defaults to `False`. Recommended to set `True` if
        a ``freezing_arr`` is passed.
    gpu: bool
        Whether the operator should be instantiated for GPU arrays (passed to the underlying pyxu operators).
        Defaults to `False`.
    dtype: DType
        Working precision of the operator (passed to the underlying pyxu operators).

    Returns
    -------
    op: OpT
        Anisotropic coherence-enhancing diffusion operator.

    Example
    -------

    .. plot::

        import numpy as np
        import matplotlib.pyplot as plt
        import pyxu.opt.solver as pysol
        import pyxu.abc.solver as pysolver
        import pyxu.opt.stop as pystop
        import pyxu_diffops.operator as pyxop
        import skimage as skim

        # Import RGB image
        image = skim.data.cat().astype(float)
        print(image.shape)  # (300, 451, 3)

        # Move color-stacking axis to front (needed for pyxu stacking convention)
        image = np.moveaxis(image, 2, 0)
        print(image.shape)  # (3, 300, 451)

        # Instantiate diffusion operator
        coh_enh_diffop = pyxop.AnisCoherenceEnhancingDiffusionOp(dim_shape=(3, 300, 451), alpha=1e-3)

        # Define PGD solver, with stopping criterion and starting point x0
        stop_crit = pystop.MaxIter(n=100)

        # Perform 100 gradient flow iterations starting from x0
        PGD = pysol.PGD(f=coh_enh_diffop, show_progress=False, verbosity=100)
        PGD.fit(**dict(mode=pysolver.SolverMode.BLOCK, x0=image, stop_crit=stop_crit,
                       tau=2 / coh_enh_diffop.diff_lipschitz))
        coh_enh_image = PGD.solution()

        # Reshape images for plotting
        image = np.moveaxis(image, 0, 2)
        coh_enh_image = np.moveaxis(coh_enh_image, 0, 2)

        # Plot
        fig, ax = plt.subplots(1, 2, figsize=(10, 5))
        ax[0].imshow(image.astype(int))
        ax[0].set_title("Image", fontsize=15)
        ax[0].axis('off')
        ax[1].imshow(coh_enh_image.astype(int))
        ax[1].set_title("100 iterations Anis. Coherence Enhancing", fontsize=15)
        ax[1].axis('off')

    """

    def __init__(
        self,
        dim_shape: pyct.NDArrayShape,
        alpha: pyct.Real = 0.1,
        m: pyct.Real = 1,
        sigma_gd_st: pyct.Real = 2,
        smooth_sigma_st: pyct.Real = 4,
        sampling: typ.Union[pyct.Real, cabc.Sequence[pyct.Real]] = 1,
        diff_method_struct_tens: str = "gd",
        freezing_arr: pyct.NDArray = None,
        matrix_based_impl: bool = False,
        gpu: bool = False,
        dtype: typ.Optional[pyct.DType] = None,
    ):
        gradient = pydiff.Gradient(
            dim_shape=dim_shape,
            directions=(1, 2),
            diff_method="fd",
            scheme="forward",
            mode="symmetric",
            sampling=sampling,
            gpu=gpu,
            dtype=dtype,
        )
        structure_tensor = pyfilt.StructureTensor(
            dim_shape=dim_shape[1:],
            diff_method=diff_method_struct_tens,
            smooth_sigma=smooth_sigma_st,
            sigma=sigma_gd_st,
            mode="symmetric",
            sampling=sampling,
            gpu=gpu,
            dtype=dtype,
        )
        coh_enh_diffusion_coeff = DiffusionCoeffAnisoCoherenceEnhancing(
            dim_shape=dim_shape, structure_tensor=structure_tensor, alpha=alpha, m=m
        )
        self.freezing_arr = freezing_arr
        if freezing_arr is not None:
            # Add a leading channel axis if the freezing array is given without one.
            if len(freezing_arr.shape) == len(dim_shape) - 1:
                xp = pycu.get_array_module(freezing_arr)
                self.freezing_arr = xp.expand_dims(freezing_arr, axis=0)
            coh_enh_diffusion_coeff.freeze(self.freezing_arr)
        super().__init__(
            dim_shape,
            gradient=gradient,
            diffusion_coefficient=coh_enh_diffusion_coeff,
            matrix_based_impl=matrix_based_impl,
            gpu=gpu,
            dtype=dtype,
        )
        # Initialize Lipschitz constant of the gradient. Should this be done inside _DiffusionOp.__init__ instead?
        self.diff_lipschitz = gradient.lipschitz**2

    # @pycrt.enforce_precision(i="arr")
    # @pycu.vectorize("arr")
    def apply(self, arr: pyct.NDArray) -> pyct.Real:
        if self.freezing_arr is not None:
            xp = pycu.get_array_module(arr)
            y = self._compute_divergence_term(arr)  # (batch,nchannels,nx,ny)
            # potential = <arr, divergence_term(arr)> / 2, batch-wise inner product
            return xp.einsum("...jkl,...jkl->...", y, arr) / 2  # (batch,)
        else:
            raise RuntimeError(
                "'apply()' method not defined for AnisCoherenceEnhancingDiffusionOp if no `freezing_arr` is passed."
            )


class AnisDiffusionOp(_Diffusion):
    r"""
    Anisotropic diffusion operator, featuring an inhomogeneous anisotropic diffusion tensor.
    The diffusion tensor is a function of the
    :py:class:`~pyxu.operator.linop.filter.StructureTensor`,
    a tensor describing the properties of the image in the neighbourhood of each pixel.
    This diffusion operator achieves anisotropic smoothing by reducing the flux in the direction of the
    eigenvector of the structure tensor with largest eigenvalue. Essentially, the (locally averaged)
    gradient is preserved.

    The gradient of the operator reads :math:`-\mathrm{div}(\mathbf{D}(\mathbf{f})\boldsymbol{\nabla}\mathbf{f})`.

    Let :math:`\mathbf{v}_0^i,\mathbf{v}_1^i` be the eigenvectors of the :math:`i`-th pixel structure tensor
    :math:`\mathbf{S}_i`, with eigenvalues :math:`e_0^i, e_1^i` sorted in decreasing order. The diffusion tensor
    is defined, for the :math:`i`-th pixel, as the symmetric positive definite tensor

    .. math::
        \big(\mathbf{D}(\mathbf{f})\big)_i = \lambda_0^i\mathbf{v}_0^i(\mathbf{v}_0^i)^T+\lambda_1^i\mathbf{v}_1^i(\mathbf{v}_1^i)^T,

    where the smoothing intensities along the two eigendirections are defined, to achieve anisotropic smoothing, as

    .. math::
        \lambda_0^i &= \alpha,\\
        \lambda_1^i &= 1,

    where :math:`\alpha \in (0, 1)` controls the smoothing intensity along the first eigendirection.

    In general, the diffusion operator does not admit a potential. However, if the diffusion tensor is evaluated
    at some image :math:`\tilde{\mathbf{f}}` and kept fixed at :math:`\mathbf{D}(\tilde{\mathbf{f}})`, the operator
    derives from the potential :math:`\frac{1}{2}\Vert\sqrt{\mathbf{D}(\tilde{\mathbf{f}})}\boldsymbol{\nabla}\mathbf{f}\Vert_2^2`.
    In this way, the smoothing directions and intensities are fixed according to the features of an image of interest.
    This can be achieved by passing a *freezing array* at initialization. In such cases, a *matrix-based implementation*
    is recommended for efficiency.

    Parameters
    ----------
    dim_shape: NDArrayShape
        Shape of the input array.
    alpha: Real
        Anisotropy parameter, determining the intensity of the smoothing inhibition along the (locally averaged)
        image gradient. Defaults to 1e-1.
    sigma_gd_st: Real
        Width of the Gaussian derivative involved in the structure tensor computation
        (if `diff_method_struct_tens="gd"`). Defaults to 1.
    sampling: Real, list[Real]
        Sampling step (i.e., the distance between two consecutive elements of an array).
        Defaults to 1.
    diff_method_struct_tens: str
        Differentiation method for the structure tensor computation. Must be either 'gd' or 'fd'.
        Defaults to 'fd'.
    freezing_arr: NDArray
        Array at which the diffusion tensor is evaluated and then frozen.
    matrix_based_impl: bool
        Whether to use the matrix-based implementation. Defaults to `False`. Recommended to set `True` if
        a ``freezing_arr`` is passed.
    gpu: bool
        Whether the operator should be instantiated for GPU arrays (passed to the underlying pyxu operators).
        Defaults to `False`.
    dtype: DType
        Working precision of the operator (passed to the underlying pyxu operators).

    Returns
    -------
    op: OpT
        Anisotropic diffusion operator.
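
    Example
    -------

    A minimal gradient-flow sketch, mirroring the examples of the other operators in this module
    (the value of ``alpha`` is an illustrative choice):

    .. plot::

        import numpy as np
        import matplotlib.pyplot as plt
        import pyxu.opt.solver as pysol
        import pyxu.abc.solver as pysolver
        import pyxu.opt.stop as pystop
        import pyxu_diffops.operator as pyxop
        import skimage as skim

        # Import RGB image and move color-stacking axis to front (pyxu stacking convention)
        image = np.moveaxis(skim.data.cat().astype(float), 2, 0)  # (3, 300, 451)

        # Instantiate diffusion operator
        anis_diffop = pyxop.AnisDiffusionOp(dim_shape=(3, 300, 451), alpha=1e-3)

        # Define stopping criterion
        stop_crit = pystop.MaxIter(n=100)

        # Perform 100 gradient flow iterations starting from image
        PGD = pysol.PGD(f=anis_diffop, show_progress=False, verbosity=100)
        PGD.fit(**dict(mode=pysolver.SolverMode.BLOCK, x0=image, stop_crit=stop_crit,
                       tau=2 / anis_diffop.diff_lipschitz))
        anis_smoothed_image = PGD.solution()

        # Reshape images for plotting
        image = np.moveaxis(image, 0, 2)
        anis_smoothed_image = np.moveaxis(anis_smoothed_image, 0, 2)

        # Plot
        fig, ax = plt.subplots(1, 2, figsize=(10, 5))
        ax[0].imshow(image.astype(int))
        ax[0].set_title("Image", fontsize=15)
        ax[0].axis('off')
        ax[1].imshow(anis_smoothed_image.astype(int))
        ax[1].set_title("100 iterations Anisotropic smoothing", fontsize=15)
        ax[1].axis('off')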
    """

    def __init__(
        self,
        dim_shape: pyct.NDArrayShape,
        alpha: pyct.Real = 0.1,
        sigma_gd_st: pyct.Real = 1,
        sampling: typ.Union[pyct.Real, cabc.Sequence[pyct.Real]] = 1,
        diff_method_struct_tens: str = "fd",
        freezing_arr: pyct.NDArray = None,
        matrix_based_impl: bool = False,
        gpu: bool = False,
        dtype: typ.Optional[pyct.DType] = None,
    ):
        gradient = pydiff.Gradient(
            dim_shape=dim_shape,
            directions=(1, 2),
            diff_method="fd",
            scheme="forward",
            mode="symmetric",
            sampling=sampling,
            gpu=gpu,
            dtype=dtype,
        )
        structure_tensor = pyfilt.StructureTensor(
            dim_shape=dim_shape[1:],
            diff_method=diff_method_struct_tens,
            smooth_sigma=0,
            sigma=sigma_gd_st,
            mode="symmetric",
            sampling=sampling,
            gpu=gpu,
            dtype=dtype,
        )
        anis_diffusion_coeff = DiffusionCoeffAnisotropic(
            dim_shape=dim_shape, structure_tensor=structure_tensor, alpha=alpha
        )
        self.freezing_arr = freezing_arr
        if freezing_arr is not None:
            # Add a leading channel axis if the freezing array is given without one.
            if len(freezing_arr.shape) == len(dim_shape) - 1:
                xp = pycu.get_array_module(freezing_arr)
                self.freezing_arr = xp.expand_dims(freezing_arr, axis=0)
            anis_diffusion_coeff.freeze(self.freezing_arr)
        super().__init__(
            dim_shape,
            gradient=gradient,
            diffusion_coefficient=anis_diffusion_coeff,
            matrix_based_impl=matrix_based_impl,
            gpu=gpu,
            dtype=dtype,
        )
        # Initialize Lipschitz constant of the gradient. Should this be done inside _DiffusionOp.__init__ instead?
        self.diff_lipschitz = gradient.lipschitz**2

    # @pycrt.enforce_precision(i="arr")
    # @pycu.vectorize("arr")
    def apply(self, arr: pyct.NDArray) -> pyct.Real:
        if self.freezing_arr is not None:
            xp = pycu.get_array_module(arr)
            y = self._compute_divergence_term(arr)  # (batch,nchannels,nx,ny)
            # potential = <arr, divergence_term(arr)> / 2, batch-wise inner product
            return xp.einsum("...jkl,...jkl->...", y, arr) / 2  # (batch,)
        else:
            raise RuntimeError("'apply()' method not defined for AnisDiffusionOp if no `freezing_arr` is passed.")


# class AnisMfiDiffusionOp(_Diffusion):
#     r"""
#     Example
#     -------
#
#     .. plot::
#
#         import numpy as np
#         import matplotlib.pyplot as plt
#         import pyxu.opt.solver as pysol
#         import pyxu.abc.solver as pysolver
#         import pyxu.opt.stop as pystop
#         import src.diffusion_ops.operator as diffop
#         import skimage as skim
#
#         # Import RGB image
#         image = skim.data.cat().astype(float)
#         print(image.shape)  # (300, 451, 3)
#         # Move color-stacking axis to front (needed for pyxu stacking convention)
#         image = np.moveaxis(image, 2, 0)
#         print(image.shape)  # (3, 300, 451)
#         # Instantiate diffusion operator
#         coh_enh_diffop = diffop.AnisCoherenceEnhancingDiffusionOp(dim_shape=(300, 451), nchannels=3, alpha=1e-3, m=1)
#         # Define PGD solver, with stopping criterion and starting point x0
#         stop_crit = pystop.MaxIter(n=100)
#         x0 = image.reshape(1, -1)
#         # Perform 100 gradient flow iterations starting from x0
#         PGD = pysol.PGD(f=coh_enh_diffop, show_progress=False, verbosity=100)
#         PGD.fit(**dict(mode=pysolver.SolverMode.BLOCK, x0=x0, stop_crit=stop_crit,
#                        tau=2/coh_enh_diffop.diff_lipschitz))
#         coh_enhanced_image = PGD.solution()
#         # Reshape images for plotting
#         image = np.moveaxis(image, 0, 2)
#         coh_enhanced_image = np.moveaxis(coh_enhanced_image.reshape(3, 300, 451), 0, 2)
#         # Plot
#         fig, ax = plt.subplots(1, 2, figsize=(10, 5))
#         ax[0].imshow(image.astype(int))
#         ax[0].set_title("Image", fontsize=15)
#         ax[0].axis('off')
#         ax[1].imshow(coh_enhanced_image.astype(int))
#         ax[1].set_title("100 iterations Coherence-Enhancing", fontsize=15)
#         ax[1].axis('off')
#
#     """
#
#     def __init__(
#         self,
#         dim_shape: pyct.NDArrayShape,
#         freezing_arr_grad: pyct.NDArray,
#         extra_term: bool = True,
#         beta: pyct.Real = 1,
#         clipping_value: pyct.Real = 1e-5,
#         sigma_gd_st: pyct.Real = 1,
#         alpha: pyct.Real = 0.1,
#         sampling: typ.Union[pyct.Real, cabc.Sequence[pyct.Real]] = 1,
#         diff_method_struct_tens: str = "fd",
#     ):
#         # Needs to be revisited and adjusted to the new changes in shapes etc. Also interesting because we may
#         # want to implement the approach shown here by default for all diffusion ops, so it should be considered
#         # carefully.
#
#         # Instantiate a custom gradient operator, obtained by projecting the actual gradient along the
#         # structure tensor's eigendirections.
#         # Define non-oriented gradient matrix of shape (2N, N):
#         Dx = -np.diag(np.ones(dim_shape[0])) + np.diag(np.ones(dim_shape[0] - 1), 1)
#         Dx[-1, -1] = 0  # symmetric boundary conditions, no flux
#         Dy = -np.diag(np.ones(dim_shape[1])) + np.diag(np.ones(dim_shape[1] - 1), 1)
#         Dy[-1, -1] = 0  # symmetric boundary conditions, no flux
#         # define gradient matrix
#         D = np.vstack((np.kron(Dx, np.eye(dim_shape[1])), np.kron(np.eye(dim_shape[0]), Dy)))
#         # instantiate helper reg_fct_matrixfree to access its methods
#         reg_fct_matrixfree = AnisDiffusionOp(dim_shape=dim_shape,
#                                              alpha=alpha,
#                                              sigma_gd_st=sigma_gd_st,
#                                              sampling=sampling,
#                                              diff_method_struct_tens=diff_method_struct_tens)
#         reg_fct_matrixfree._op.diffusion_coefficient.alpha = alpha
#         # returns eigenvectors u of shape (N, 2, 2), eigenvalues e of shape (N, 2)
#         u, e = reg_fct_matrixfree._op.diffusion_coefficient._eigendecompose_struct_tensor(freezing_arr_grad.reshape(1, -1))
#         # returns lambdas of shape (N, 2)
#         lambdas = reg_fct_matrixfree._op.diffusion_coefficient._compute_intensities(e)
#         lambdas = np.sqrt(lambdas)
#         # assemble coefficients w_coeffs of shape (2N, 1) to obtain the oriented gradient
#         w_coeffs_e1 = (lambdas[:, 0] * u[:, :, 0]).flatten(order='F')
#         w_coeffs_e2 = (lambdas[:, 1] * u[:, :, 1]).flatten(order='F')
#         # compute oriented gradient matrices of shape (2N, N)
#         oriented_gradient_e1 = w_coeffs_e1 * D
#         oriented_gradient_e2 = w_coeffs_e2 * D
#         # assemble the oriented gradient matrix by summing the two subblocks
#         N = np.prod(dim_shape)
#         oriented_gradient_matrix = np.vstack((oriented_gradient_e1[:N, :] + oriented_gradient_e1[N:, :],
#                                               oriented_gradient_e2[:N, :] + oriented_gradient_e2[N:, :]))
#         # instantiate gradient operator
#         gradient = pxa.LinOp.from_array(A=sp.csr_matrix(oriented_gradient_matrix))
#
#         # instantiate mfi_diffusion coeff
#         mfi_diffusion_coeff = DiffusionCoeffIsotropic(
#             dim_shape=dim_shape, diffusivity=MfiDiffusivity(dim_shape=dim_shape, beta=beta, clipping_value=clipping_value)
#         )
#         if extra_term:
#             mfi_extra_term = MfiExtraTerm(dim_shape=dim_shape, gradient=gradient, beta=beta, clipping_value=clipping_value)
#             super().__init__(
#                 dim_shape, gradient=gradient, diffusion_coefficient=mfi_diffusion_coeff, extra_diffusion_term=mfi_extra_term
#             )
#         else:
#             super().__init__(
#                 dim_shape, gradient=gradient, diffusion_coefficient=mfi_diffusion_coeff)
#         # initialize Lipschitz constants. Should this be done inside _DiffusionOp.__init__ instead?
#         self.diff_lipschitz = gradient.lipschitz ** 2
#         if extra_term:
#             self.diff_lipschitz *= 2
#
#     # @pycrt.enforce_precision(i="arr")
#     @pycu.vectorize("arr")
#     def apply(self, arr: pyct.NDArray) -> pyct.Real:
#         if self.extra_diffusion_term is not None:
#             xp = pycu.get_array_module(arr)
#             y = self._compute_divergence_term(arr)
#             arr = arr.reshape(1, -1)
#             return xp.dot(arr, y.T) / 2
#         else:
#             raise RuntimeError("'apply()' method not defined for AnisMfiDiffusionOp with no MfiExtraTerm: no underlying variational interpretation")