From f0258a2252dd454630b27b46d9ae737c5f501b9e Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 29 Oct 2024 16:26:00 +0000 Subject: [PATCH] Deployed dad1d74 with MkDocs version: 1.6.1 --- .nojekyll | 0 404.html | 732 ++ api/grad_operator/index.html | 3926 ++++++++++ api/length_model/index.html | 5030 ++++++++++++ api/loss_recorder/index.html | 2493 ++++++ api/momentum_operator/index.html | 3783 ++++++++++ api/utils/index.html | 2927 +++++++ api/weight_model/index.html | 1477 ++++ assets/PKU.svg | 771 ++ assets/TUM.svg | 57 + assets/_mkdocstrings.css | 143 + assets/algorithm.png | Bin 0 -> 339757 bytes assets/config.png | Bin 0 -> 27603 bytes assets/config.svg | 2871 +++++++ assets/config_colorful.svg | 2805 +++++++ assets/config_illustration.png | Bin 0 -> 256100 bytes assets/config_white.png | Bin 0 -> 4300 bytes assets/config_white.svg | 2806 +++++++ assets/download.svg | 122 + assets/github.svg | 14 + assets/images/favicon.png | Bin 0 -> 1870 bytes assets/javascripts/bundle.83f73b43.min.js | 16 + assets/javascripts/bundle.83f73b43.min.js.map | 7 + assets/javascripts/lunr/min/lunr.ar.min.js | 1 + assets/javascripts/lunr/min/lunr.da.min.js | 18 + assets/javascripts/lunr/min/lunr.de.min.js | 18 + assets/javascripts/lunr/min/lunr.du.min.js | 18 + assets/javascripts/lunr/min/lunr.el.min.js | 1 + assets/javascripts/lunr/min/lunr.es.min.js | 18 + assets/javascripts/lunr/min/lunr.fi.min.js | 18 + assets/javascripts/lunr/min/lunr.fr.min.js | 18 + assets/javascripts/lunr/min/lunr.he.min.js | 1 + assets/javascripts/lunr/min/lunr.hi.min.js | 1 + assets/javascripts/lunr/min/lunr.hu.min.js | 18 + assets/javascripts/lunr/min/lunr.hy.min.js | 1 + assets/javascripts/lunr/min/lunr.it.min.js | 18 + assets/javascripts/lunr/min/lunr.ja.min.js | 1 + assets/javascripts/lunr/min/lunr.jp.min.js | 1 + assets/javascripts/lunr/min/lunr.kn.min.js | 1 + assets/javascripts/lunr/min/lunr.ko.min.js | 1 + 
assets/javascripts/lunr/min/lunr.multi.min.js | 1 + assets/javascripts/lunr/min/lunr.nl.min.js | 18 + assets/javascripts/lunr/min/lunr.no.min.js | 18 + assets/javascripts/lunr/min/lunr.pt.min.js | 18 + assets/javascripts/lunr/min/lunr.ro.min.js | 18 + assets/javascripts/lunr/min/lunr.ru.min.js | 18 + assets/javascripts/lunr/min/lunr.sa.min.js | 1 + .../lunr/min/lunr.stemmer.support.min.js | 1 + assets/javascripts/lunr/min/lunr.sv.min.js | 18 + assets/javascripts/lunr/min/lunr.ta.min.js | 1 + assets/javascripts/lunr/min/lunr.te.min.js | 1 + assets/javascripts/lunr/min/lunr.th.min.js | 1 + assets/javascripts/lunr/min/lunr.tr.min.js | 18 + assets/javascripts/lunr/min/lunr.vi.min.js | 1 + assets/javascripts/lunr/min/lunr.zh.min.js | 1 + assets/javascripts/lunr/tinyseg.js | 206 + assets/javascripts/lunr/wordcut.js | 6708 +++++++++++++++++ .../workers/search.6ce7567c.min.js | 42 + .../workers/search.6ce7567c.min.js.map | 7 + assets/stylesheets/main.0253249f.min.css | 1 + assets/stylesheets/main.0253249f.min.css.map | 1 + assets/stylesheets/palette.06af60db.min.css | 1 + .../stylesheets/palette.06af60db.min.css.map | 1 + assets/troubleshooting/difweightmodel.png | Bin 0 -> 27776 bytes assets/troubleshooting/difweightmodel.svg | 1291 ++++ examples/mtl_toy/index.html | 2041 +++++ examples/mtl_toy/mtl_toy.ipynb | 467 ++ examples/pinn_burgers/index.html | 2108 ++++++ examples/pinn_burgers/pinn_burgers.ipynb | 497 ++ index.html | 949 +++ javascripts/mathjax.js | 16 + objects.inv | Bin 0 -> 1589 bytes requirements.txt | 7 + search/search_index.json | 1 + sitemap.xml | 51 + sitemap.xml.gz | Bin 0 -> 305 bytes start/start/index.html | 973 +++ start/theory/index.html | 969 +++ start/troubleshooting/index.html | 1600 ++++ start/troubleshooting/troubleshooting.ipynb | 179 + 80 files changed, 48383 insertions(+) create mode 100644 .nojekyll create mode 100644 404.html create mode 100644 api/grad_operator/index.html create mode 100644 api/length_model/index.html create mode 100644 
api/loss_recorder/index.html create mode 100644 api/momentum_operator/index.html create mode 100644 api/utils/index.html create mode 100644 api/weight_model/index.html create mode 100644 assets/PKU.svg create mode 100644 assets/TUM.svg create mode 100644 assets/_mkdocstrings.css create mode 100644 assets/algorithm.png create mode 100644 assets/config.png create mode 100644 assets/config.svg create mode 100644 assets/config_colorful.svg create mode 100644 assets/config_illustration.png create mode 100644 assets/config_white.png create mode 100644 assets/config_white.svg create mode 100644 assets/download.svg create mode 100644 assets/github.svg create mode 100644 assets/images/favicon.png create mode 100644 assets/javascripts/bundle.83f73b43.min.js create mode 100644 assets/javascripts/bundle.83f73b43.min.js.map create mode 100644 assets/javascripts/lunr/min/lunr.ar.min.js create mode 100644 assets/javascripts/lunr/min/lunr.da.min.js create mode 100644 assets/javascripts/lunr/min/lunr.de.min.js create mode 100644 assets/javascripts/lunr/min/lunr.du.min.js create mode 100644 assets/javascripts/lunr/min/lunr.el.min.js create mode 100644 assets/javascripts/lunr/min/lunr.es.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.he.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hu.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hy.min.js create mode 100644 assets/javascripts/lunr/min/lunr.it.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ja.min.js create mode 100644 assets/javascripts/lunr/min/lunr.jp.min.js create mode 100644 assets/javascripts/lunr/min/lunr.kn.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ko.min.js create mode 100644 assets/javascripts/lunr/min/lunr.multi.min.js create mode 100644 
assets/javascripts/lunr/min/lunr.nl.min.js create mode 100644 assets/javascripts/lunr/min/lunr.no.min.js create mode 100644 assets/javascripts/lunr/min/lunr.pt.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ro.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ru.min.js create mode 100644 assets/javascripts/lunr/min/lunr.sa.min.js create mode 100644 assets/javascripts/lunr/min/lunr.stemmer.support.min.js create mode 100644 assets/javascripts/lunr/min/lunr.sv.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ta.min.js create mode 100644 assets/javascripts/lunr/min/lunr.te.min.js create mode 100644 assets/javascripts/lunr/min/lunr.th.min.js create mode 100644 assets/javascripts/lunr/min/lunr.tr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.vi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.zh.min.js create mode 100644 assets/javascripts/lunr/tinyseg.js create mode 100644 assets/javascripts/lunr/wordcut.js create mode 100644 assets/javascripts/workers/search.6ce7567c.min.js create mode 100644 assets/javascripts/workers/search.6ce7567c.min.js.map create mode 100644 assets/stylesheets/main.0253249f.min.css create mode 100644 assets/stylesheets/main.0253249f.min.css.map create mode 100644 assets/stylesheets/palette.06af60db.min.css create mode 100644 assets/stylesheets/palette.06af60db.min.css.map create mode 100644 assets/troubleshooting/difweightmodel.png create mode 100644 assets/troubleshooting/difweightmodel.svg create mode 100644 examples/mtl_toy/index.html create mode 100644 examples/mtl_toy/mtl_toy.ipynb create mode 100644 examples/pinn_burgers/index.html create mode 100644 examples/pinn_burgers/pinn_burgers.ipynb create mode 100644 index.html create mode 100644 javascripts/mathjax.js create mode 100644 objects.inv create mode 100644 requirements.txt create mode 100644 search/search_index.json create mode 100644 sitemap.xml create mode 100644 sitemap.xml.gz create mode 100644 start/start/index.html create mode 
100644 start/theory/index.html create mode 100644 start/troubleshooting/index.html create mode 100644 start/troubleshooting/troubleshooting.ipynb diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 0000000..e69de29 diff --git a/404.html b/404.html new file mode 100644 index 0000000..7b4b8a4 --- /dev/null +++ b/404.html @@ -0,0 +1,732 @@ + + + +
+ + + + + + + + + + + + + + + + + + +The grad_operator
module contains the main operators of ConFIG algorithm. You can use these operators to perform the ConFIG update step for your optimization problem.
conflictfree.grad_operator.ConFIG_update
+
+
+¤ConFIG_update(
+ grads: Union[torch.Tensor, Sequence[torch.Tensor]],
+ weight_model: WeightModel = EqualWeight(),
+ length_model: LengthModel = ProjectionLength(),
+ use_least_square: bool = True,
+ losses: Optional[Sequence] = None,
+) -> torch.Tensor
+
Performs the standard ConFIG update step.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ grads
+ |
+
+ Union[Tensor, Sequence[Tensor]]
+ |
+
+
+
+ The gradients to update. +It can be a stack of gradient vectors (at dim 0) or a sequence of gradient vectors. + |
+ + required + | +
+ weight_model
+ |
+
+ WeightModel
+ |
+
+
+
+ The weight model for calculating the direction weights. +Defaults to EqualWeight(), which will make the final update gradient not biased towards any gradient. + |
+
+ EqualWeight()
+ |
+
+ length_model
+ |
+
+ LengthModel
+ |
+
+
+
+ The length model for rescaling the length of the final gradient. +Defaults to ProjectionLength(), which will project each gradient vector onto the final gradient vector to get the final length. + |
+
+ ProjectionLength()
+ |
+
+ use_least_square
+ |
+
+ bool
+ |
+
+
+
+ Whether to use the least square method for calculating the best direction.
+If set to False, we will directly calculate the pseudo-inverse of the gradient matrix. See |
+
+ True
+ |
+
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses associated with the gradients. +The losses will be passed to the weight and length model. If your weight/length model doesn't require loss information, +you can set this value as None. Defaults to None. + |
+
+ None
+ |
+
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ torch.Tensor: The final update gradient. + |
+
Examples:
+from conflictfree.grad_operator import ConFIG_update
+from conflictfree.utils import get_gradient_vector,apply_gradient_vector
+optimizer=torch.Adam(network.parameters(),lr=1e-3)
+for input_i in dataset:
+ grads=[] # we record gradients rather than losses
+ for loss_fn in loss_fns:
+ optimizer.zero_grad()
+ loss_i=loss_fn(input_i)
+ loss_i.backward()
+ grads.append(get_gradient_vector(network)) #get loss-specfic gradient
+ g_config=ConFIG_update(grads) # calculate the conflict-free direction
+ apply_gradient_vector(network) # set the condlict-free direction to the network
+ optimizer.step()
+
conflictfree/grad_operator.py
82 + 83 + 84 + 85 + 86 + 87 + 88 + 89 + 90 + 91 + 92 + 93 + 94 + 95 + 96 + 97 + 98 + 99 +100 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 |
|
conflictfree.grad_operator.ConFIG_update_double
+
+
+¤ConFIG_update_double(
+ grad_1: torch.Tensor,
+ grad_2: torch.Tensor,
+ weight_model: WeightModel = EqualWeight(),
+ length_model: LengthModel = ProjectionLength(),
+ losses: Optional[Sequence] = None,
+) -> torch.Tensor
+
ConFIG update for two gradients where no inverse calculation is needed.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ grad_1
+ |
+
+ Tensor
+ |
+
+
+
+ The first gradient. + |
+ + required + | +
+ grad_2
+ |
+
+ Tensor
+ |
+
+
+
+ The second gradient. + |
+ + required + | +
+ weight_model
+ |
+
+ WeightModel
+ |
+
+
+
+ The weight model for calculating the direction weights. +Defaults to EqualWeight(), which will make the final update gradient not biased towards any gradient. + |
+
+ EqualWeight()
+ |
+
+ length_model
+ |
+
+ LengthModel
+ |
+
+
+
+ The length model for rescaling the length of the final gradient. +Defaults to ProjectionLength(), which will project each gradient vector onto the final gradient vector to get the final length. + |
+
+ ProjectionLength()
+ |
+
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses associated with the gradients. +The losses will be passed to the weight and length model. If your weight/length model doesn't require loss information, +you can set this value as None. Defaults to None. + |
+
+ None
+ |
+
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ torch.Tensor: The final update gradient. + |
+
Examples:
+from conflictfree.grad_operator import ConFIG_update_double
+from conflictfree.utils import get_gradient_vector,apply_gradient_vector
+optimizer=torch.Adam(network.parameters(),lr=1e-3)
+for input_i in dataset:
+ grads=[] # we record gradients rather than losses
+ for loss_fn in [loss_fn1, loss_fn2]:
+ optimizer.zero_grad()
+ loss_i=loss_fn(input_i)
+ loss_i.backward()
+ grads.append(get_gradient_vector(network)) #get loss-specfic gradient
+ g_config=ConFIG_update_double(grads) # calculate the conflict-free direction
+ apply_gradient_vector(network) # set the condlict-free direction to the network
+ optimizer.step()
+
conflictfree/grad_operator.py
11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +21 +22 +23 +24 +25 +26 +27 +28 +29 +30 +31 +32 +33 +34 +35 +36 +37 +38 +39 +40 +41 +42 +43 +44 +45 +46 +47 +48 +49 +50 +51 +52 +53 +54 +55 +56 +57 +58 +59 +60 +61 +62 +63 +64 +65 +66 +67 +68 +69 +70 +71 +72 +73 +74 +75 +76 +77 +78 +79 |
|
conflictfree.grad_operator.ConFIGOperator
+
+
+¤
+ Bases: GradientOperator
Operator for the ConFIG algorithm.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ weight_model
+ |
+
+ WeightModel
+ |
+
+
+
+ The weight model for calculating the direction weights. +Defaults to EqualWeight(), which will make the final update gradient not biased towards any gradient. + |
+
+ EqualWeight()
+ |
+
+ length_model
+ |
+
+ LengthModel
+ |
+
+
+
+ The length model for rescaling the length of the final gradient. +Defaults to ProjectionLength(), which will project each gradient vector onto the final gradient vector to get the final length. + |
+
+ ProjectionLength()
+ |
+
+ allow_simplified_model
+ |
+
+ bool
+ |
+
+
+
+ Whether to allow simplified model for calculating the gradient. +If set to True, will use simplified form of ConFIG method when there are only two losses (ConFIG_update_double). Defaults to True. + |
+
+ True
+ |
+
+ use_least_square
+ |
+
+ bool
+ |
+
+
+
+ Whether to use the least square method for calculating the best direction.
+If set to False, we will directly calculate the pseudo-inverse of the gradient matrix. See |
+
+ True
+ |
+
Examples:
+from conflictfree.grad_operator import ConFIGOperator
+from conflictfree.utils import get_gradient_vector,apply_gradient_vector
+optimizer=torch.Adam(network.parameters(),lr=1e-3)
+operator=ConFIGOperator() # initialize operator
+for input_i in dataset:
+ grads=[]
+ for loss_fn in loss_fns:
+ optimizer.zero_grad()
+ loss_i=loss_fn(input_i)
+ loss_i.backward()
+ grads.append(get_gradient_vector(network))
+ g_config=operator.calculate_gradient(grads) # calculate the conflict-free direction
+ apply_gradient_vector(network) # or simply use `operator.update_gradient(network,grads)` to calculate and set the condlict-free direction to the network
+ optimizer.step()
+
conflictfree/grad_operator.py
205 +206 +207 +208 +209 +210 +211 +212 +213 +214 +215 +216 +217 +218 +219 +220 +221 +222 +223 +224 +225 +226 +227 +228 +229 +230 +231 +232 +233 +234 +235 +236 +237 +238 +239 +240 +241 +242 +243 +244 +245 +246 +247 +248 +249 +250 +251 +252 +253 +254 +255 +256 +257 +258 +259 +260 +261 +262 +263 +264 +265 +266 +267 +268 +269 +270 +271 +272 +273 +274 +275 +276 +277 +278 +279 +280 +281 +282 +283 +284 +285 +286 +287 +288 |
|
allow_simplified_model
+
+
+
+ instance-attribute
+
+
+¤allow_simplified_model = allow_simplified_model
+
use_least_square
+
+
+
+ instance-attribute
+
+
+¤use_least_square = use_least_square
+
update_gradient
+
+
+¤update_gradient(
+ network: torch.nn.Module,
+ grads: Union[torch.Tensor, Sequence[torch.Tensor]],
+ losses: Optional[Sequence] = None,
+) -> None
+
Calculate the gradient and apply the gradient to the network.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ network
+ |
+
+ Module
+ |
+
+
+
+ The target network. + |
+ + required + | +
+ grads
+ |
+
+ Union[Tensor, Sequence[Tensor]]
+ |
+
+
+
+ The gradients to update. +It can be a stack of gradient vectors (at dim 0) or a sequence of gradient vectors. + |
+ + required + | +
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses associated with the gradients. +The losses will be passed to the weight and length model. If your weight/length model doesn't require loss information, +you can set this value as None. Defaults to None. + |
+
+ None
+ |
+
Returns:
+Type | +Description | +
---|---|
+ None
+ |
+
+
+
+ None + |
+
conflictfree/grad_operator.py
181 +182 +183 +184 +185 +186 +187 +188 +189 +190 +191 +192 +193 +194 +195 +196 +197 +198 +199 +200 +201 +202 |
|
__init__
+
+
+¤__init__(
+ weight_model: WeightModel = EqualWeight(),
+ length_model: LengthModel = ProjectionLength(),
+ allow_simplified_model: bool = True,
+ use_least_square: bool = True,
+)
+
conflictfree/grad_operator.py
240 +241 +242 +243 +244 +245 +246 +247 +248 +249 +250 +251 |
|
calculate_gradient
+
+
+¤calculate_gradient(
+ grads: Union[torch.Tensor, Sequence[torch.Tensor]],
+ losses: Optional[Sequence] = None,
+) -> torch.Tensor
+
Calculates the gradient using the ConFIG algorithm.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ grads
+ |
+
+ Union[Tensor, Sequence[Tensor]]
+ |
+
+
+
+ The gradients to update. +It can be a stack of gradient vectors (at dim 0) or a sequence of gradient vectors. + |
+ + required + | +
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses associated with the gradients. +The losses will be passed to the weight and length model. If your weight/length model doesn't require loss information, +you can set this value as None. Defaults to None. + |
+
+ None
+ |
+
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ torch.Tensor: The calculated gradient. + |
+
conflictfree/grad_operator.py
253 +254 +255 +256 +257 +258 +259 +260 +261 +262 +263 +264 +265 +266 +267 +268 +269 +270 +271 +272 +273 +274 +275 +276 +277 +278 +279 +280 +281 +282 +283 +284 +285 +286 +287 +288 |
|
conflictfree.grad_operator.PCGradOperator
+
+
+¤
+ Bases: GradientOperator
PCGradOperator class represents a gradient operator for PCGrad algorithm.
+@inproceedings{yu2020gradient, +title={Gradient surgery for multi-task learning}, +author={Yu, Tianhe and Kumar, Saurabh and Gupta, Abhishek and Levine, Sergey and Hausman, Karol and Finn, Chelsea}, +booktitle={34th International Conference on Neural Information Processing Systems}, +year={2020}, +url={https://dl.acm.org/doi/abs/10.5555/3495724.3496213} +}
+ + + + + + +conflictfree/grad_operator.py
291 +292 +293 +294 +295 +296 +297 +298 +299 +300 +301 +302 +303 +304 +305 +306 +307 +308 +309 +310 +311 +312 +313 +314 +315 +316 +317 +318 +319 +320 +321 +322 +323 +324 +325 +326 +327 +328 +329 +330 +331 +332 |
|
__init__
+
+
+¤__init__()
+
conflictfree/grad_operator.py
154 +155 |
|
update_gradient
+
+
+¤update_gradient(
+ network: torch.nn.Module,
+ grads: Union[torch.Tensor, Sequence[torch.Tensor]],
+ losses: Optional[Sequence] = None,
+) -> None
+
Calculate the gradient and apply the gradient to the network.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ network
+ |
+
+ Module
+ |
+
+
+
+ The target network. + |
+ + required + | +
+ grads
+ |
+
+ Union[Tensor, Sequence[Tensor]]
+ |
+
+
+
+ The gradients to update. +It can be a stack of gradient vectors (at dim 0) or a sequence of gradient vectors. + |
+ + required + | +
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses associated with the gradients. +The losses will be passed to the weight and length model. If your weight/length model doesn't require loss information, +you can set this value as None. Defaults to None. + |
+
+ None
+ |
+
Returns:
+Type | +Description | +
---|---|
+ None
+ |
+
+
+
+ None + |
+
conflictfree/grad_operator.py
181 +182 +183 +184 +185 +186 +187 +188 +189 +190 +191 +192 +193 +194 +195 +196 +197 +198 +199 +200 +201 +202 |
|
calculate_gradient
+
+
+¤calculate_gradient(
+ grads: Union[torch.Tensor, Sequence[torch.Tensor]],
+ losses: Optional[Sequence] = None,
+) -> torch.Tensor
+
Calculates the gradient using the PCGrad algorithm.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ grads
+ |
+
+ Union[Tensor, Sequence[Tensor]]
+ |
+
+
+
+ The gradients to update. +It can be a stack of gradient vectors (at dim 0) or a sequence of gradient vectors. + |
+ + required + | +
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ This parameter should not be set for current operator. Defaults to None. + |
+
+ None
+ |
+
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ torch.Tensor: The calculated gradient using PCGrad method. + |
+
conflictfree/grad_operator.py
305 +306 +307 +308 +309 +310 +311 +312 +313 +314 +315 +316 +317 +318 +319 +320 +321 +322 +323 +324 +325 +326 +327 +328 +329 +330 +331 +332 |
|
conflictfree.grad_operator.IMTLGOperator
+
+
+¤
+ Bases: GradientOperator
PCGradOperator class represents a gradient operator for IMTL-G algorithm.
+@inproceedings{ +liu2021towards, +title={Towards Impartial Multi-task Learning}, +author={Liyang Liu and Yi Li and Zhanghui Kuang and Jing-Hao Xue and Yimin Chen and Wenming Yang and Qingmin Liao and Wayne Zhang}, +booktitle={International Conference on Learning Representations}, +year={2021}, +url={https://openreview.net/forum?id=IMPnRXEWpvr} +}
+ + + + + + +conflictfree/grad_operator.py
335 +336 +337 +338 +339 +340 +341 +342 +343 +344 +345 +346 +347 +348 +349 +350 +351 +352 +353 +354 +355 +356 +357 +358 +359 +360 +361 +362 +363 +364 +365 +366 +367 +368 +369 +370 +371 +372 +373 +374 +375 +376 +377 +378 +379 +380 |
|
__init__
+
+
+¤__init__()
+
conflictfree/grad_operator.py
154 +155 |
|
update_gradient
+
+
+¤update_gradient(
+ network: torch.nn.Module,
+ grads: Union[torch.Tensor, Sequence[torch.Tensor]],
+ losses: Optional[Sequence] = None,
+) -> None
+
Calculate the gradient and apply the gradient to the network.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ network
+ |
+
+ Module
+ |
+
+
+
+ The target network. + |
+ + required + | +
+ grads
+ |
+
+ Union[Tensor, Sequence[Tensor]]
+ |
+
+
+
+ The gradients to update. +It can be a stack of gradient vectors (at dim 0) or a sequence of gradient vectors. + |
+ + required + | +
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses associated with the gradients. +The losses will be passed to the weight and length model. If your weight/length model doesn't require loss information, +you can set this value as None. Defaults to None. + |
+
+ None
+ |
+
Returns:
+Type | +Description | +
---|---|
+ None
+ |
+
+
+
+ None + |
+
conflictfree/grad_operator.py
181 +182 +183 +184 +185 +186 +187 +188 +189 +190 +191 +192 +193 +194 +195 +196 +197 +198 +199 +200 +201 +202 |
|
calculate_gradient
+
+
+¤calculate_gradient(
+ grads: Union[torch.Tensor, Sequence[torch.Tensor]],
+ losses: Optional[Sequence] = None,
+) -> torch.Tensor
+
Calculates the gradient using the IMTL-G algorithm.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ grads
+ |
+
+ Union[Tensor, Sequence[Tensor]]
+ |
+
+
+
+ The gradients to update. +It can be a stack of gradient vectors (at dim 0) or a sequence of gradient vectors. + |
+ + required + | +
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ This parameter should not be set for current operator. Defaults to None. + |
+
+ None
+ |
+
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ torch.Tensor: The calculated gradient using IMTL-G method. + |
+
conflictfree/grad_operator.py
350 +351 +352 +353 +354 +355 +356 +357 +358 +359 +360 +361 +362 +363 +364 +365 +366 +367 +368 +369 +370 +371 +372 +373 +374 +375 +376 +377 +378 +379 +380 |
|
conflictfree.grad_operator.GradientOperator
+
+
+¤A base class that represents a gradient operator.
+ + +Methods:
+Name | +Description | +
---|---|
calculate_gradient |
+
+
+
+ Calculates the gradient based on the given gradients and losses. + |
+
update_gradient |
+
+
+
+ Updates the gradient of the network based on the calculated gradient. + |
+
conflictfree/grad_operator.py
144 +145 +146 +147 +148 +149 +150 +151 +152 +153 +154 +155 +156 +157 +158 +159 +160 +161 +162 +163 +164 +165 +166 +167 +168 +169 +170 +171 +172 +173 +174 +175 +176 +177 +178 +179 +180 +181 +182 +183 +184 +185 +186 +187 +188 +189 +190 +191 +192 +193 +194 +195 +196 +197 +198 +199 +200 +201 +202 |
|
__init__
+
+
+¤__init__()
+
conflictfree/grad_operator.py
154 +155 |
|
calculate_gradient
+
+
+¤calculate_gradient(
+ grads: Union[torch.Tensor, Sequence[torch.Tensor]],
+ losses: Optional[Sequence] = None,
+) -> torch.Tensor
+
Calculates the gradient based on the given gradients and losses.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ grads
+ |
+
+ Union[Tensor, Sequence[Tensor]]
+ |
+
+
+
+ The gradients to update. +It can be a stack of gradient vectors (at dim 0) or a sequence of gradient vectors. + |
+ + required + | +
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses associated with the gradients. +The losses will be passed to the weight and length model. If your weight/length model doesn't require loss information, +you can set this value as None. Defaults to None. + |
+
+ None
+ |
+
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ torch.Tensor: The calculated gradient. + |
+
Raises:
+Type | +Description | +
---|---|
+ NotImplementedError
+ |
+
+
+
+ If the method is not implemented. + |
+
conflictfree/grad_operator.py
157 +158 +159 +160 +161 +162 +163 +164 +165 +166 +167 +168 +169 +170 +171 +172 +173 +174 +175 +176 +177 +178 +179 |
|
update_gradient
+
+
+¤update_gradient(
+ network: torch.nn.Module,
+ grads: Union[torch.Tensor, Sequence[torch.Tensor]],
+ losses: Optional[Sequence] = None,
+) -> None
+
Calculate the gradient and apply the gradient to the network.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ network
+ |
+
+ Module
+ |
+
+
+
+ The target network. + |
+ + required + | +
+ grads
+ |
+
+ Union[Tensor, Sequence[Tensor]]
+ |
+
+
+
+ The gradients to update. +It can be a stack of gradient vectors (at dim 0) or a sequence of gradient vectors. + |
+ + required + | +
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses associated with the gradients. +The losses will be passed to the weight and length model. If your weight/length model doesn't require loss information, +you can set this value as None. Defaults to None. + |
+
+ None
+ |
+
Returns:
+Type | +Description | +
---|---|
+ None
+ |
+
+
+
+ None + |
+
conflictfree/grad_operator.py
181 +182 +183 +184 +185 +186 +187 +188 +189 +190 +191 +192 +193 +194 +195 +196 +197 +198 +199 +200 +201 +202 |
|
The length_model
module contains classes for rescaling the magnitude of the final gradient vector.
+The ProjectionLength
class is the default length model for the ConFIG algorithm. You can create a custom length model by inheriting from the LengthModel
class.
conflictfree.length_model.ProjectionLength
+
+
+¤
+ Bases: LengthModel
Rescale the length of the target vector based on the projection of the gradients on the target vector:
+conflictfree/length_model.py
70 + 71 + 72 + 73 + 74 + 75 + 76 + 77 + 78 + 79 + 80 + 81 + 82 + 83 + 84 + 85 + 86 + 87 + 88 + 89 + 90 + 91 + 92 + 93 + 94 + 95 + 96 + 97 + 98 + 99 +100 +101 +102 +103 +104 +105 +106 +107 +108 +109 |
|
rescale_length
+
+
+¤rescale_length(
+ target_vector: torch.Tensor,
+ gradients: Optional[torch.Tensor] = None,
+ losses: Optional[Sequence] = None,
+) -> torch.Tensor
+
Rescales the length of the target vector based on the given parameters. +It calls the get_length method to calculate the length and then rescales the target vector.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ target_vector
+ |
+
+ Tensor
+ |
+
+
+
+ The final update gradient vector. + |
+ + required + | +
+ gradients
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The loss-specific gradients matrix. The shape of this tensor should be (m,N) where m is the number of gradients and N is the number of elements of each gradients. + |
+
+ None
+ |
+
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses. + |
+
+ None
+ |
+
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ torch.Tensor: The rescaled target vector. + |
+
conflictfree/length_model.py
40 +41 +42 +43 +44 +45 +46 +47 +48 +49 +50 +51 +52 +53 +54 +55 +56 +57 +58 +59 +60 +61 +62 +63 +64 +65 +66 +67 |
|
__init__
+
+
+¤__init__()
+
conflictfree/length_model.py
79 +80 |
|
get_length
+
+
+¤get_length(
+ target_vector: Optional[torch.Tensor] = None,
+ unit_target_vector: Optional[torch.Tensor] = None,
+ gradients: Optional[torch.Tensor] = None,
+ losses: Optional[Sequence] = None,
+) -> torch.Tensor
+
Calculates the length based on the given parameters. Not all parameters are required.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ target_vector
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The final update gradient vector.
+One of the |
+
+ None
+ |
+
+ unit_target_vector
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The unit vector of the target vector.
+One of the |
+
+ None
+ |
+
+ gradients
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The loss-specific gradients matrix. The shape of this tensor should be (m,N) where m is the number of gradients and N is the number of elements of each gradients. + |
+
+ None
+ |
+
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses. Not used in this model. + |
+
+ None
+ |
+
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ Union[torch.Tensor, float]: The calculated length. + |
+
conflictfree/length_model.py
82 + 83 + 84 + 85 + 86 + 87 + 88 + 89 + 90 + 91 + 92 + 93 + 94 + 95 + 96 + 97 + 98 + 99 +100 +101 +102 +103 +104 +105 +106 +107 +108 +109 |
|
conflictfree.length_model.TrackMinimum
+
+
+¤
+ Bases: _FlexibleTrackProjectionLength
Rescale the length of the target vector based on the projection of the gradients on the target vector. +All the gradients will be rescaled to the same length as the minimum gradient before projection, i.e., the minimum gradient will be the same length as the target vector.
+conflictfree/length_model.py
160 +161 +162 +163 +164 +165 +166 +167 +168 +169 +170 +171 +172 +173 +174 |
|
get_length
+
+
+¤get_length(
+ target_vector: Optional[torch.Tensor] = None,
+ unit_target_vector: Optional[torch.Tensor] = None,
+ gradients: Optional[torch.Tensor] = None,
+ losses: Optional[Sequence] = None,
+) -> torch.Tensor
+
Calculates the length based on the given parameters. Not all parameters are required.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ target_vector
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The final update gradient vector.
+One of the |
+
+ None
+ |
+
+ unit_target_vector
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The unit vector of the target vector.
+One of the |
+
+ None
+ |
+
+ gradients
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The loss-specific gradients matrix. The shape of this tensor should be (m,N) where m is the number of gradients and N is the number of elements of each gradients. + |
+
+ None
+ |
+
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses. Not used in this model. + |
+
+ None
+ |
+
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ Union[torch.Tensor, float]: The calculated length. + |
+
conflictfree/length_model.py
122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 +142 +143 +144 +145 +146 +147 +148 +149 +150 +151 +152 +153 |
|
rescale_length
+
+
+¤rescale_length(
+ target_vector: torch.Tensor,
+ gradients: Optional[torch.Tensor] = None,
+ losses: Optional[Sequence] = None,
+) -> torch.Tensor
+
Rescales the length of the target vector based on the given parameters. +It calls the get_length method to calculate the length and then rescales the target vector.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ target_vector
+ |
+
+ Tensor
+ |
+
+
+
+ The final update gradient vector. + |
+ + required + | +
+ gradients
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The loss-specific gradients matrix. The shape of this tensor should be (m,N) where m is the number of gradients and N is the number of elements of each gradients. + |
+
+ None
+ |
+
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses. + |
+
+ None
+ |
+
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ torch.Tensor: The rescaled target vector. + |
+
conflictfree/length_model.py
40 +41 +42 +43 +44 +45 +46 +47 +48 +49 +50 +51 +52 +53 +54 +55 +56 +57 +58 +59 +60 +61 +62 +63 +64 +65 +66 +67 |
|
__init__
+
+
+¤__init__()
+
conflictfree/length_model.py
170 +171 |
|
conflictfree.length_model.TrackMaximum
+
+
+¤
+ Bases: _FlexibleTrackProjectionLength
Rescale the length of the target vector based on the projection of the gradients on the target vector. +All the gradients will be rescaled to the same length as the maximum gradient before projection, i.e., the maximum gradient will be the same length as the target vector.
+conflictfree/length_model.py
177 +178 +179 +180 +181 +182 +183 +184 +185 +186 +187 +188 +189 +190 +191 |
|
get_length
+
+
+¤get_length(
+ target_vector: Optional[torch.Tensor] = None,
+ unit_target_vector: Optional[torch.Tensor] = None,
+ gradients: Optional[torch.Tensor] = None,
+ losses: Optional[Sequence] = None,
+) -> torch.Tensor
+
Calculates the length based on the given parameters. Not all parameters are required.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ target_vector
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The final update gradient vector.
+One of the |
+
+ None
+ |
+
+ unit_target_vector
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The unit vector of the target vector.
+One of the |
+
+ None
+ |
+
+ gradients
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The loss-specific gradients matrix. The shape of this tensor should be (m,N) where m is the number of gradients and N is the number of elements of each gradients. + |
+
+ None
+ |
+
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses. Not used in this model. + |
+
+ None
+ |
+
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ Union[torch.Tensor, float]: The calculated length. + |
+
conflictfree/length_model.py
122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 +142 +143 +144 +145 +146 +147 +148 +149 +150 +151 +152 +153 |
|
rescale_length
+
+
+¤rescale_length(
+ target_vector: torch.Tensor,
+ gradients: Optional[torch.Tensor] = None,
+ losses: Optional[Sequence] = None,
+) -> torch.Tensor
+
Rescales the length of the target vector based on the given parameters. +It calls the get_length method to calculate the length and then rescales the target vector.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ target_vector
+ |
+
+ Tensor
+ |
+
+
+
+ The final update gradient vector. + |
+ + required + | +
+ gradients
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The loss-specific gradients matrix. The shape of this tensor should be (m,N) where m is the number of gradients and N is the number of elements of each gradients. + |
+
+ None
+ |
+
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses. + |
+
+ None
+ |
+
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ torch.Tensor: The rescaled target vector. + |
+
conflictfree/length_model.py
40 +41 +42 +43 +44 +45 +46 +47 +48 +49 +50 +51 +52 +53 +54 +55 +56 +57 +58 +59 +60 +61 +62 +63 +64 +65 +66 +67 |
|
__init__
+
+
+¤__init__()
+
conflictfree/length_model.py
187 +188 |
|
conflictfree.length_model.TrackHarmonicAverage
+
+
+¤
+ Bases: _FlexibleTrackProjectionLength
Rescale the length of the target vector based on the projection of the gradients on the target vector. +All the gradients will be rescaled to the harmonic average of the lengths of all gradients before projection, i.e., the target vector will have the same length as the harmonic average of all gradients.
+where
+The harmonic average can be used to avoid the influence of the large gradients.
+ + + + + + +conflictfree/length_model.py
194 +195 +196 +197 +198 +199 +200 +201 +202 +203 +204 +205 +206 +207 +208 +209 +210 +211 +212 +213 +214 +215 +216 |
|
get_length
+
+
+¤get_length(
+ target_vector: Optional[torch.Tensor] = None,
+ unit_target_vector: Optional[torch.Tensor] = None,
+ gradients: Optional[torch.Tensor] = None,
+ losses: Optional[Sequence] = None,
+) -> torch.Tensor
+
Calculates the length based on the given parameters. Not all parameters are required.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ target_vector
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The final update gradient vector.
+One of the |
+
+ None
+ |
+
+ unit_target_vector
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The unit vector of the target vector.
+One of the |
+
+ None
+ |
+
+ gradients
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The loss-specific gradients matrix. The shape of this tensor should be (m,N) where m is the number of gradients and N is the number of elements of each gradients. + |
+
+ None
+ |
+
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses. Not used in this model. + |
+
+ None
+ |
+
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ Union[torch.Tensor, float]: The calculated length. + |
+
conflictfree/length_model.py
122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 +142 +143 +144 +145 +146 +147 +148 +149 +150 +151 +152 +153 |
|
rescale_length
+
+
+¤rescale_length(
+ target_vector: torch.Tensor,
+ gradients: Optional[torch.Tensor] = None,
+ losses: Optional[Sequence] = None,
+) -> torch.Tensor
+
Rescales the length of the target vector based on the given parameters. +It calls the get_length method to calculate the length and then rescales the target vector.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ target_vector
+ |
+
+ Tensor
+ |
+
+
+
+ The final update gradient vector. + |
+ + required + | +
+ gradients
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The loss-specific gradients matrix. The shape of this tensor should be (m,N) where m is the number of gradients and N is the number of elements of each gradients. + |
+
+ None
+ |
+
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses. + |
+
+ None
+ |
+
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ torch.Tensor: The rescaled target vector. + |
+
conflictfree/length_model.py
40 +41 +42 +43 +44 +45 +46 +47 +48 +49 +50 +51 +52 +53 +54 +55 +56 +57 +58 +59 +60 +61 +62 +63 +64 +65 +66 +67 |
|
__init__
+
+
+¤__init__()
+
conflictfree/length_model.py
212 +213 |
|
conflictfree.length_model.TrackArithmeticAverage
+
+
+¤
+ Bases: _FlexibleTrackProjectionLength
Rescale the length of the target vector based on the projection of the gradients on the target vector. +All the gradients will be rescaled to the arithmetic average of the lengths of all gradients before projection, i.e., the target vector will have the same length as the arithmetic average of all gradients.
+where
+conflictfree/length_model.py
219 +220 +221 +222 +223 +224 +225 +226 +227 +228 +229 +230 +231 +232 +233 +234 +235 +236 +237 +238 +239 |
|
get_length
+
+
+¤get_length(
+ target_vector: Optional[torch.Tensor] = None,
+ unit_target_vector: Optional[torch.Tensor] = None,
+ gradients: Optional[torch.Tensor] = None,
+ losses: Optional[Sequence] = None,
+) -> torch.Tensor
+
Calculates the length based on the given parameters. Not all parameters are required.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ target_vector
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The final update gradient vector.
+One of the |
+
+ None
+ |
+
+ unit_target_vector
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The unit vector of the target vector.
+One of the |
+
+ None
+ |
+
+ gradients
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The loss-specific gradients matrix. The shape of this tensor should be (m,N) where m is the number of gradients and N is the number of elements of each gradients. + |
+
+ None
+ |
+
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses. Not used in this model. + |
+
+ None
+ |
+
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ Union[torch.Tensor, float]: The calculated length. + |
+
conflictfree/length_model.py
122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 +142 +143 +144 +145 +146 +147 +148 +149 +150 +151 +152 +153 |
|
rescale_length
+
+
+¤rescale_length(
+ target_vector: torch.Tensor,
+ gradients: Optional[torch.Tensor] = None,
+ losses: Optional[Sequence] = None,
+) -> torch.Tensor
+
Rescales the length of the target vector based on the given parameters. +It calls the get_length method to calculate the length and then rescales the target vector.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ target_vector
+ |
+
+ Tensor
+ |
+
+
+
+ The final update gradient vector. + |
+ + required + | +
+ gradients
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The loss-specific gradients matrix. The shape of this tensor should be (m,N) where m is the number of gradients and N is the number of elements of each gradients. + |
+
+ None
+ |
+
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses. + |
+
+ None
+ |
+
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ torch.Tensor: The rescaled target vector. + |
+
conflictfree/length_model.py
40 +41 +42 +43 +44 +45 +46 +47 +48 +49 +50 +51 +52 +53 +54 +55 +56 +57 +58 +59 +60 +61 +62 +63 +64 +65 +66 +67 |
|
__init__
+
+
+¤__init__()
+
conflictfree/length_model.py
235 +236 |
|
conflictfree.length_model.TrackGeometricAverage
+
+
+¤
+ Bases: _FlexibleTrackProjectionLength
Rescale the length of the target vector based on the projection of the gradients on the target vector. +All the gradients will be rescaled to the geometric average of the lengths of all gradients before projection, i.e., the target vector will have the same length as the geometric average of all gradients.
+where
+The geometric average can be used to avoid the influence of the large gradients.
+ + + + + + +conflictfree/length_model.py
242 +243 +244 +245 +246 +247 +248 +249 +250 +251 +252 +253 +254 +255 +256 +257 +258 +259 +260 +261 +262 +263 +264 |
|
get_length
+
+
+¤get_length(
+ target_vector: Optional[torch.Tensor] = None,
+ unit_target_vector: Optional[torch.Tensor] = None,
+ gradients: Optional[torch.Tensor] = None,
+ losses: Optional[Sequence] = None,
+) -> torch.Tensor
+
Calculates the length based on the given parameters. Not all parameters are required.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ target_vector
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The final update gradient vector.
+One of the |
+
+ None
+ |
+
+ unit_target_vector
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The unit vector of the target vector.
+One of the |
+
+ None
+ |
+
+ gradients
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The loss-specific gradients matrix. The shape of this tensor should be (m,N) where m is the number of gradients and N is the number of elements of each gradients. + |
+
+ None
+ |
+
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses. Not used in this model. + |
+
+ None
+ |
+
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ Union[torch.Tensor, float]: The calculated length. + |
+
conflictfree/length_model.py
122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 +142 +143 +144 +145 +146 +147 +148 +149 +150 +151 +152 +153 |
|
rescale_length
+
+
+¤rescale_length(
+ target_vector: torch.Tensor,
+ gradients: Optional[torch.Tensor] = None,
+ losses: Optional[Sequence] = None,
+) -> torch.Tensor
+
Rescales the length of the target vector based on the given parameters. +It calls the get_length method to calculate the length and then rescales the target vector.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ target_vector
+ |
+
+ Tensor
+ |
+
+
+
+ The final update gradient vector. + |
+ + required + | +
+ gradients
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The loss-specific gradients matrix. The shape of this tensor should be (m,N) where m is the number of gradients and N is the number of elements of each gradients. + |
+
+ None
+ |
+
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses. + |
+
+ None
+ |
+
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ torch.Tensor: The rescaled target vector. + |
+
conflictfree/length_model.py
40 +41 +42 +43 +44 +45 +46 +47 +48 +49 +50 +51 +52 +53 +54 +55 +56 +57 +58 +59 +60 +61 +62 +63 +64 +65 +66 +67 |
|
__init__
+
+
+¤__init__()
+
conflictfree/length_model.py
260 +261 |
|
conflictfree.length_model.TrackSpecific
+
+
+¤
+ Bases: _FlexibleTrackProjectionLength
Rescale the length of the target vector based on the projection of the gradients on the target vector. +All the gradients will be rescaled to the same length as the specific gradient before projection. +E.g., if the track_id is 2, then all the gradients will be rescaled to the same length as the third gradient before projection.
+conflictfree/length_model.py
267 +268 +269 +270 +271 +272 +273 +274 +275 +276 +277 +278 +279 +280 +281 +282 +283 +284 |
|
get_length
+
+
+¤get_length(
+ target_vector: Optional[torch.Tensor] = None,
+ unit_target_vector: Optional[torch.Tensor] = None,
+ gradients: Optional[torch.Tensor] = None,
+ losses: Optional[Sequence] = None,
+) -> torch.Tensor
+
Calculates the length based on the given parameters. Not all parameters are required.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ target_vector
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The final update gradient vector.
+One of the |
+
+ None
+ |
+
+ unit_target_vector
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The unit vector of the target vector.
+One of the |
+
+ None
+ |
+
+ gradients
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The loss-specific gradients matrix. The shape of this tensor should be (m,N) where m is the number of gradients and N is the number of elements of each gradients. + |
+
+ None
+ |
+
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses. Not used in this model. + |
+
+ None
+ |
+
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ Union[torch.Tensor, float]: The calculated length. + |
+
conflictfree/length_model.py
122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 +142 +143 +144 +145 +146 +147 +148 +149 +150 +151 +152 +153 |
|
rescale_length
+
+
+¤rescale_length(
+ target_vector: torch.Tensor,
+ gradients: Optional[torch.Tensor] = None,
+ losses: Optional[Sequence] = None,
+) -> torch.Tensor
+
Rescales the length of the target vector based on the given parameters. +It calls the get_length method to calculate the length and then rescales the target vector.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ target_vector
+ |
+
+ Tensor
+ |
+
+
+
+ The final update gradient vector. + |
+ + required + | +
+ gradients
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The loss-specific gradients matrix. The shape of this tensor should be (m,N) where m is the number of gradients and N is the number of elements of each gradients. + |
+
+ None
+ |
+
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses. + |
+
+ None
+ |
+
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ torch.Tensor: The rescaled target vector. + |
+
conflictfree/length_model.py
40 +41 +42 +43 +44 +45 +46 +47 +48 +49 +50 +51 +52 +53 +54 +55 +56 +57 +58 +59 +60 +61 +62 +63 +64 +65 +66 +67 |
|
__init__
+
+
+¤__init__(track_id: int)
+
conflictfree/length_model.py
279 +280 +281 |
|
conflictfree.length_model.LengthModel
+
+
+¤The base class for length model.
+ + +Methods:
+Name | +Description | +
---|---|
get_length |
+
+
+
+ Calculates the length based on the given parameters. + |
+
rescale_length |
+
+
+
+ Rescales the length of the target vector based on the given parameters. + |
+
conflictfree/length_model.py
6 + 7 + 8 + 9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +21 +22 +23 +24 +25 +26 +27 +28 +29 +30 +31 +32 +33 +34 +35 +36 +37 +38 +39 +40 +41 +42 +43 +44 +45 +46 +47 +48 +49 +50 +51 +52 +53 +54 +55 +56 +57 +58 +59 +60 +61 +62 +63 +64 +65 +66 +67 |
|
__init__
+
+
+¤__init__()
+
conflictfree/length_model.py
15 +16 |
|
get_length
+
+
+¤get_length(
+ target_vector: Optional[torch.Tensor] = None,
+ unit_target_vector: Optional[torch.Tensor] = None,
+ gradients: Optional[torch.Tensor] = None,
+ losses: Optional[Sequence] = None,
+) -> Union[torch.Tensor, float]
+
Calculates the length based on the given parameters. Not all parameters are required.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ target_vector
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The final update gradient vector. + |
+
+ None
+ |
+
+ unit_target_vector
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The unit vector of the target vector. + |
+
+ None
+ |
+
+ gradients
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The loss-specific gradients matrix. The shape of this tensor should be (m,N) where m is the number of gradients and N is the number of elements of each gradients. + |
+
+ None
+ |
+
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses. + |
+
+ None
+ |
+
Returns:
+Type | +Description | +
---|---|
+ Union[Tensor, float]
+ |
+
+
+
+ Union[torch.Tensor, float]: The calculated length. + |
+
conflictfree/length_model.py
18 +19 +20 +21 +22 +23 +24 +25 +26 +27 +28 +29 +30 +31 +32 +33 +34 +35 +36 +37 +38 |
|
rescale_length
+
+
+¤rescale_length(
+ target_vector: torch.Tensor,
+ gradients: Optional[torch.Tensor] = None,
+ losses: Optional[Sequence] = None,
+) -> torch.Tensor
+
Rescales the length of the target vector based on the given parameters. +It calls the get_length method to calculate the length and then rescales the target vector.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ target_vector
+ |
+
+ Tensor
+ |
+
+
+
+ The final update gradient vector. + |
+ + required + | +
+ gradients
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The loss-specific gradients matrix. The shape of this tensor should be (m,N) where m is the number of gradients and N is the number of elements of each gradients. + |
+
+ None
+ |
+
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses. + |
+
+ None
+ |
+
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ torch.Tensor: The rescaled target vector. + |
+
conflictfree/length_model.py
40 +41 +42 +43 +44 +45 +46 +47 +48 +49 +50 +51 +52 +53 +54 +55 +56 +57 +58 +59 +60 +61 +62 +63 +64 +65 +66 +67 |
|
The loss_recorder
module contains classes for recording the loss values during the optimization process.
+It is used in the momentum version of the ConFIG algorithm to record the loss values. Not every loss is calculated in a single iteration with the momentum version of the ConFIG algorithm. However, sometimes we need to know the information of all the loss values, e.g., logging and calculating length/weight model. You can create a custom loss recorder by inheriting from the LossRecorder
class.
conflictfree.loss_recorder.LatestLossRecorder
+
+
+¤
+ Bases: LossRecorder
A loss recorder return the latest losses.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ num_losses
+ |
+
+ int
+ |
+
+
+
+ The number of losses to record + |
+ + required + | +
conflictfree/loss_recorder.py
69 +70 +71 +72 +73 +74 +75 +76 +77 +78 +79 +80 +81 +82 +83 +84 +85 +86 +87 +88 +89 +90 +91 +92 +93 +94 +95 +96 +97 |
|
current_losses
+
+
+
+ instance-attribute
+
+
+¤current_losses = [0.0 for i in range(num_losses)]
+
record_all_losses
+
+
+¤record_all_losses(losses: Sequence) -> list
+
Records all the losses and returns the recorded losses.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ losses
+ |
+
+ Tensor
+ |
+
+
+
+ The losses to record. + |
+ + required + | +
Returns:
+Name | Type | +Description | +
---|---|---|
list |
+ list
+ |
+
+
+
+ The recorded losses. + |
+
conflictfree/loss_recorder.py
35 +36 +37 +38 +39 +40 +41 +42 +43 +44 +45 +46 +47 |
|
__init__
+
+
+¤__init__(num_losses: int) -> None
+
conflictfree/loss_recorder.py
77 +78 |
|
record_loss
+
+
+¤record_loss(
+ losses_indexes: Union[int, Sequence[int]],
+ losses: Union[float, Sequence],
+) -> list
+
Records the given loss and returns the recorded loss.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ losses_indexes
+ |
+
+ Union[int, Sequence[int]]
+ |
+
+
+
+ The index of the loss. + |
+ + required + | +
+ losses
+ |
+
+ Tensor
+ |
+
+
+
+ The loss to record. + |
+ + required + | +
Returns:
+Name | Type | +Description | +
---|---|---|
list |
+ list
+ |
+
+
+
+ The recorded loss. + |
+
conflictfree/loss_recorder.py
80 +81 +82 +83 +84 +85 +86 +87 +88 +89 +90 +91 +92 +93 +94 +95 +96 +97 |
|
conflictfree.loss_recorder.MomentumLossRecorder
+
+
+¤
+ Bases: LossRecorder
A loss recorder that records the momentum of the loss.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ num_losses
+ |
+
+ int
+ |
+
+
+
+ The number of losses to record + |
+ + required + | +
+ betas
+ |
+
+ Union[float, Sequence[float]]
+ |
+
+
+
+ The moving average constant. + |
+
+ 0.9
+ |
+
conflictfree/loss_recorder.py
100 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 |
|
current_losses
+
+
+
+ instance-attribute
+
+
+¤current_losses = [0.0 for i in range(num_losses)]
+
record_all_losses
+
+
+¤record_all_losses(losses: Sequence) -> list
+
Records all the losses and returns the recorded losses.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ losses
+ |
+
+ Tensor
+ |
+
+
+
+ The losses to record. + |
+ + required + | +
Returns:
+Name | Type | +Description | +
---|---|---|
list |
+ list
+ |
+
+
+
+ The recorded losses. + |
+
conflictfree/loss_recorder.py
35 +36 +37 +38 +39 +40 +41 +42 +43 +44 +45 +46 +47 |
|
__init__
+
+
+¤__init__(
+ num_losses: int,
+ betas: Union[float, Sequence[float]] = 0.9,
+)
+
conflictfree/loss_recorder.py
109 +110 +111 +112 +113 +114 |
|
record_loss
+
+
+¤record_loss(
+ losses_indexes: Union[int, Sequence[int]],
+ losses: Union[float, Sequence],
+) -> list
+
Records the given loss and returns the recorded loss.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ losses_indexes
+ |
+
+ Union[int, Sequence[int]]
+ |
+
+
+
+ The index of the loss. + |
+ + required + | +
+ losses
+ |
+
+ Tensor
+ |
+
+
+
+ The loss to record. + |
+ + required + | +
Returns:
+Name | Type | +Description | +
---|---|---|
list |
+ list
+ |
+
+
+
+ The recorded loss. + |
+
conflictfree/loss_recorder.py
116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 |
|
conflictfree.loss_recorder.LossRecorder
+
+
+¤Base class for loss recorders.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ num_losses
+ |
+
+ int
+ |
+
+
+
+ The number of losses to record + |
+ + required + | +
conflictfree/loss_recorder.py
4 + 5 + 6 + 7 + 8 + 9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +21 +22 +23 +24 +25 +26 +27 +28 +29 +30 +31 +32 +33 +34 +35 +36 +37 +38 +39 +40 +41 +42 +43 +44 +45 +46 +47 +48 +49 +50 +51 +52 +53 +54 +55 +56 +57 +58 +59 +60 +61 +62 +63 +64 +65 +66 |
|
current_losses
+
+
+
+ instance-attribute
+
+
+¤current_losses = [0.0 for i in range(num_losses)]
+
__init__
+
+
+¤__init__(num_losses: int) -> None
+
conflictfree/loss_recorder.py
12 +13 +14 |
|
record_loss
+
+
+¤record_loss(
+ losses_indexes: Union[int, Sequence[int]],
+ losses: Union[float, Sequence],
+) -> list
+
Records the given loss and returns the recorded losses.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ losses_indexes
+ |
+
+ Union[int, Sequence[int]]
+ |
+
+
+
+ The index of the loss. + |
+ + required + | +
+ losses
+ |
+
+ Tensor
+ |
+
+
+
+ The loss to record. + |
+ + required + | +
Returns:
+Name | Type | +Description | +
---|---|---|
list |
+ list
+ |
+
+
+
+ The recorded losses. + |
+
Raises:
+Type | +Description | +
---|---|
+ NotImplementedError
+ |
+
+
+
+ If the method is not implemented. + |
+
conflictfree/loss_recorder.py
16 +17 +18 +19 +20 +21 +22 +23 +24 +25 +26 +27 +28 +29 +30 +31 +32 +33 |
|
record_all_losses
+
+
+¤record_all_losses(losses: Sequence) -> list
+
Records all the losses and returns the recorded losses.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ losses
+ |
+
+ Tensor
+ |
+
+
+
+ The losses to record. + |
+ + required + | +
Returns:
+Name | Type | +Description | +
---|---|---|
list |
+ list
+ |
+
+
+
+ The recorded losses. + |
+
conflictfree/loss_recorder.py
35 +36 +37 +38 +39 +40 +41 +42 +43 +44 +45 +46 +47 |
|
The momentum_operator
module contains the main operators for the momentum version ConFIG algorithm.
conflictfree.momentum_operator.PseudoMomentumOperator
+
+
+¤
+ Bases: MomentumOperator
The major momentum version. +In this operator, the second momentum is estimated by a pseudo gradient based on the result of the gradient operator. +NOTE: A momentum-based optimizer, e.g., Adam, is not recommended when using this operator. Please consider using the SGD optimizer instead.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ num_vectors
+ |
+
+ int
+ |
+
+
+
+ The number of gradient vectors. + |
+ + required + | +
+ beta_1
+ |
+
+ float
+ |
+
+
+
+ The moving average constant for the first momentum. + |
+
+ 0.9
+ |
+
+ beta_2
+ |
+
+ float
+ |
+
+
+
+ The moving average constant for the second momentum. + |
+
+ 0.999
+ |
+
+ gradient_operator
+ |
+
+ GradientOperator
+ |
+
+
+
+ The base gradient operator. Defaults to ConFIGOperator(). + |
+
+ ConFIGOperator()
+ |
+
+ loss_recorder
+ |
+
+ LossRecorder
+ |
+
+
+
+ The loss recorder object. +If you want to pass loss information to "update_gradient" method or "apply_gradient" method, you need to specify a loss recorder. Defaults to None. + |
+
+ None
+ |
+
Methods:
+Name | +Description | +
---|---|
calculate_gradient |
+
+
+
+ Calculates the gradient based on the given indexes, gradients, and losses. + |
+
update_gradient |
+
+
+
+ Updates the gradient of the given network with the calculated gradient. + |
+
Examples: +
from conflictfree.momentum_operator import PseudoMomentumOperator
+from conflictfree.utils import get_gradient_vector,apply_gradient_vector
+optimizer=torch.optim.Adam(network.parameters(),lr=1e-3)
+operator=PseudoMomentumOperator(num_vectors=len(loss_fns)) # initialize operator, the only difference here is we need to specify the number of gradient vectors.
+for input_i in dataset:
+ grads=[]
+ for loss_fn in loss_fns:
+ optimizer.zero_grad()
+ loss_i=loss_fn(input_i)
+ loss_i.backward()
+ grads.append(get_gradient_vector(network))
+ g_config=operator.calculate_gradient(grads) # calculate the conflict-free direction
+    apply_gradient_vector(network,g_config) # or simply use `operator.update_gradient(network,grads)` to calculate and set the conflict-free direction to the network
+ optimizer.step()
+
conflictfree/momentum_operator.py
140 +141 +142 +143 +144 +145 +146 +147 +148 +149 +150 +151 +152 +153 +154 +155 +156 +157 +158 +159 +160 +161 +162 +163 +164 +165 +166 +167 +168 +169 +170 +171 +172 +173 +174 +175 +176 +177 +178 +179 +180 +181 +182 +183 +184 +185 +186 +187 +188 +189 +190 +191 +192 +193 +194 +195 +196 +197 +198 +199 +200 +201 +202 +203 +204 +205 +206 +207 +208 +209 +210 +211 +212 +213 +214 +215 +216 +217 +218 +219 +220 +221 +222 +223 +224 +225 +226 +227 +228 +229 +230 +231 +232 +233 +234 +235 +236 +237 +238 +239 +240 +241 +242 +243 +244 +245 +246 +247 +248 +249 +250 +251 +252 +253 +254 +255 +256 +257 +258 +259 +260 +261 +262 +263 +264 +265 +266 |
|
gradient_operator
+
+
+
+ instance-attribute
+
+
+¤gradient_operator = gradient_operator
+
loss_recorder
+
+
+
+ instance-attribute
+
+
+¤loss_recorder = loss_recorder
+
update_gradient
+
+
+¤update_gradient(
+ network: torch.nn.Module,
+ indexes: Union[int, Sequence[int]],
+ grads: Union[torch.Tensor, Sequence[torch.Tensor]],
+ losses: Optional[Union[float, Sequence]] = None,
+) -> None
+
Updates the gradient of the given network with the calculated gradient.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ network
+ |
+
+ Module
+ |
+
+
+
+ The network to update the gradient. + |
+ + required + | +
+ indexes
+ |
+
+ Union[int, Sequence[int]]
+ |
+
+
+
+ The indexes of the gradient vectors and losses to be updated. +The momentum with the given indexes will be updated based on the given gradients. + |
+ + required + | +
+ grads
+ |
+
+ Union[Tensor, Sequence[Tensor]]
+ |
+
+
+
+ The gradients to update. +It can be a stack of gradient vectors (at dim 0) or a sequence of gradient vectors. + |
+ + required + | +
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses associated with the gradients. +The losses will be passed to base gradient operator. If the base gradient operator doesn't require loss information, +you can set this value as None. Defaults to None. + |
+
+ None
+ |
+
Raises:
+Type | +Description | +
---|---|
+ NotImplementedError
+ |
+
+
+
+ This method must be implemented in a subclass. + |
+
Returns:
+Type | +Description | +
---|---|
+ None
+ |
+
+
+
+ None + |
+
conflictfree/momentum_operator.py
111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 |
|
__init__
+
+
+¤__init__(
+ num_vectors: int,
+ beta_1: float = 0.9,
+ beta_2: float = 0.999,
+ gradient_operator: GradientOperator = ConFIGOperator(),
+ loss_recorder: Optional[LossRecorder] = None,
+) -> None
+
conflictfree/momentum_operator.py
179 +180 +181 +182 +183 +184 +185 +186 +187 +188 +189 +190 +191 +192 +193 |
|
calculate_gradient
+
+
+¤calculate_gradient(
+ indexes: Union[int, Sequence[int]],
+ grads: Union[torch.Tensor, Sequence[torch.Tensor]],
+ losses: Optional[Union[float, Sequence]] = None,
+) -> torch.Tensor
+
Calculates the gradient based on the given indexes, gradients, and losses.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ indexes
+ |
+
+ Union[int, Sequence[int]]
+ |
+
+
+
+ The indexes of the gradient vectors and losses to be updated. +The momentum with the given indexes will be updated based on the given gradients. + |
+ + required + | +
+ grads
+ |
+
+ Union[Tensor, Sequence[Tensor]]
+ |
+
+
+
+ The gradients to update. +It can be a stack of gradient vectors (at dim 0) or a sequence of gradient vectors. + |
+ + required + | +
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses associated with the gradients. +The losses will be passed to base gradient operator. If the base gradient operator doesn't require loss information, +you can set this value as None. Defaults to None. + |
+
+ None
+ |
+
Raises:
+Type | +Description | +
---|---|
+ NotImplementedError
+ |
+
+
+
+ This method must be implemented in a subclass. + |
+
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ torch.Tensor: The calculated gradient. + |
+
conflictfree/momentum_operator.py
213 +214 +215 +216 +217 +218 +219 +220 +221 +222 +223 +224 +225 +226 +227 +228 +229 +230 +231 +232 +233 +234 +235 +236 +237 +238 +239 +240 +241 +242 +243 +244 +245 +246 +247 +248 +249 +250 +251 +252 +253 +254 +255 +256 +257 +258 +259 +260 +261 +262 +263 +264 +265 +266 |
|
conflictfree.momentum_operator.SeparateMomentumOperator
+
+
+¤
+ Bases: MomentumOperator
In this operator, each gradient has its own second momentum. The gradient operator is applied on the rescaled momentum. +NOTE: Please consider using the PseudoMomentumOperator since this operator does not give good performance according to our research. +Momentum-based optimizers, e.g., Adam, are not recommended when using this operator. Please consider using the SGD optimizer instead.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ num_vectors
+ |
+
+ int
+ |
+
+
+
+ The number of gradient vectors. + |
+ + required + | +
+ beta_1
+ |
+
+ float
+ |
+
+
+
+ The moving average constant for the first momentum. + |
+
+ 0.9
+ |
+
+ beta_2
+ |
+
+ float
+ |
+
+
+
+ The moving average constant for the second momentum. + |
+
+ 0.999
+ |
+
+ gradient_operator
+ |
+
+ GradientOperator
+ |
+
+
+
+ The base gradient operator. Defaults to ConFIGOperator(). + |
+
+ ConFIGOperator()
+ |
+
+ loss_recorder
+ |
+
+ LossRecorder
+ |
+
+
+
+ The loss recorder object. +If you want to pass loss information to "update_gradient" method or "apply_gradient" method, you need to specify a loss recorder. Defaults to None. + |
+
+ None
+ |
+
Methods:
+Name | +Description | +
---|---|
calculate_gradient |
+
+
+
+ Calculates the gradient based on the given indexes, gradients, and losses. + |
+
update_gradient |
+
+
+
+ Updates the gradient of the given network with the calculated gradient. + |
+
conflictfree/momentum_operator.py
269 +270 +271 +272 +273 +274 +275 +276 +277 +278 +279 +280 +281 +282 +283 +284 +285 +286 +287 +288 +289 +290 +291 +292 +293 +294 +295 +296 +297 +298 +299 +300 +301 +302 +303 +304 +305 +306 +307 +308 +309 +310 +311 +312 +313 +314 +315 +316 +317 +318 +319 +320 +321 +322 +323 +324 +325 +326 +327 +328 +329 +330 +331 +332 +333 +334 +335 +336 +337 +338 +339 +340 +341 +342 +343 +344 +345 +346 +347 +348 +349 +350 +351 +352 +353 +354 +355 +356 +357 +358 +359 +360 +361 +362 +363 +364 +365 +366 +367 +368 +369 +370 +371 +372 +373 +374 +375 +376 +377 +378 +379 +380 +381 +382 +383 |
|
gradient_operator
+
+
+
+ instance-attribute
+
+
+¤gradient_operator = gradient_operator
+
loss_recorder
+
+
+
+ instance-attribute
+
+
+¤loss_recorder = loss_recorder
+
update_gradient
+
+
+¤update_gradient(
+ network: torch.nn.Module,
+ indexes: Union[int, Sequence[int]],
+ grads: Union[torch.Tensor, Sequence[torch.Tensor]],
+ losses: Optional[Union[float, Sequence]] = None,
+) -> None
+
Updates the gradient of the given network with the calculated gradient.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ network
+ |
+
+ Module
+ |
+
+
+
+ The network to update the gradient. + |
+ + required + | +
+ indexes
+ |
+
+ Union[int, Sequence[int]]
+ |
+
+
+
+ The indexes of the gradient vectors and losses to be updated. +The momentum with the given indexes will be updated based on the given gradients. + |
+ + required + | +
+ grads
+ |
+
+ Union[Tensor, Sequence[Tensor]]
+ |
+
+
+
+ The gradients to update. +It can be a stack of gradient vectors (at dim 0) or a sequence of gradient vectors. + |
+ + required + | +
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses associated with the gradients. +The losses will be passed to base gradient operator. If the base gradient operator doesn't require loss information, +you can set this value as None. Defaults to None. + |
+
+ None
+ |
+
Raises:
+Type | +Description | +
---|---|
+ NotImplementedError
+ |
+
+
+
+ This method must be implemented in a subclass. + |
+
Returns:
+Type | +Description | +
---|---|
+ None
+ |
+
+
+
+ None + |
+
conflictfree/momentum_operator.py
111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 |
|
__init__
+
+
+¤__init__(
+ num_vectors: int,
+ beta_1: float = 0.9,
+ beta_2: float = 0.999,
+ gradient_operator: GradientOperator = ConFIGOperator(),
+ loss_recorder: Optional[LossRecorder] = None,
+) -> None
+
conflictfree/momentum_operator.py
291 +292 +293 +294 +295 +296 +297 +298 +299 +300 +301 +302 +303 |
|
calculate_gradient
+
+
+¤calculate_gradient(
+ indexes: Union[int, Sequence[int]],
+ grads: Union[torch.Tensor, Sequence[torch.Tensor]],
+ losses: Optional[Union[float, Sequence]] = None,
+) -> torch.Tensor
+
Calculates the gradient based on the given indexes, gradients, and losses.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ indexes
+ |
+
+ Union[int, Sequence[int]]
+ |
+
+
+
+ The indexes of the gradient vectors and losses to be updated. +The momentum with the given indexes will be updated based on the given gradients. + |
+ + required + | +
+ grads
+ |
+
+ Union[Tensor, Sequence[Tensor]]
+ |
+
+
+
+ The gradients to update. +It can be a stack of gradient vectors (at dim 0) or a sequence of gradient vectors. + |
+ + required + | +
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses associated with the gradients. +The losses will be passed to base gradient operator. If the base gradient operator doesn't require loss information, +you can set this value as None. Defaults to None. + |
+
+ None
+ |
+
Raises:
+Type | +Description | +
---|---|
+ NotImplementedError
+ |
+
+
+
+ This method must be implemented in a subclass. + |
+
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ torch.Tensor: The calculated gradient. + |
+
conflictfree/momentum_operator.py
325 +326 +327 +328 +329 +330 +331 +332 +333 +334 +335 +336 +337 +338 +339 +340 +341 +342 +343 +344 +345 +346 +347 +348 +349 +350 +351 +352 +353 +354 +355 +356 +357 +358 +359 +360 +361 +362 +363 +364 +365 +366 +367 +368 +369 +370 +371 +372 +373 +374 +375 +376 +377 +378 +379 +380 +381 +382 +383 |
|
conflictfree.momentum_operator.LatestLossRecorder
+
+
+¤
+ Bases: LossRecorder
A loss recorder that returns the latest losses.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ num_losses
+ |
+
+ int
+ |
+
+
+
+ The number of losses to record + |
+ + required + | +
conflictfree/loss_recorder.py
69 +70 +71 +72 +73 +74 +75 +76 +77 +78 +79 +80 +81 +82 +83 +84 +85 +86 +87 +88 +89 +90 +91 +92 +93 +94 +95 +96 +97 |
|
current_losses
+
+
+
+ instance-attribute
+
+
+¤current_losses = [0.0 for i in range(num_losses)]
+
record_all_losses
+
+
+¤record_all_losses(losses: Sequence) -> list
+
Records all the losses and returns the recorded losses.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ losses
+ |
+
+ Tensor
+ |
+
+
+
+ The losses to record. + |
+ + required + | +
Returns:
+Name | Type | +Description | +
---|---|---|
list |
+ list
+ |
+
+
+
+ The recorded losses. + |
+
conflictfree/loss_recorder.py
35 +36 +37 +38 +39 +40 +41 +42 +43 +44 +45 +46 +47 |
|
__init__
+
+
+¤__init__(num_losses: int) -> None
+
conflictfree/loss_recorder.py
77 +78 |
|
record_loss
+
+
+¤record_loss(
+ losses_indexes: Union[int, Sequence[int]],
+ losses: Union[float, Sequence],
+) -> list
+
Records the given loss and returns the recorded loss.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ losses_indexes
+ |
+
+ Union[int, Sequence[int]]
+ |
+
+
+
+ The index of the loss. + |
+ + required + | +
+ losses
+ |
+
+ Tensor
+ |
+
+
+
+ The loss to record. + |
+ + required + | +
Returns:
+Name | Type | +Description | +
---|---|---|
list |
+ list
+ |
+
+
+
+ The recorded loss. + |
+
conflictfree/loss_recorder.py
80 +81 +82 +83 +84 +85 +86 +87 +88 +89 +90 +91 +92 +93 +94 +95 +96 +97 |
|
The utils
module contains utility functions for the ConFIG algorithm.
conflictfree.utils.get_para_vector
+
+
+¤get_para_vector(network: torch.nn.Module) -> torch.Tensor
+
Returns the parameter vector of the given network.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ network
+ |
+
+ Module
+ |
+
+
+
+ The network for which to compute the gradient vector. + |
+ + required + | +
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ torch.Tensor: The parameter vector of the network. + |
+
conflictfree/utils.py
8 + 9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +21 +22 +23 +24 +25 +26 |
|
conflictfree.utils.apply_para_vector
+
+
+¤apply_para_vector(
+ network: torch.nn.Module, para_vec: torch.Tensor
+) -> None
+
Applies a parameter vector to the network's parameters.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ network
+ |
+
+ Module
+ |
+
+
+
+ The network to apply the parameter vector to. + |
+ + required + | +
+ para_vec
+ |
+
+ Tensor
+ |
+
+
+
+ The parameter vector to apply. + |
+ + required + | +
conflictfree/utils.py
142 +143 +144 +145 +146 +147 +148 +149 +150 +151 +152 +153 +154 +155 |
|
conflictfree.utils.get_gradient_vector
+
+
+¤get_gradient_vector(
+ network: torch.nn.Module,
+ none_grad_mode: Literal[
+ "raise", "zero", "skip"
+ ] = "skip",
+) -> torch.Tensor
+
Returns the gradient vector of the given network.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ network
+ |
+
+ Module
+ |
+
+
+
+ The network for which to compute the gradient vector. + |
+ + required + | +
+ none_grad_mode
+ |
+
+ Literal['raise', 'zero', 'skip']
+ |
+
+
+
+ The mode to handle None gradients. default: 'skip' +- 'raise': Raise an error when the gradient of a parameter is None. +- 'zero': Replace the None gradient with a zero tensor. +- 'skip': Skip the None gradient. + The None gradient usually occurs when part of the network is not trainable (e.g., fine-tuning) +or the weight is not used to calculate the current loss (e.g., different parts of the network calculate different losses). +If all of your losses are calculated using the same part of the network, you should set none_grad_mode to 'skip'. +If your losses are calculated using different parts of the network, you should set none_grad_mode to 'zero' to ensure the gradients have the same shape. + |
+
+ 'skip'
+ |
+
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ torch.Tensor: The gradient vector of the network. + |
+
conflictfree/utils.py
29 +30 +31 +32 +33 +34 +35 +36 +37 +38 +39 +40 +41 +42 +43 +44 +45 +46 +47 +48 +49 +50 +51 +52 +53 +54 +55 +56 +57 +58 +59 +60 +61 +62 +63 +64 +65 +66 +67 |
|
conflictfree.utils.apply_gradient_vector
+
+
+¤apply_gradient_vector(
+ network: torch.nn.Module,
+ grad_vec: torch.Tensor,
+ none_grad_mode: Literal["zero", "skip"] = "skip",
+ zero_grad_mode: Literal[
+ "skip", "pad_zero", "pad_value"
+ ] = "pad_value",
+) -> None
+
Applies a gradient vector to the network's parameters.
+This function requires that the network already contains gradient information in order to apply the gradient vector.
+If your network does not contain the gradient information, you should consider using apply_gradient_vector_para_based
function.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ network
+ |
+
+ Module
+ |
+
+
+
+ The network to apply the gradient vector to. + |
+ + required + | +
+ grad_vec
+ |
+
+ Tensor
+ |
+
+
+
+ The gradient vector to apply. + |
+ + required + | +
+ none_grad_mode
+ |
+
+ Literal['zero', 'skip']
+ |
+
+
+
+ The mode to handle None gradients.
+You should set this parameter to the same value as the one used in |
+
+ 'skip'
+ |
+
+ zero_grad_mode
+ |
+
+ Literal['padding', 'skip']
+ |
+
+
+
+ How to set the value of the gradient if your |
+
+ 'pad_value'
+ |
+
conflictfree/utils.py
70 + 71 + 72 + 73 + 74 + 75 + 76 + 77 + 78 + 79 + 80 + 81 + 82 + 83 + 84 + 85 + 86 + 87 + 88 + 89 + 90 + 91 + 92 + 93 + 94 + 95 + 96 + 97 + 98 + 99 +100 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 |
|
conflictfree.utils.apply_gradient_vector_para_based
+
+
+¤apply_gradient_vector_para_based(
+ network: torch.nn.Module, grad_vec: torch.Tensor
+) -> None
+
Applies a gradient vector to the network's parameters.
+Please only use this function when you are sure that the length of grad_vec
is the same as that of your network's parameters.
+This happens when you use get_gradient_vector
with none_grad_mode
set to 'zero'.
+Or, the 'none_grad_mode' is 'skip' but all of the parameters in your network are involved in the loss calculation.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ network
+ |
+
+ Module
+ |
+
+
+
+ The network to apply the gradient vector to. + |
+ + required + | +
+ grad_vec
+ |
+
+ Tensor
+ |
+
+
+
+ The gradient vector to apply. + |
+ + required + | +
conflictfree/utils.py
120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 |
|
conflictfree.utils.get_cos_similarity
+
+
+¤get_cos_similarity(
+ vector1: torch.Tensor, vector2: torch.Tensor
+) -> torch.Tensor
+
Calculates the cosine similarity between two vectors.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ vector1
+ |
+
+ Tensor
+ |
+
+
+
+ The first vector. + |
+ + required + | +
+ vector2
+ |
+
+ Tensor
+ |
+
+
+
+ The second vector. + |
+ + required + | +
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ torch.Tensor: The cosine similarity between the two vectors. + |
+
conflictfree/utils.py
158 +159 +160 +161 +162 +163 +164 +165 +166 +167 +168 +169 +170 |
|
conflictfree.utils.unit_vector
+
+
+¤unit_vector(
+ vector: torch.Tensor, warn_zero: bool = False
+) -> torch.Tensor
+
Compute the unit vector of a given tensor.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ vector
+ |
+
+ Tensor
+ |
+
+
+
+ The input tensor. + |
+ + required + | +
+ warn_zero
+ |
+
+ bool
+ |
+
+
+
+ Whether to print a warning when the input tensor is zero. default: False + |
+
+ False
+ |
+
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ torch.Tensor: The unit vector of the input tensor. + |
+
conflictfree/utils.py
173 +174 +175 +176 +177 +178 +179 +180 +181 +182 +183 +184 +185 +186 +187 +188 +189 +190 |
|
conflictfree.utils.estimate_conflict
+
+
+¤estimate_conflict(gradients: torch.Tensor) -> torch.Tensor
+
Estimates the degree of conflict of gradients.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ gradients
+ |
+
+ Tensor
+ |
+
+
+
+ A tensor containing gradients. + |
+ + required + | +
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ torch.Tensor: A tensor consisting of the dot products between the sum of gradients and each sub-gradient. + |
+
conflictfree/utils.py
230 +231 +232 +233 +234 +235 +236 +237 +238 +239 +240 +241 +242 |
|
conflictfree.utils.OrderedSliceSelector
+
+
+¤Selects a slice of the source sequence in order. +Usually used for selecting loss functions/gradients/losses in momentum-based methods if you want to update more than one gradient in a single iteration.
+ + + + + + +conflictfree/utils.py
261 +262 +263 +264 +265 +266 +267 +268 +269 +270 +271 +272 +273 +274 +275 +276 +277 +278 +279 +280 +281 +282 +283 +284 +285 +286 +287 +288 +289 +290 +291 +292 +293 +294 +295 +296 +297 +298 +299 +300 +301 |
|
__init__
+
+
+¤__init__()
+
conflictfree/utils.py
268 +269 |
|
select
+
+
+¤select(
+ n: int, source_sequence: Sequence
+) -> Tuple[Sequence, Union[float, Sequence]]
+
Selects a slice of the source sequence in order.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ n
+ |
+
+ int
+ |
+
+
+
+ The length of the target slice. + |
+ + required + | +
+ source_sequence
+ |
+
+ Sequence
+ |
+
+
+
+ The source sequence to select from. + |
+ + required + | +
Returns:
+Type | +Description | +
---|---|
+ Tuple[Sequence, Union[float, Sequence]]
+ |
+
+
+
+ Tuple[Sequence,Union[float,Sequence]]: A tuple containing the indexes of the selected slice and the selected slice. + |
+
conflictfree/utils.py
271 +272 +273 +274 +275 +276 +277 +278 +279 +280 +281 +282 +283 +284 +285 +286 +287 +288 +289 +290 +291 +292 +293 +294 +295 +296 +297 +298 +299 +300 +301 |
|
conflictfree.utils.RandomSliceSelector
+
+
+¤Selects a slice of the source sequence randomly. +Usually used for selecting loss functions/gradients/losses in momentum-based methods if you want to update more than one gradient in a single iteration.
+ + + + + + +conflictfree/utils.py
304 +305 +306 +307 +308 +309 +310 +311 +312 +313 +314 +315 +316 +317 +318 +319 +320 +321 +322 +323 +324 +325 +326 +327 +328 +329 +330 |
|
select
+
+
+¤select(
+ n: int, source_sequence: Sequence
+) -> Tuple[Sequence, Union[float, Sequence]]
+
Selects a slice of the source sequence randomly.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ n
+ |
+
+ int
+ |
+
+
+
+ The length of the target slice. + |
+ + required + | +
+ source_sequence
+ |
+
+ Sequence
+ |
+
+
+
+ The source sequence to select from. + |
+ + required + | +
Returns:
+Type | +Description | +
---|---|
+ Tuple[Sequence, Union[float, Sequence]]
+ |
+
+
+
+ Tuple[Sequence,Union[float,Sequence]]: A tuple containing the indexes of the selected slice and the selected slice. + |
+
conflictfree/utils.py
310 +311 +312 +313 +314 +315 +316 +317 +318 +319 +320 +321 +322 +323 +324 +325 +326 +327 +328 +329 +330 |
|
conflictfree.utils.has_zero
+
+
+¤has_zero(lists: Sequence) -> bool
+
Check if any element in the list is zero.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ lists
+ |
+
+ Sequence
+ |
+
+
+
+ A list of elements. + |
+ + required + | +
Returns:
+Name | Type | +Description | +
---|---|---|
bool |
+ bool
+ |
+
+
+
+ True if any element is zero, False otherwise. + |
+
conflictfree/utils.py
245 +246 +247 +248 +249 +250 +251 +252 +253 +254 +255 +256 +257 +258 |
|
The weight_model
module contains classes for calculating the direction weight of the final gradient vector.
+The EqualWeight
class is the default weight model for the ConFIG algorithm. You can create a custom weight model by inheriting from the WeightModel
class.
conflictfree.weight_model.EqualWeight
+
+
+¤
+ Bases: WeightModel
A weight model that assigns equal weights to all gradients.
+ + + + + + +conflictfree/weight_model.py
29 +30 +31 +32 +33 +34 +35 +36 +37 +38 +39 +40 +41 +42 +43 +44 +45 +46 +47 +48 +49 +50 +51 +52 +53 +54 +55 +56 +57 |
|
__init__
+
+
+¤__init__()
+
conflictfree/weight_model.py
34 +35 |
|
get_weights
+
+
+¤get_weights(
+ gradients: torch.Tensor,
+ losses: Optional[Sequence] = None,
+ device: Optional[Union[torch.device, str]] = None,
+) -> torch.Tensor
+
Calculate the weights for the given gradients.
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ gradients
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The loss-specific gradients matrix. The shape of this tensor should be (m,N) where m is the number of gradients and N is the number of elements of each gradients. + |
+ + required + | +
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses. Not used in this model. + |
+
+ None
+ |
+
Returns:
+Type | +Description | +
---|---|
+ Tensor
+ |
+
+
+
+ torch.Tensor: A tensor of equal weights for all gradients. + |
+
Raises:
+Type | +Description | +
---|---|
+ ValueError
+ |
+
+
+
+ If gradients is None. + |
+
conflictfree/weight_model.py
37 +38 +39 +40 +41 +42 +43 +44 +45 +46 +47 +48 +49 +50 +51 +52 +53 +54 +55 +56 +57 |
|
conflictfree.weight_model.WeightModel
+
+
+¤Base class for weight models.
+ + + + + + +conflictfree/weight_model.py
4 + 5 + 6 + 7 + 8 + 9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +21 +22 +23 +24 +25 +26 |
|
__init__
+
+
+¤__init__()
+
conflictfree/weight_model.py
9 +10 |
|
get_weights
+
+
+¤get_weights(
+ gradients: Optional[torch.Tensor] = None,
+ losses: Optional[Sequence] = None,
+)
+
summary
+ + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
+ gradients
+ |
+
+ Optional[Tensor]
+ |
+
+
+
+ The loss-specific gradients matrix. The shape of this tensor should be (m,N) where m is the number of gradients and N is the number of elements of each gradients. + |
+
+ None
+ |
+
+ losses
+ |
+
+ Optional[Sequence]
+ |
+
+
+
+ The losses. + |
+
+ None
+ |
+
Raises:
+Type | +Description | +
---|---|
+ NotImplementedError
+ |
+
+
+
+ description + |
+
conflictfree/weight_model.py
12 +13 +14 +15 +16 +17 +18 +19 +20 +21 +22 +23 +24 +25 +26 |
|
4y+u9TCdKwu862?C? z9|<}v3TC^NNAH>b6I*{(V86{mPlzT8A4j#=D3s4>)*anhu>8^NNeU99%;9Ri83y#* zGncyx=e1%$HuxAytepUU+yQn7FmW(RA~|AMO~=kM4aCzTe-=&_dPdF?O$PI#AC4 zqfVU|Y|M3kz`A6E>ShVxu}G)1ayjpVA&?g$<6llj+)%$G&8ih+)FU2|L?rDo?X0!C zF_mj>7H?Kb)IdxoAB<1ZQdUnMIl{sCwpYMB{NvT8aXgwCD9Vvv#K}|o98YyW#z-m` z%~?pxkXtw(6;&S7p<`x7H|S_!(iuomOb_x@dUXj@I8ivlu~1ZAPJ^SPwml}dN`BZ= zR;Ea3_VK_6q0izv3z5E!&G<*rRI9Cy`TZZ-z0mX|RTel|iV}4+SdsN(gGJr(KB{`E zgIKk#=Wky)v#sZ*nHn&$6fIT-*1Hd*buPFM?U3>_$DL8e!VpK$Fv2Jw-VDL9u!232I-{&6{m?1XM{mQ<8GJ35Nl00xxLje=54rlq~u{8W673c! PA4X0=b#5<{u7$y z?5nDH3fGCA^5f9iKY=niv+S@cx&*)NFi7mnWpz@pEXH%b73+l(QHkH!5n7#BiQW(c z1YpL|_6G`18pK^`o+dqa_0y?PjQj0X!5E4a6LH9Sn}Xc^BaqWe%pDy4F2~1*-$FV> zQGduKdO(x*u|kKIg@T;)?z`t_K>EA+mhINB0m_fpe#Y(zQBGBk0YFc~iw?a{FY16K zw &ajJ>g0PeFrxcuSH=g9mjX#NMerBM52q)d3%#sG0!7^qg#N4vOOX+pUewb zOl6TPeWw`G6Y%JnZ45=YCj6)HI-hqGHasw{;-yeDs?)VmZGLyVMM3rMBmT{N&y`y| zLz+jf@nWehY@KMiB1 Fjr0L@~>XL?hB zA>d~8Tb$84P85G*oJle}r(jQw%}me-vlDupE9Eaib(ZCW@Qs*7sqU)Zh3R__y3|Av zTW6Q;PVR{#1i35{aEDGC4Z0zwcV1_}T>sB>D&jPOI=4u6T8otUA_qB2?_qo$kHkz| z70h0XM4rX#Km5v`(TI(uFhxa`9D`|d>=G0&@AW26r8|6BwXBHL`KnSrfJ_@N{@i{K zX|lmmuH6@uE^u>yA)XRgKe*o}KxHI@4u<(iNVs`2{t5X2_dn*7v#2WZBj8~f=9(q5 z)O5Vaw%k20h8#%*?@~^1qQ@1Jb9~tZ(AZW1d^Rw|cDilBqU*+oo(yx}rTevdk0mo$ zCo}2PnT*8yK_6^Kc|~}G)>ATmAX@VBEs##jq%_M?HR;&qeZ}suny71i=$N4lDpK08 z`*giJTf|)<{%{;%1hRChGqSA)Yq<<0f2&+|ki^a3jxdjHVA)P{n1WY9G*oJ!G;h(4 z!JR9<|BI@7BuE#ci1+dsnK4cGn=S9Eir?bLNx)efr!+xsU#D5Hp@1cy8pOl-?Zo`RG3fN}t<0oi(2rSXL zIUqa(tF_osk#t(0ZUG&*y=Q)6k?E4d_bJB-2372^3O;O>4 v!UDlgBoGNJmdN*6@iV#v#K5>;;xqb+S1Fay0*h zP+$mhjndtYzW8C@F+FSz*E#Y{wboBEWVT&;N6S!S2;w{)LrWgI)Hk ?Z{9ABri?gYd6mJmN-w*OY|!7lzD#15M;voR%jKs=4IqvQ)vt>V`P%nmz C~(A6U=v-Y!PB4 z)} + 1WfExW0D%D0B9 (!=smj6n-Q6;kE502y|c9@VVy)fIJ8);M*hs}tx10+@tED@o_)MV z6U%Un2XC(8MzGu3rn>aJe!?yq0^&k&_!tbiyW4_TsF4bfTV5W2UmpAi1RL zX)2e1jtEAJlp-4601*AZws7l~y9yKX;LZCD-*Wutv! 
J{a%5ffo!1 zI;~m8=*^>U)02%Ol_Wf#oUHwYNJ?H8$xhd&G((IUpW$GhW{XQoej(q^&R*{hA2KiI z?EPBs7L-3x+Q@JeC(*u`c* +$bbnW;vJsIRd8F{v_PSSEKL#J2d1QQcx^yfnG7X#5f@6 zV+23@%ICFQmlFTv)XWk%L73BZmReER0q@~m+D= -V_g^WAaU; z=g *DTu?A0W`0 IWat88G=d^S8y$bK9L2@n&vZeR3q-v2RAR;9wewLAW~M zOtZ#&CIBX{oK)x+Xxp}~xX`b0M5rEo!}*Lw(q4Z?CO^=mR*NCbTtgP>`@rBNr2NL1 znV)j3+hy)Y@U9uTbR~5@IXw%ilf1m^_?BQ-winmP1ZA;9WqafG*AMg>`ym}ck*e^P zDt9Ll<(y~DOxc$SU=6bGuJsgDtkC=3>@VU)4v3^k5sb_^GR@!NnBp6R^|w?BNHsjg zoZZ{J$QaoL1lCQYS{;90trDfE5X$>wiO%eOH}Bu*^4&T=`|LLbsCT!{gl&%0n?+hf zv=o2q7GBveGQL~ybSI_U0u^f3>eq*e;|;Y`;JGQ-KJBF}(6|^poTSiHFKE0L^piy= zEv4JOG#ZbTmB_~BH0T==c{ZJYeZ`MRSMXCj9(Ae7xPqF yp3UXu4MmicR0Fy^U0zGI !_i9=}Gub7t@2m zPGF`LpOh;^t7(tnKb(Uft-vZr)QtbEuO+m36{z67O(sTnNXEcZwHbC1;cnS)--La? z3&&8;hcO22XqbS 5!AK=!pdnuWvioWG;ijO+2XzMB;USFQSyCD={py7&;efrV8oRDd tb1II0_Ho65S7h7S}xU&O;4FcStT|D4x01~F1JVqvvFdE zA$ +k8xQAOy>MQq znz%}q*XP94_3Z3+y653{*5P<3UzWtgrz^dOr`oL&1D%TUaZ~gua}Pupj;DAy@@Mz6 z;l}vl#LGCLcRxXHS*fA8qps(9OTgjkP9Sozp1D$Mep{hezxTJ*`i{jm;43L!sbaYW zc`Jagl;u8z-FhSc?(f_8ZH@Mu2q=;NIQwNAeFap+G>o^KYj|^Dd;SEQEvdTY2Gmxv zoh#k~gc!|QuYDAT0QS2tu@1+VN$s_zl=_zEA8nS4^xkhs%t@>h`)xf>xMNk{qS?=s zS^gH+7ytHV9-iLe_9RtDV@J8(7Bxj@e+&GD5FZCqf2w=FL9`YCx0sCMMMc^-s;vJu z7vn~l&&oY|A-$iM0k5_>Plc7Wo`D00ra>P{ih=RzTW`mFp{d9}rj^siM|y}9R&PG2 z$Jb`Pu)R0LlK8r*q(2R!|H@>gV& @7w(e7k2dga<&J#tAN10{ru0ZZ_deMQ@Tft zf5-}QKb@jMpCM+^8z|k30QMrCpjy!*e;EO;ozpNs;&2WO>_YOeBTbfnZ+#QiyTMGvl7MrjsL_7Z{GZ->Q9Kvnm&wz z|I9$9D1WlxWqD}!Wemk5!qkX9?A=h?$$k(F+MYP6ESKd2PD=LI4_~$WFZ%Dz8`=Xy z92_5KRYSw0hEy*a?&Ow|;qowb&TrpGWGaA$QzKI0-5w7oT`pH<&mP}d&kslHyF3cz zO0ohDss2e3y|OTX#llW0jL5w+NxWR=aW_&T 2wf<)A=F+{H2+nF1YgGM z*9ji2EQe+PE37vPiB#15O4wlMIy=tfF`8ic+}JpULEQfpN#Z_|-2Vc1H~kQtdH=on z{~n%8={NjRsa`&Jm`Gl#t^H4x`BivB=u+`kjjI-Hi;53u8Cj{XImSBEv2in}#)kjJ z4#$wbLQ_|*t0&Ug%R@%}maj!Ox_n*_ypYo5#ebREWEP!sPN0;tdWD`E>pB(w`;s?_ zC`E(+b#C7L-}8K1L+AFakl>z-^<9dFnp (uRIO7YmctNJRWrG!*8vn*} zs0AvOQ4k_`rLuJ+E%Ic9BVXLcqXe2Ul5r4X0R78EuVy@XB)U*M+H#(9l3@^r=Y7B% 
z>c0HPGh?Yl@<_?FirX{PH>v}D5@j7@MoukS`kvFsUtiNPuRvfGY&p6_X$lH2%&O;9 zxAgoel1_9m8^;l-R;wmO97^unUc1SXkrz|F;e2p;DoSu^#cuKpWNNsY9kxEv{0?d2 zt{p51A%CqgXPM#-o*KCe6(L 7yq2XC@j^hrXIDdE{tujU$83!;xj@y^ z{=M4z1pQZsk9{Z~DwUP%s5Vc 9M2CFkf^Qm`6%|bCyRC^-%fVHP-;_`(SOnMfCVH> zAC@_`pL`gl#>Wv!;ctrDJT`zFpi8h@S!v`2yEBY4*Of1x+p5vBr7z`h_r!PMspNqe zk%bW+JGN?uxU|z#@ FOGIAWxo8*98caV{K&9{3e8;Hy-8q7s|fh>3!*F6kxVYnY4Itfvb9fJ^Q1 zX`QQfnCBF=`L~Be6E|>_mfwf(lXl=tP7zEnTO~)B+}{`veomWRFGKKzSRh1}@AA%G z#x1KQJ<+hPz9ZESj+DBwCr3?LTAC&wBX|B0e$g7#Bm>Ff*JeY!xKdm?M{5ZUCeldm z hKH(w%f>( z$;tIQrn;rZR}<>{h_c>h^aw2&Koj1}e1otR4(JPb-k TUI(PL?)6$2rx`NbkVoQf*!omw&T-yyOe+ZwOgWo0kX{M0GHP5w*?$ zf{sugU=C!iviD=z5U=#$yB#($3F6j6M2odrmG}8{bZ3)$!y*W%P`B%E3*h=GLHrHD zjwYa{(s oE^FB?*3S3pl@lT$~VhfKD^t12J<@VZ7IS6J(vnxlfao_ zDlnx#pOwqpO)hsitM*at6oparm`b<0`ECg5grbEs7Ljz2W+eQ&-hecR`rZu sDD`$Cz1oC$5JhWKu5_ zO|O)5?;um-=+W9Lhcc|ONKdfV@+F$}Fi&ZWLZ8>?Od0=p?s3JfJA)@zq2=rXn)bme z!$hIo$#Wws^N|Vd4Bbd^NuN_vmmzcJ$=AK8)nH2xp!mxB(=DqTrjeZiUxV2yLFe&P zndt{%Nup`H-AK7FSd|^rY6|P@0o=C^z@I+Q6irSGXq4*H&KgMhkL=__Yv8dbJAM-- z2W+CL$^%4=;^7n;Rt*irSmS06h5CYnnd>Jt%RA4hKG~18)a`+rB~XuWi@u^8mdri$ z={2p|sfx82MKgJ{2a7W)q=JJc5ynCVF@tgD?EY+T!U4nXhW-$AN z`$!p`qG;d8l{+Z6dEeQ60-b;dt*+d0I^Wg6Xolu)#oaCJWv2ec;MsfRL6+E<-b4?` zZfv4M5 Y%wp?xM`|SphMYHa(zaftUK)mvr}Xns=n@)4^_YgzQQ&n~}hW8I>Dgl(x6P z7=t9P@$FH3t?e@#OF#(>8b^p=fYlfPqsU`@IjRyAz`q-q-5I%+cudGEv65E!ISzQ( z8)))}i@_=p$4br&W4gcOwtr^rl5zKKQSi7+ZMYQ sT=A()EXjlL;N80&|P=0 z-nLWQvCIEwnB0a0C|O>{>8RouSAMf#ucKH1Ic}d=@P3=3IX1yyAh2-zAd!+0OAgxm zC9tTW-Qx_oj|8<*(eV50m44xbT|3?}icn612*%VJvG3O3qYQE+YcTSYI?^ATmBn*A zQ2=xwciJJ|?c^-%cfQXpaqb%1hZF{~xGy6CKFtwXh_}OVkQ u-N{13InnR(Jl;U 6Dwz0<6>HY3zxd n(F|PNcNrI*?qtAQgyG95NRCeg zPlX=$8R%gdo1$#M>CDG4HAj{J!N55j^Mxv%J#<}Hi6lR!0Nr?MM|p`otPS>FfQQr4 zTC)M*{^>$hBPx?+udGR4Ah9PW|1gipyY$;}KuHE4Yl{frcgORGY@g?BKUe)IR-^?r zuf}}dE`ct;yD zfo$>8t3eQ*y;{4O?~kFD#S)-QtNJJnK+<$B)aP?@gw3SV>V8)3(F^6fQjI+vj;BB{ zI`O^%tS*Rbk8?INXxEU FS+LIT_t2(>5kn@{9iQ(PA^MhBs#rv- z))D;$S(>dEy6VyG;cyliviBFAe_>|NEjPkOjad 
x}$YUr>1OA3spdy3P?CxmcDc`nt$5o-6G_`(B6Skw!etH_x zJ=-n!r^@1Y9tZv>HaXHDYUH@!YTQ1an5 }h%N-&v}<8W>wfzdH#y79@gFD${lN&c`zW9e@qZNSx8h=(;K3@tMNJrOP9 z?7HQOU=c80dYb(86-D#NcEaN>YX1 ZkN&MiONwI<7#$4^gawoiF(LA4c!b;O)`56h7w<&Zp{YG39&s85P;%3Kt4 z3N}F1Ck2DPO$E)H6Iu+krGj1e=BNF7Cu1YuI|G;Cy9W3x5RY6Q{o??(gQkQhoFr{4 z*};EypR9J3LKs Nd4O!eeJ*jph=T z7WI%}UX> }?|GEO zEOYVG(>9=JPp_EScqDwaj*7I#jyh^Bxq>xg(ZH0X7-a&6D&gv?Yolv+=!QYV$_RZN zAgGWwoj;DCaJ8(IhUBUxuN^W6djLDv>2MfUomnQ;F4aUL{6%=&DM>WDU8foep`=w3 z3drN;VvOlG5N@?*LxpBLMI#=v@I1-Ox6C05QySTpU$&}+hFwg@s;-bqZ(-R6a$DPH zB4Kumb3h#_oa{i6F9(%O7ugGMoEDv~Heil@{7YnGQ82XL0EVs0+rtg0%c?F=ZbSmV zFZ~vGUdPQ5$|S?6Mf_5IB*7^bu3JfI8r+@vA!DKUiXuzkXPCJjYWYB>F^7%~DyN1x z4{F2%a6grSgQ`!)!vK}uk?~7LdolShLN5}bUM_q1^(P16GpOda)7?x^!^wQ36PW>C z?}M0(aBU}-e*b@J40fRlMor6vvbB`PaW3G-SsRCz<#;S~u-yPxB~HY|NFvme;wPis zOGVV3s8)i;;En1AXKj39*3T_;Gp%T^79M1}gq8}Gxo&;DFSnylXtMG@mFQDhKTExv zx(d)do4|=^1tz(3)@;glcbXrWcLPc<$jOplTOV4LGaNQtab-MoURy2Qg}gJF=3Wu; zQz&_wixUOIq)E4B RqQ9 z{|g!{j~)IcoI;9rnLs8W_KAqToJ)80y%+sw)j)I{r!gAHh`^x7r?Tx(H(2h4@JPUS z9Js6 @z#(c7q^ zw|zVhCDJCNV&GOHUi(XsUI-duKFLgjx$=J2UP@c%mF`FumgFz}jfQ=~%Ojrsg~gZx z2DLyqiN^(Y6|e+Ndo ?x7)-$fwzya zhEh?KSx{ioEK8X%CgaN67f&8;+DLjQpgbw|$yj%BLIHb}TQM?1U(89+KmK#L&1B;Q zokqp78?y*~Ct-E69xvxCXtmC8r>neZXg^C`r9U%*4z}t(yIA6v4UgfQbgGOCB41m| zi6aZD+umW#6yvqqx)i*8{^aBbP 7kPf2|Co=VP!F$xV>v>HqBpP9<^yst zj`E0AA?DNWXAOPBRt({lY@4`aHeW>_O{0297Vx|2U tD#m7eWp= z!ic1;62D#;J2SoyEk4(tgulGiD8;`T_WUgKvG0#j&lUzgTE2hLS-Ji;;bKMqSR7Rz z *d$Inyqn;szvPP z){QeJt+Mb|na4G(+nmlEZC+b}ehFJ0xj<&-6+)G(vpNCkwuV6gbid87pnzjMcI*@v zx=Us6CIHQS|NM>Tt=)cpu9Qg(ztfpr@_?FRCT)vK_M$)l+5UKPW&5Ofdk6lFm+kyt zGk^GWa&$~QYx%z(Xkq`w0z3j#5Mo~0GINnm?Co|)0r|ACqh7I6hs&QZa;Y#u^t<=Z zZOi0idYDkp&<$19xktimsld-3Yu~7K&`1_lUHzC?RBXKA2PG-V$}QiD(6^pdW;N|I zR^44#1Hx0n#G)N|0rJi46T*x3m{mpqGZlHOwF%YYQO*J(i$5u$P5o``nkE4C@`(|y z`t9xdH~}uU`K39F`SRTl!iX{*WG{<7-GZqN+r-5eW)*lNBk-cJj#0(g`ulaen!2sr z0s#eyRsc{=yi~=UQ9Y{%8Z%P7atRwxpt{hvAc`JaP9R~$Z%J*_>+5ldX$*zG%W4Hj 
z`$T+}X&p*K=nUsyh3c6@m%GY_z*_3>cEx>1zqmh4J(409b2D($FizMZ+JnB2i|DNq zP$E~%Z8^8&=?e$R%F`tx2MbrKCp9h7C4X47Y+PP@Cyt0w@Y;au!owhk7};m#w$A{T ziDi*4U9@tsN*~uFp0aCf=L*Y7#P6Jm?S4c2nsKWIo}gwdWTqvLP 9c?uFhbrP3$tjjD|&<<3} zej>eyoICrqT J>0QKqJ?S1W|k_6J^0dw$29WVy4^XDOtb&AVhvQ*&1{ z hG+1bM$I~_cj5E$eZ67n7l9!n> zssS;5|GP9KQq0)&r>viqF|^N;HBEM9MP8}s;{4|F_TqNF3_Cb#5Oa!|vA8esRQ1W- z!^*VkWOunmwV0 z{$m29C%cy<2mAh27Agu<{3PykZF~F$%NuXG?OFQ9Y1%0T5MUFqFO _4sr#bqTwBQt~Tb*-*lKk9y zoR<850MtM$zi+e+)IRJ|&SLD|eNz8v4V4Nx7W3j GHo`3)EWXq{Ed1;k;&6BmIRJr z+%CJ#<>HCJIzbA~ZO_TdOiPS2i$?#@I>7-RdbJ!$BJZ|a%%-gLlytMC@Cj)c5#XUO z+41H9%@%pa<#4zh4wuE0=^#|zo<0VHR;Sad?jKhqi9F-9+pK1@DLco)AWG-q8{qAw z(U!Wrr}Tf+y>(y|*Y`erX5F)rjk~)+NJt=fkl+y9p|}(%6`)9Iky1)&p%f{l6n7{= zf(0i)fB+%xzR6}?W_M@q?~gbHZTtDY@A+@{j@-F(?~&)8d5* UVsV6yN7jLIm%6O9&+@RCyj5@WdPN6W-_!6<$%8JkDahOeGLK0?^L8sR% zm1ZV~XKQEGYB7;!qh71gYPE{$>S~RNZDa3fXCvnGnx7L9W|Pri&}(YdMmk?)FX0hd zjjW=$yh={6?ChPL?WKH9y|pFHdPQ#Xu5CN_Udd8eyQ=G|%gf3*6b8@AiqCDXXv*@9 z2nqB&^ZWTr*P H*J;#R9c}>%jltpZxojq#iUA@_M!jBdFybZ)fq+7z zGgurRuff6~(v0i%I=!Kxl}e*C*<9X#Qyl}D3oaZ;dR)xzGBe1@;lqoZ(Wq2 FIAR+|sYqaD#pf{ISumQ=$z@6dk@>t>Zy}Jy ztgfgiH*%XjB!ozWYbq+t&e1{k&CU#IF&cC_txhIKp1yV%uBt1^$tpJRyn_ARBy1{% zA+uhoP~j$ZtxRj=2pj}_8Xco?tVLWZ1|+U3OZ{WktY4oSNMzQ?%FD~B6pX^Lb`ZZY zg$WTR+^E;-^vbe2%qPf$YcXmR3RzWQUb&iO cRx++ h`wKWyRc{MoC*4fL+Q7q)v? Y7t2z4 zW^q?vQNy!ibZPv`q?gq#m{dqym3!g)shwSfJiB(I)@ Q=`s%d9D-y#Tcnqc_u;1*pJOA9hW_sU{4!u9y_4m15iw5|KtzuUu=hm7KDoy!$ zem@^clT~H}FFBiAV?>&le{WqrF3Q1***Mya*zax@E6fPt8E4l`>FFV4H7OduPfi!r z7!g9K^44#kCx%FyjlAQiKcCz=zPOh=zj0pDy(S&XsWTABqOW~?U}1MpE|cTgZ}G2( zj{miCM0;x%+pX7E$1-Fsp9p{QOCRUP6(H%l_I8coqp3(p#i{YmOe#i=T%PtS-I6K! z6L%T~Fpq_I{~dB;!mPeF+a04ao#$LPzS0j|U7m4u|HkQqJ9$Y&-8Q~ZRc4?2VN#5{ zl+R((C^Uih;I$WvT9d$#B(5lXa%R)qp`F4bBErJ_?QKLnTc3payYJM~$C9|F=)&^e zzIIkj41hvo*?0#81O&B<9kl+oQI&OSX+oe>$mjE|Bz6G_b2cR97){#BJG);^~ z@c9Cv)H!tEoHhGX 47aa}xx5_|Wc*!~kg`*!QGYlUrdB$*V&w|C9%=3{Ne z=kvu59v#Lk+nG{i)K
we1GPdtiA{UVZn8(%KT?n_ph5f zI65Gr*MzN4TE9|@u{QVa{%@xx^yro_Y*b>u&LQ5OK9T*#EZneT-^sIQFI>2CJ1wW$ zh!8?0H})@xq5<2d*PI=9^KKvbW >M?MJLW@wBF%-A`L^WABolJ~n(N z1%Sa3Ik*J`1O&&9Te>T?4k1LCltp(Ater7*VB(ZTD_1XDG%0aFkJz};OEw+Letib3 zpC0~Zs0Y`r*Mi^ft3H}vh_o2BwMCDU4{rK$*4U9Fr+m74&-sT1b%s|~lSNaWmzQ5u zWq8A75JHGhJ=nFVj~my0@XCXa+vG;gJh*g70J9BhOv0qA&AR&6+L=Rpbc~D$@%MDH z7TS45j$ZNSt=u|ev-Tj2nxcD0*3KE$XW)n_i@*Hp^C^Qn`uPQQpR?uf7uEGFScFNh zt}D;DaeCWlV> xnp53uxa=&25fT0_b)9Y! Hp%0lngfjvhC% zU&ml?FW<=CqZhCLZU51~lael6NPb!(e*@zqfoqkurB5#&_ `%JK-%S-^I0`GBF(z0M<;%qHMDonA=4Kv|9a`1VV!+ly*rIspOjJ6&>KAe)MrB? z-9!u;1%M)O^6>T#2nYz9xZ`n|%7PF=7QLe2_Wm_95;{e _FD@1-@PDdpiPD{k{eMi8>ntX9>9gy9pE!8nuS3U9T}gd$J^w|Et4j(vs;dc71y;C|kx_0~Q?~g2= z@KHjN7Ne$4R$iR_FeT~qvEv619XWpfUd7uqfJIaJXhR&IDfUX-_iiFZ(kRb8w lKS84~X({3+prYx0|I6)MO-KQp z_w1cGe*B2Ov7zp+Azel-+jS+gT8k5{E^AbmJx%_7*`)rRJ4A#9`Fq$)qz>NQr+s() zVMRUHt0MEt_IX`H1H9ZFMH~z;24Cdp6A%y(5D?sB;_@BG&hK8_C)m@~-ii%?F$H2r zZ~sQ|^YwOdkZ>3PRQLF~J07)C7o^#sEPkA{W95{=1N#gfHE#UyJ{ l!?8(bkL4 zYw9fnVbaQS@1NVYVn%$JXK>$n+b`v0UH^00=uX}?R%`~1F7WC(d+U{KMg1z(<|J*I z-Q8QlWl#ZVJgI}bUqC=e$8j5u+^cEzHVkQd7WeaHyY~Nb|HImkv}uG<_Tuc1^M^$H zI@>#Xg~!iWd*p6zo!+2Rl|4)TYu)@Yy*r2acse;ay81@;nzUlil^2y-lgX&5&boPI z gBx=tr>gFq=08s4{x8BWFB4p8*J@|9!_<{Y$&iZ1-n&pco_wU)ecjwq? 
ze>}-kw79WZSC@Tt&)3ric8-pW2=;Mx5Oakt(TQu1rIx60f WE?%zmLRHX;Gv zI%e6092t%fLZvCctz9&sSE#FvNWka2kK3PFrfb Sr=8tAJF#cy*j_^>OrJ6`KHSa8B`kj8rqij#bp{Lh_7Sf* zKDT>^5Pt^|oq_>Sg|6PdO>4u?&)Z$Xr(hTWqu;J_RlUkG>lL-tS+@>tSuixpO&quW ziLB_(p2^X63@Qc;_bI2U4QA4!E5Eg4dcU~d;}(4V-P%=)NA~Fy*E7E3 >GzS&l(ozDPVWp^juS0n0jdCkSG_i zNWfuWn9yhNPj_UvMK7;-a(wNiuEF+VkrkIgrHcKBZMmyzO} WW8uCP%qQcys`a#u7OC1_T6!bs9YTxAdAP$Jfjp5biAETS>h; zPWU|wA%t*sb=t}GbI0{*=Oz~Ltz5% ?8Q{66)6Tfg#u0Z))-gDP|X%&uNmQ~rE@ao>2qA=LZ5 NH fVL6TvVbbSaT;JD&L80?p`fR){HzR})Nm}G(sjH(I zK;uihe3xc48jU!PBZLs4uYI Pj?`;rQj?`HsVi=*?IfT8L+sLTYwM`iSX+>^ zD8jSjf(!N5jx=j39v}X)r;l^!i1pVh@%oObi|+k8G1{8Lvk6Pwlw6{>Ad9{(_tMX^ z6Jx!_bPQ-B*T^Yb{#-hsOSjIEZel*qF}T;7yLcP$1R^cQnirS1Pv{uZYsThl1@EM8 zHr2em@Lex^Do~_BJyxbPA%6O@v;zwxcog7y#Y{g?*y^Fwxi@!Ai;hnG?q-9TCrpa$ zl--Ml^&h|H@5~w#LU>(4^0ryg4m7&Ruh*vspQ$VeX)9h{+&Xt;w_sN*8UWq3Q)_sF zwA5u^`Dt2|Pw0T9M|1QDA!GH^W8Vz);!*&p435yo(JeG##*Zmg2sObIEVsyhV-|do z*fW0A{AFv_eLZ7Pw7Y-?T=(v?w?9(WciLjX3oh?m5F-GpThC8^xvw!AjYhM%fw2AY z;Z?(<<0h^@Utw-|?wflS_3SzL(?8PN;E67~wsT%5E2>kEncMEvecTMU7;0Z$+%mVP zkCeq^b2uC}i$P=9geR^%_Ed(qR9S`Re%-X`*E275Z@w0(xVK|&ybDzlJ8Q?4w&S%o zz!Mf@#r@r%PaHbo>pgdhl@^2y6{){3?&~k1((F3U+;%x%-4L*tO0R616yszYHFwvw z;`*wH>L)wr_42fmcKmej^ fkX(IO7HI5 zuyANcZ#y0h18_)K*9cEkJ~;9F>IvPvZMhVndGuSde8!})iKAzHzIM%*vj( HO8PM-?q1IuftEwr*U6bHLzr=Q3+?gpe-x(2@i{2@?aLam2O`P99M+_B}3WyYP+T z$*CVd8{WZ9%%WldPKoP&STt$WkUlYft~LTL*L&>lr$uT~{do7p_D+1Su{$0$+L6+e z3!>euSniW|r %8*8yYHc^%=~jgyP#2j6(fX@sjm3S@^(B7XadL13ok0$4jM@UmuFwuJUrYZbm+Iq zB{)I|Sqya k+AP5o@+ z-UA1J|K;nkaV|`Z?bvSY=CpSfAR|nw{9Au68PKuInAJ&{N`w$1lrNJvC%RK9T<1;` ze@vAV4F@W7Q+6-t5fs>M)-N|I>$R~__9E%~QSEHFLiesye@Q8BDt}9u6}eY;Egd;v z;)#!MoFEEbE=sLQ{+YhDL7u)X4E?@ZSk8x4ZR zg6nj3=_fZ&3Zwxqpx?qjp2}4!l}e?mtE(w^a{7m9ogFZsdi7iIdwPo}vKVWgp4vDq zK5o$bZMO 5g=p&&5)jo2>jG2q) zjTts>&eyA!FB#u6#F0nC1VN*|J)YU%;+xIp!lWN3bhH7Qd;ib>yjN{B8jTjqYa3IQ zDL+l@VoS5_wPep7*~j1$Bwl)P!_*#~`p)`p>vyvUgh**XlSX{9anGS$%g1&Njp#9A z$q#$aUb%X5%Yr_EwhRE4@9>pJuis4CI4hxjht9(mt>1I{%8kp1R*jEwv!VdaCU)^3 
zH%oQ@@(S18{eEPmBZJrF>%SjYwf&fHh9^j~uJG9W?mj{)MrHd?+ 7RjSg~jT%)$F=)%CqExVB;OxK6(HiQbIp z ov_;RSm}xCle%U8}+y44mr8xAFE?U4AZT{KIH0{-p {qIMQ9^SiUes>opMxhI$S3hp)^jWmk54Md8klM%0K3rgIm #xVoycqAcsy{?7-8*>V|l7GEfJjvT!F$fI&~%kbMGyS8FrfRqUU zn9qbi(+cZ7{<<^sJ9$}A05~1KOnP30lL#R~{dnW>P+K+z0GRENZK(xKyQKHN>EmPF zfGD(tCs5&@k$(2 Ct)Jyreu9lf|{^^V7o`Med$NZxIs%09Nex>9Pj>_;P)`4UY~0_5*h3)-_^P&(_A- zHD&)&Sm9sYQG9IiAJx&3HcTpD?CwjC{+)E8m7G`_%b{XG@t$|1CAdkL3>rmkjZ7x1 zsjXA%@b|WWq}lLjxj$f3hR3YiZMB>9nH!^+02o!^-fi~b%zAf`z_l-b?Qe}?bgt8g zL$yr@EAx&_3$Y1XaJ9DS3Ko4`>i02Rz-T=Co o~2bH?iIvkOBRfU)@YoyV;{ znyEIbif GcK;{V@qM!g%1AiR!gm6s(gHQ-T1!Ker*^55|=$ayJmPy=W$!onztpig|~Oj z3St4pDxlY=$MPDltI4_k>jXdgYw!fA&A+yNdI;Axa@d+{jh+Ef_3+s8o>B_My4}!~ z7b;rxp#h$tvPHh#W^PH!Ylsw7S(i3V3}yk{Id0mPJ5Aw3`OUu;#|ePy7C-aXCoOZT zN^k6)ALkZ2e8q{PrfG>6U)lA;%2m4`wK0CWXUD!v^rBJS`^??-K=XS0yM#2Wv(NrG zy|b6Colm &N%(G3AG|naxk3d9-t0ygLiPy3>N)DfKao>G_^d`+Bp% zw$Il`QX3bJ`Qf(d-QB3{cC)r$Eo{_V=A0v+^$X+!@Lfate)Z?Ae1)+r_0XoTKK 0_Qs~MQBF)?htJu2v#?nSn)3F|>+U0H1y3M^ z5R#qyqF=C>0t~TV@2?KrEp5>BHCMhH8fM1;cKd}#ACxsMEwb*`cZp$kEU(c!?&dZ< zYVMwyF>ZVeT!;R0CtLYWgJ^oTZA6G83qa@{J$%EtOqtGDaCO_)^XG3%&aTlDs!N}B z@e)z#;h*S>5;qRVQb*bLC^ALBBpJYN~~dU;i@FU4(&T_^#(IJWXU#++Ne)FKG0R zdu5tt!&Q8EQiwf=0(AccCvs{HuXPi@x+u_|kI@4^zfz+3XEoPM*B8dkI9aYVk_az< zcx?9%Yra{v>X)5|l5X8TwjjpQv*))rs b@9%fQ=fN{NMcrBuW601lP0}JA+M3iWHs_Sl}2y+cPf>Uq(NSE z^{26s4kFKPGxyvn$KQPpLI@GMn#^r|co<9KKKfw!yN cz1Ik=AxWBe!O@=oYCid9q`;3)UK*Fjl@iyNE5qZTf zNp75@x{~yL6MZm_bI8~~vYLBS^D=pL920R)?&%Kw0&9_4S2zWzdBOTvY{|} z`;2yMV0iUk{Ch@=_fT|o`<$+U9Y%k1rm#8Ft@i$brTv^~7|p%U{N0Zl*19D>>E|gC zf>+=P(o}wH_rgKlhJUg5akKr@WgJ^GGJp#p3Lm%bOn$>VQ>OmDq^~m-Xndy*6W1M0 zm(~BTe0*@}AU`fJeTJ<({H$rTDpIyg>1qQs_rWU;Jbu$og*oHElEHp7s#E`Ee?6#s z!$EHmLUDiF?7p3ct~{JtV|uw~QGz!M0Dvd5wswzA{N`|Kg|=Za>L2_vsjDj$0QeqZ zLB7G!13%qyJ- # zMZ2&u=MleL&wK-}*bGnfS$8G9+GH}BaHBz|QPdRN-8rMHyNF5WNJGbOyOpmnBSb#> zDKXU1BXRrPyk?hAo4S7FtY7bBG~ztw>dVXehD1-;{jjjPh$)`6ZA_>G2Ln#TXJ=nj zH9*USd&h)3aj@6m3H-vR?Om;~*6;+O&)hvB#7-1E<9KFuOVfa-u8MaRGFrnEgyH2M 
zqeG=!yMezws?xSpjhe^n2e^xv7{hDi&PP@6&W|&mp5!8AVc_)1Ia#Yz1N_2t4*?Sc z!Qdl}@B~5#A@!x1emn*Rz$t9toFh2~GogOE@5`x^79Gqmnu(0{9fS;<5vLW#=1v*! ze-RbGEeme~ 6Sa zg}W1202DgcD`EM?LcH;RMc$sl5)7lz_^#~}SEe-P(h&xE_P#_3M&Ve8%)6{>Ji0Fb z%x5v~;ZsgD=iFH|m1$ez#Td{94qcZwy0s=namw;=24FO{ZRn7%&NjF=CPhig+MZ&J z!gp#n>rB&nHdN;vUEm8C&naxm(N=rCrrO*qt2#1XgC~$O|M8XtaSM2&UUbDz_Dyud zfWom39K7sgwzld2$NMJxQ-RLnd3PSO;#5<{wzlNqAG7?Z6f3XJ^A44^l?PgIcgIvO zjA0)!e8c^>B@6%C@?UXJSgP;s-B5NKX)DT$3d-aL+yY<|mN0(m+=-()`&x5b5mAw4 z$;Ym|D67Q*fNtyI%;MI6x5N{P=yV1E=$|B~ z0RC zAFBuw<2sVrNB8BndN@<=^?L>aQKZrFK~1zdW?fDG 65l_JaqgN$mGv|q zMo9pGFlsUmY&udT9@8U`qEIw)*AjZ`kQi6&?n|Tk>8X<$^FqSj`t2oy%WF4sc>BGp zA80eqoJgM=)Sig}0ENx7i5=M4_V_VP`JJRYnqGYxO5Boqwd}#IJlEdKIxqkL;mY#V z{Xd p@XuSj^Rlk}_UpAioBLT)fWc(gIoSeW6oIX4?{SHK zd;or@(JMQRu0KoLYPihWs*IFVJO8?GJ?h&{Lw(-28YAD%u6;tEpp#p(s-9m-&Km0- z#J~W6CbDzs(7nCn(&ehcM>n46VmtBbKRz?dE6Xx-R9;= AUo_uQEB1Rn>P!Gd3s|2z~OSN?Ii&Duk^xDQJ9^2 N`mXzF5K=d3l-Ir zR#sHWOqd&w&*8MeMDnKdG> Z;^bhPEOnG`SzyNyeTbeD3N((bdr>E6&RBd9DMUuZ31 zwjlfwak+FV6?9cqN|VX*b`~b0_{{dhX?l;(0|dyVX?h)ezX&(>xooJ-O#1ujyiU#% zR_pI=8P?WP8kJ@)ewL-dqY-@kSDVC(v}~vLGuaF}1{8<(!>6`y(df7M?D@$*esieb z@mZn5`fo=qxXx%Yn@wiC{MPT^pSNE6%Va
UF*}B5`&^QxhWn zmk&GKXT$QH!>=z5s9&C>SyNm7{PxA @GOYXV=qaxMi^>(cbRW^x z;o9+XW%>O>C$kdg2emes$gC{SIJS9D7RzVMu;G3!ObNylScS#-(T|t6Jf1|Dg DSVD zw$4!_K55@*lW78xoo}c&?MkLrURWT5XxBGBE2%DgapU0rG`i2U;r;!ZH5T8=GqUgS zxQEBJuHJ4^4gk`ZX52q_;DTHjGpJ*?z?Qm=>EIjLXK4SVJG-(T-8^;dPWR=#ZA`^4 z9-ce-$mlvg)Dvq~XiS0FBgldND7(BcBmG`J5fJnb%Fa+JR66aoPt%Fo^jpcN&X+j! z84(}n*1SL^zTq)F`bOp6WV^cC3mg8Y3ate;c05{oE4E-==G~MFR|+}Z`gz&6u%A}; zHqKsB+RLK+=g+g!OZ$6xQ2 ~maM$8rm_x# z-`^MjjmH;qxm=>Cq^zd8)(oN#EB8&-q}{lesc?)B4e*m_o+@h9Iuih3Mnc;6lZDHs z4+xbs&Q1zOX>bfRMVXq;b3glh!SGn;Mw5;xbc{~LC;*n43b|Ho0BC8wk;R~{%({2y z#=R$b)k=cOb@7dg?cOQG+s=yl9&Wj;u((pK#%az{F_Zl+o?2X%egEMdQZ7V#WpzpB zy~}@Jyqlvi)7`s{UbN!#;Zd$a8h}n-RcA8Fa-Qc_$O2usd^!aK;D-0^dbdEpg!(sZ zetBlkh3B}(5^p-qqG~p@K|aoWZWh!$zjVD|atCi)TLu6y81OCvh(;l5&Trm+wLt5? zaAbQM5v#>njIho@f?fA9Ws50B)?WH~>-9VZzyI)V&aLZy^1?rv-uvi-ed&28x9lC? zdrdFrkE|icV6?@R3BY&vl~Sp>fDwLUzwGbEq)}L* 6VyIGyp1<$`gwLs646H;4fDV z@T$KdoiA{U>k-Jfa^IlH%P59{B6zz{-c &;PQpOzCltlPpvG-DKvr8t0aOfCc{=Fq?xKTRRsmrD9ZJJ zML+Rh!4sIsGp@U9Nz%m!MY2X(N}THKAK~jJ;=DSfwU6$-kjd){0nzD5uTs{^5CDMA z%pegJfHY@6%av>5NIQLMa%x4bng9U6>|LD2tdET2|1F`rwF?rl7!&}8_u#L04zB-~ z%sO~0&uFZtp61YrI(aSCOa}m*q?7dvTl+FK!#vs yoOlY3-7=~F}3z&e6 z$f(f(003b!=3Y4Q%xHEkI`_lco6UD1OzIq_jaY1E@D$|*S_pf4r8*ji=QC({hfVix zXtOVzxU;BT_j-aX8iVEJ>p~|VYRaykzFr$Y$c6%dESkEKl;nK>q2C1n03d@>@$kg? ze1hpF_V%`W C3LmL1a_ZARj0mtc8~I5i$~BDmTEcHVlk<&tI;ad;4EZ? 
z44v`0ZPXZ=Py2TMB0!|sq}S^e3O#N{7K2)AGBguG)965@0*GB*oy6kT2BxttG>r~S zN{f gE BJGx02Kw5C4 zUawLc%mf0^X|)7NywwttCD)Ul%9SMV_P$?BnwVw*z*M55v4j$_SzBGEHk$}|#TfA= zd?uBG=_<-)X0!JP*4k|$sPa)Nwf&dYj5ZMl>kgd*IVY13T`ZaGFJQ7L08sJp?A7PF zSsD3{Z=Oxb5KNm)kiH*z@3y4&?#=3Ui#^+F!Z53L-69yf|2}de>&t*(3Lw&0b${35 zjn4um&YZH`Lqb-+xbXY>Zxh%5l(6Ra-=?$^eC(hAvKVU9ep|ZcN+sKW+*d2!ADaL` z7K^T;yb>r(ih$35doYAds+y-~&y~6l-x$Sf{FA~JI`$Zo;Cp6ghO+wE$^CaHt?k;1 zN71CL%{+7Jp@rk&6&!Bc>Vg -#-536}t9=o>UGL6ZK!~&$^=hR;rMCbg z2%}c31K_njr^);@2#rqX355&*piqF(qJ>7Iak*TKv>0`|8YRFR;R#YvT6F*7wHmAN zfuRAdP#dmO i%OPD>Gg^xSeKn806#Y zA#B+q^iP=kL+k{Tg=S?fY+C(P8qjC}ElbMjb2Fc(Juj?dNUrYRl-uGR=*lu{m{PIW zV%Ex4G6i@^0H_U`ghmA>t%XwIa+v}Siwb7kXu&_Q8akWD;&2#LvqGUzt91avcc0Qi zbY;a=dWL7DpR1DqFV4xUs;D6WFn+-BY14-HYQ`o2kgP(cQECtXQ0+U9nVHzTgL4ae zZmcdYtE t=E7>3cUtepaTPwbp9bV#pu?jm->s55Nr zrChcpZSR_OeADL>`-HpM@mVx_*r+jXTq`aW0D#CyzWlsGj {r+v8FU7df??1Co={vnky5JCQ$m7#xvW>w z2XWWIG4?03Yh)RJpL((+#@34QZpZusg#iEz#}Gf^YumflZmuk9*|V)P=UV@?YC^21 zRK#OYAZ+HB(zfWJ-(Q$*QS)N}0;_u5; ;tONiE;WaWffIR?dHszoE>w$(smHKz^ zf8|0HI>S4DcsusZ6uj>Kfip!@7kIVoYhzr}_>)2rSWB1yW(-=z`xr(M@Oc;_38T6~ z4gf&tb>)}N->Nm*6kq?W5*H38S_Yvk{B+Ot!Hiw-h?L17HoX0)_#T(E?$hQmGOV zj}C|+aBa&C%zs&4gEi<(n%IU*qbMjedaD)&ohB6WFtXO9S1NU_M+X3aMx%2$Tnvml zV|BUA46g4ibM-IDe+5s_I4)tm=Z|rs$=3aKJ6}a008pjv`*}} x99`_kH>hO zQ(m3*ywGfF@WJJ&zbsjJyq-hfB+oH3_#!bEz^IlR5!tvW!f=a8Tp06DJ7dUwk&h!1 za|jRhe5F^GmnpYP5yGV1$MvzQFCLAIPq)lp{<6#8PrmFb!~lTl6g_p@ri`$jGrw;4 za7Vo8?J5}|P|MT+0Ki~;aH;Y7CD0H%A&Xg_dMn#P MP`jW^YWFl z;7|Iw004m5U?_h6q6#Pi3YYtOZUd9c@dyvL#&YX)>X%RR(7+B&-ND`wdRVmO>8IAD zJ$yj+TKR31-G{C^y0wF6w}g&6AKa+SNlAV@J*qPg15#I6{NiRxAx}tAlo#E 33Bjj9~#g~vqZ%77y>|H3Z1;&-!Zl{I)%-q zQ$VBCl$BHwQEv4MkIrOT@p*hIo5d6e-a~6qDQ%X6d(+W&ej%|vyIgqsch1G_OP<}n zJZAQsi31}atGye#RBiH;QLOg2OP`*|FEu9({);O ziB0ElC^V|2I !SnMMqxNb3>q5&$b#!Na+RVyBlYs7 zlxw$& #~CUJ79qm;aKobW5j#&W4Cm1R0BmeO*%;!E#|_y1`E=iq zdn-GBq_2a_`l|Ck{(4W(5V& |`x#x&;83wDz%Gz4l~eY81H_4_uzpX}EK JQi5?-&t-iA |#bg+=5{;e0% z$mCfMU&S)kYedD`7X7MYBR)nUhs&q1jWoZRm53Q0U_3CWYq}wbjKr`T9s| 
zMQ(O3jThkN6A>EH3S|k6>N#){0G03H<@vt(gdZTaMHM+21xj`(k0*RBtY`3r@5r11 z0CQD&aY=Cr)2g$lUwEi@OM?;`8j~;z0B9o5Am?{^Kr{d<0Pzu8lt!abX*3{=8WpZL zd?-A@4;(OWZ5N7AYA+&+&gYbsRha>R7ud5?M36^IbRB65vI{Dz6eIw}rE{;C-~hXp zbIDTlJg>Y$ZUz9po42djy5(y`x|c_P{rxW1bK$s=@jfC704JdD`~^h|79ZVz#>H-0 z??Ca}?H@AeRc12|K&J!colt 2>Sia93L9PHs?4kst(GS72o4DGuxb5jBZmM0Fl>TdB&-hARvso+-5k znwP6RmGMq*hkTxvp)ueHcw8<8d%bi*NTe74U}lZ3;#qnL-rWJ1|4G!=W-Y(avD5k; zeR(po@buEeTc<`X`FioVjvi96z|rx)=mKEOHgOBU>KhOMuE7BS5J}3@?q``WjLmC9 zgHvc!=b&I0EG5fq$bRtD0A6qN12F&wFk54i4gWI%7=TEmG2sA!Ot>ojQ5r#KI(oGW z4Q;&+Bcfsl0bq2F#P>Dtv8@{dY6^e;S3<#n{)R*h4Cpi C{a%M=cEd@UFeX zT}1Ug8`f|PgX y5#94^{SmA=)|(*;LvVXZiF_=q}+kywirfe_%@5 zS^aa*yS1ogWjG>%9UL3@DmsyOF)7bT((Dq(ce83}(xm*l>j|gRijGz-JWXcDq5bWD z+PV4kx4kCYzDq5eOpO(_{@H{jNwTK824OUYZPd_l j(_i$mtQ+| zt+w}I8!*(B-Z*rPI&jIt+Knqxs#DJ-7mk|hX;#;yT}#n*Ue=hhOIQebRh=0SB8^64 zQ!;I%V00$e!NraaH3XqiC`@nA=xqohah wXUCGYrVZ|JoG$rS(qAOJ~3K~&G5 zw734C6g`^Q-Yu?QXWQ%NDhuu;->-}9DJJ9vS@%=&YzIt_FWvde^P;q?cghonIjJj( z(x2y0qdx1{s3T1{u2d)yU1;mqb@;gWw`ZTcS*&bq{6mB6E~ZozzI<>me}J<$i;4j< z84N0wmXNx*g@k*2?6u!4RB1%~xcMrTdgI=^`PJ#CHZM%Qa$)S8xswM+d5So%7p?;Y zRD@^%h#-LYP)AT2F<@LcVv5^zlbLQIzWlu0s)FLYl1zn#?Hv;MuCzRiATW{uKnOmh zfVokl(d+dX!_&WGuOTBNUyBo771FHN)|I@tckSYpM{;3U&!MwEmExMpqDtla-}C=U z>_b8V9i$I)b6?!KUpOGtS8UCoV1P{3)hgUfb&rYg5ewgyI`JNYEJls8ChI}+g>$!y z*bxbdvsU;RA7_=;mA(61sH?5BSS&~o*ke#)w43PtaV7@6zS5o9jD2_k`(`nx-OTsx z6hMEIqpPb^kciZq%{W5!TFW7_Z+$Mo^Y_h0{s1AK-h1iUW$hdH@N{)`MnE*3&J2J^ zlzIO0la!hz2NSPMcdI{uLKDVK8R)U|$Cn3>r>*GN@uRwkOt>=T )(xTTh8JJe5sJMA3 zDSP0Iz$T@R8}Xvt5;I^FjLvyqBacY4QCV61^3KHzSJLFR9fuE_v%+6cbZ1lHdtTt5 z3Sls)Yieo$#HiVQX;W|mu9B+&%mkq^;jf!Qo)yQz*_sX&W)p7I))|=^m5QYC9eq3Y zA2-D5zkG()V$o_93WWj?LL~WdOAy!Tv>Gj<+j)d_88oiD?YqzTw}nchQ7Kdayxy98 zsIa8&zES`HvMf8dprTR-04&cyH%I5zkO7{bT~v=wU?MM1cd4}1`mq!~ewJHWsRsbI zPe`DHgS5rTNd4%1$`hGjpudw-^QIO%utTuZft?ppUUUibiE?=R1`c2d!vFxiF)gpP z(CKs@@6~joR}S$W^VNzPgXOo=_w#F>o&Ik6opb$Xe7bOa-w1aphfcu&s;ldCxS16A zbx9c3*;n$f#S$gCC0ZknfX<=6r5<*pXzS_>1c3k GA3Pz{V0Z0VR(f9vA 
z{!@4Yfa5=U&iE%US^yN8hkn`-=4Csnn~%6LUy4DYG p0xS%K+m*9KV^NxtDZHH+lG5)jtNn=CUyW0Hg7PLH$;ONYE &T_@{-Xr)oO5T=UB>)Aw)gW#+pnBeJUN@3 zHEyD${7%vhX7`^vH$5eWQK=LR2(v{ctI@yyr2+-x@fZLYg+gVsKftNR7}j1< 2E+N6tcuT|oGNspC_NcRYN4 z_1K=sfCar=Ihd*BW!B@Dcu f7*Whc212f{nU@2Jx_bM z;OoyPbn+0;UU?0K#bU9T0az>=o&BNUA5$cH3*tm`aJA)>)l}TMYEsjLLE*mdG a##^5^){`JZJ;U6?hFbaibWkmsm5N^a@1EQM* znYA@9{yvlAFlPORFrdvAoWTe0J0DUmHvGm zktB@rf`?~ztpDvY#LQi@X=Q{B1Fy~~{gB7{r-Be+vKTC2G#Kk@ <#IGZpGtDn(E zgGo`ReeD4$G&+sV2EeEc8iPx-7%jMw(3zC7@@fk>{l{Ks3n2-UiNFcaX*5cu5`oQ! zuMfgxB1|};uC1+;$qBIi-)mNj*-Q`?06K&5?(ZJnEJ&I2vaqsD1po}^0Dm_pyH*Jw zWJPvCX;rNe0C;{uzIKkSU~x-9YI
k0nErmA_Cyw8D?qBf**DV1pKgR_qj)Tm37Zf ze>*+(_PlStoj*L%*@{82=naS 0VL)MgiKl!FM~# ;p$@?{zz9=%8-w~Y n4 zM$5-TE))s{17O%V+q6O_NJHxGLr;wVkG=B@jG}lO`0Vw5mrL)x5&|JW=+Z$D1qGC% zh>9J=0xF6Hq$(oP6cj|NfPnPgLP;gPCzs3h-rM%~Ljr{4Ab{BZ@BRED*}dJJ-I;lw zdFCm!&}ZIzrN=E5R$E`E2MlIl`#o;~iv>teeQk4RcY~nN 7x{FDqD?9k;MJW*}Z=N&z%%zklNmVd~=$s?n2 zyYa^1{nxe9%O|SBMosK}Yq-(qw4kUE79bGVB&nBl=vgEHIF6GLlgW;b3V*OsN*6|D zJ~jX2$*F8xL(ZNhUmdD*pbzqyDg5A=9wWvkQ>+?k;hD>o&60xL3OM`eCws@l^nH9l zs8cPwaqjq)#)je&L)4&A8MkLnI+G?8v*;d+Nm^a0@YW4U4DmQ^&d#2P6WMFryWcGT za8$VQ`u?RKe6(cE`tMh-*>TY^aNak|-W?b7S4`G8iXkvc6DJLuwRFqwEz4gYn;9); zx+Ir=`(e$NW3`?RkBng`ilTtR;_|tIdvC5hFid|Dgj$MERU$P1#LVc<6)2WKDB|(h z0P~<87`q?$YybcN3=W6EVnL4P+8U)?<86W?5U0cChB1;vlsP{xn*YV7>ez8_ygp@E zg5TdLJRS|gj9H6TEPeCIH2lV wpZ;kL4!AHM#< z`aIEd3qP4UDB)gS|6UOUve`5Wq$w+^vpK!aK$y#7^I#nmQMVf=2m&yJB7&$75ZrSo zq`EMd?4BCH@Scx|L@IKN xe<1OR|Ur032wKvdztAz4fS zAPOyJ#=fGm{9Q9|XXYVE%v5qihSEjddL})TZ2|y5lBCOmK@=)2C_e5%Em#yPRS*;+ zf=Gh&*gT%Q-os v|SAD{K&$Z#6KY^M67yUyruja4d@A{4SA z67krTjgrnSlO$r5 o7E%epBTp!I)g0`vPh> B zm|$|*;*bC;AY2xsrb*VJfh3NTFajVZm+v1C%x5rITpk5BYMSaQ8okLG5`#T1yAyfT zn@v;}o5STXaEn6LT-#{#vhz$}2m-_C-S?#FY!;Kvfm~{7y|h7&d3$)&?Q*%?4 wDh%R6=m@chK>u}t4 zkhS6`7PD9^0AL4 f;shsV#O!r@_HSPF%Baj}F^AM#TKn1Zy*HXI7ytqx zpGK#lk{iWxlg(S_BT>7>?qz201c8W6qf!8>Y-n)79hH=J@Pb2ybQ%Q!Rb`z8Mmlsw zg1|i_NkWX6=s Njx4YsMCo8z>FL7<=!)=w|@9c-?%_N9aWwC{KdETOW+56EsqX} z0tLl|(;+|*9$8s+N7afXakmEmh(ZgBjQ-Ob9JlUDVT(jm0(PrQu9kY$1V|ipnXR6` z*Tv _Gv8$rAdD|3;(CqwMRX_kVIyr@ZyT= h|2nRMuv#PW~ z-9)#N+^BBkRXgB2q!)=g%~m(o`T|JAQg(RtJJTmlm^5qIZ-rW?Hz9aGh(bf@5Cr%< zW*h4SiKzCk-djcUA2R=gNg=oQ3~hQn3CNB JGmJIK@lmb0ThyiwHnQz z8jut!H6l5MPr*F4qMciF?d>H!Nx){=m91ZFsc1V9k>D;Llg{CFeSBhn9_la`?%aOz z$nTdq<6a&U0 !zK_e$V7Dy3=M)V#wAl9Q(<{kXyU~Z)?ZN!MS569;7L!+YEq*dNfaN<~{t6kqB zal}}6{p7wa$D2CSq6v&bVfsgAkDB)OmrFi;eq?s`@MjmS-t^;_)3d`Lje)xT7sb@I zwKsB0^aKEa9hf}gwWU97U;pux-eEL*ePu~OeqDRHgnB$Sm&*b92l$1Bv~j{ANs@d> z=iW9jxm0s)r5vUOq;$WpfO5aE;DBI%I_k37-H!WCh{g*D3KNNVkfH3#wUUx1S350$ 
z#60Tjdv+hcctedkiVp5Qdb&{U-z_sGI{KEOv87;iCn**FEf6q;BMyljJpJQk3+GIG ztap!LFMhRc=MSGu%?=Yjf<+pkIKE-i#Tq4BzC?)CHXCDh-w)Z8vFCOf9ki64{ylDTs_#0r3qMf^L;OwZUW{tjG%!w&zYlI*X zOV!TrkDJ9ko_lAamvqAS_c# Xz}*(D)%!Ul?lj3y z?z!wpd;H1nUOOVh5d{vMG9j4(5l)jbXZOLzn@b!9ixH6!$p)y?*;G(e+u6#MbZMkH zTh<&=QN{7ShV{5}-T1(bduS*;KXG(g3==oX>aQL@+kWODam3tIRdC^arGxGl6rP%q z#DUx {77x?OIg$qrZ{QPJIdM)G)qkdP36ibK&*l5?im+U|9b1Zt_vIeBQu z;aV??*gw^Ns$Xbucw870QC5{)znrV^v}+OO(Nz@Y -IZ4gN*k3904T!l1G7`(!kAd|(Z#Pm z{lfA?MQRMVtS+|)rP67XJ6+P~0HOliW_7@x&XkM5_-L_EKqJj26YRN*sC=-lfRvxQ zSf$dtNdSN-am1TzcJJEs-njHoE}_0~;+j%tB|u1Qq#uVxb=^2}>_)Suy`TlS$B_Ti zH+%Br1`oL9VMUVd0>W^TX#erT ITYY5x}2|by4!1o zX0_IW<3Nx-Bs+lp_l8td{ fpz hc$t>hf1RdMaPCxFsHrd_>Kz(xZRf` z36HTUf6wxb1upLwxHm0@_q?hmEz7y~FPJ1BNRxPIP#AWY%p{d35J$(i3C;TxA#?Y~ zqwDJ=0h1(t`(Ffd>z|y|DQ`VLw0jKimIsCU ;xJVoTf6RfvBH9a*v#y10e-EuL@iYXRVIt8wU1y3f|Fj?dn`7KMQ!yN zK;2HX)fh^mPP-es9b-x2cN<=JF}$?4)SF<59yYpn5U+JAK`ZzEbgohBKwGX)Vs5MQ z>b9jDu4r8?g4&V0_|@6VcAd((cKP7)S7)req<% b1pv#%D1A01%*1DFBcp;c~TneUc>bwn2F` z4lnNUX9oCVcJ;Mg%eK}gO?xq$0RUhM_?Z(Ybfq}-rH5AU(e|9sniR7YmQd7l;xoO4 zG}vY=K6R$y4*MvHxNOqmasw|UuK(nT8SUD05B=iSpDFC%q`|LzG-q%ysjt1TdGV^# z(oS6#+Q1A9O&&5bOW;&hp4q$QoTbZ?<1^?006|z-*60yQn6~EH@!zf>nZs|DMrygM zFfcg1S630_ke6N9y {h%Lq&!_1v_VCS-|86hqJsd3>A}tJP?N_`$I;kx4CS&q&PU(90WY z>+7U?i?ie2!~;O30-Kc+5fa`drL(jXEeIoH;vypaNVCplZ7ogIjt PC~f9yGU@LY|trN~4}bh=$#cI4+H%@{o}F^n#&lPFbM6ksrnp{*l;#5@j%3r2B( zk|f^t;w^9dE OV?vkBfyX+T3&u3nm`P}$1W5$de*0*b{ znDbz%(n<1`U~hjDS4Cd6+3F+-!~ *ZmdP{k~ogj z(+2d46me;o TI2iR0@qA(X&r9jqsS2y8Cuax(SuRjLhsAOT|#PqCxBRP9$p6 zmh9bGz{r@;)`op2Orc-brzd4{A<|>f7X7}nz}j*xI)@k2vtKe3xJ{bE-}hc`qsfv4 zfq6U#2>=NQyv3IraX94_ xn#u`f8VqtUupNWID>NZL8jvmX&Q-t(O zW#4KAK>|PmoFF@FRorZg7e)SNvu`(_7a5<_wR ykQ@{4PX`!=-6dHG4FC%8V8Bbv!kusudUOp75(A4u<8-*Z z_f-^WU5|CtD!iSoFF1XzN@c!f9pEO8m^Xh~|F}RV4#RGOz=5b+zwTiHd>U4{`@0<% ziX|q`%{iICJPuRU$<>>iqPs>3{n-Ej7L`hcAP99^&2H>wdkECktTn?Z-ty^MZxx)? 
zx2wN^1)(*E_MC4}+uE442#*KG2@(Jj$6NI*!0R)9LTaFh4w XmhCLxR=Wr^7kGtaozrS%(w~5*1g_?L8VYER#$76 zrcfyW0suIjH+Pi@tdsA3*Ok^?9{;uy(s}@*(WA!A91+F )J6F< zI7i&=+3Ee;I8WTw1PHu$t9HYd!De+IIUs^gI88PC7q2+eWN{&_Hzo HZWv2Fd8 z@F#DTc)I)Yr~RKG2rwZC0tAk`U9Q%r!Eu}ffJ&jTSQLQcxXW(w+*%ePPMc1pa{@q; z0B@6&=H63%%gz Xf&iAKESYmVp8o&Taa3_r-Fx3nu{(^`A6(Kzg*W zMJ3f=`(e?VUym2o$W$t2Q{}mzKmKrwB0MTYEMx$X^wi|xU8BWJ3IqU9Ufg@Kyk2Q_ zxm^xZSwWe??)0Xph$CagY*y>elBVQFgF>Oz>on5qyM8?@F*;ixnlvdCCW{@$NfH18 zM%pNk9u!4y^(YdxJMdd`F=^J@5HB{Y=y9(;(=|lMXfX%K4tz9s-I4rynNp=vG#2h( z{`M!Q`F%4)%oc^R;@H_n`^|oi)E_%m4I>Zd-fL9uMSqE)9E4sNs>exGY}vF z>9DtKCX;weOg@Fd3LE*#gmgX~cbl#*pE={xy%($H3Z+WXEUCD#{hRmaZgP$PXkrWi zErozW0vHgEp>j!Sv(4$Sn)T8;nZ3=escc@vuxE#c0i3itIYVEV&}xRD^90GGpYF>6 z5XlG_G gY|mzM4=Q2&Xk6hlc$XqRs3X({)c1T;83J)=u vZ1gs3HiApeP9dPT;pRJX%7Z0Ryu$!r8!>yW`tMi@x5KQ(CW3DiqS1YkzE5ylA`J zKciQli~uT5qf@9fDuk(u@`_7JWk!?HplvQcd%m!$$%q5QX1AJ*MzhTcqXdb2U;@KP zfOtG8+R-cqdteO103h)@cjaKW+l8VyAaNXT^LNtY_MjLB0E~MuqK*FL@pv#C2Y|ru zT={!kZa0PzfW(P5*CAy`zge;EY)M^pU4x{dNhWLURcf_bqtzR&4){J6@g(N QdZXEHGpTEvv>tBub5CSN1+XYs&6W?}`tXPSS85w&N~J 6V^7Y1xr_OXu#fctueqFOe*~RAq7? zcUTJ%r%`fl$@}XnIDOw `%Lv4#&foFT2Yc9)Hy+yYez$hJS_En@+%zdO zt5^5TzR#|@puD%yfyQKKO?!2K2vS} SxokO1_IJK zoGxP@PotuUysE;~@k|nTXc|s!+TB2pAKE|4%LQ&;hs%!{G`tHP0K{sjIK1gdQ;Yt{ z5(i{Xd45 +0kWO8YJ`IUV?eDnRmGXCgEy#xRN zW@t*UCuU5_493;<`6myYSKJbn1ZHchD!qDD6Wwpr cJ zou!VjLBqS;v`(NN#O*?GfTAdZw3>;iQ5ji-9_tgs1J<%5o0omM=%+J9bqb|I(Og?_ zYU|faHx+pzdJpdxN&^4@1VIr5Z8bBr8LtQoyI>dtBthWBEmISl%jI)8bljqlDWo!! 
z-DXzGlv<;;t*Zf{m1%Wq3p=iBT0%l=NGU8kzj5B=!QH#|9{uXF1I4Ni8}1#kNb03Z zwUq#X5t)<_6B=}9;t5%mq*>LHcp|<_a$wjUi6@*UlilUQ004rt?DX(3o~8Eq&XXms zq|sw~r$uo9V6d1p8kHnbK;EIZk|at33Wdvt?oxE94FMT_v!cWN5k<4aZue%&CsBvX z){?`?X){}4xKkfvwDRofVyVi0dzPh!M*H)4{P?7}FfN-8K=`m{MrFnZvMJV*ALh;e zYU9DYYKdH_RLE=d5C8by+_jR-3B!`YTih`HB4hkHEGi^Fx&KT>t;}FD8T6XQ{QcWb z)S7K51WanFM6WlyP=WyQ6Q_+ykMLtqlt-5>-FvB`* _Vcai`A(0(=|U5=LZg4=FyI|fMLL|SG%+|}CG zAsQp>$&cS38pVOoy8UZ*)!lZvCNZb6>fnxiYIv`gK7Xs{Eh$6dSnF;kaYs%75@2o& z27n|zPKUGY-$@sSBLI*jiQzZ^0AlgPk4<^5r$3#rs|$X7bH*pXo-30nluAW&L+PcR zt3Up7i>%+Ams-l>5d`6Kxk*3*lEB+6e+YtbIGr$PF;U%o5Gsu(^7CVnsLLuVtI*jU zc9Tvkk*E+rWw89BVnXSdOHF`akR0Z^Mw+aNLWgTlM^&Tf%~q+3&Z zX!+#cX+8Q6dV1dGYsyC+mInZM6sa*$m)T;qNG|+-rB-3S-KEsU#|~Z15Occ6hX(N3 z3@RsM%G)pIR;hl=ZEBq@4A(b*yEQSGGroVaKQnFG+}X7%>&}Z6Y8!%COLu)b?a-Qt zP#$o*+`?XOEnPmgYg9{8zo1cX&(3YoZaGt~w4wU*t3Ua|Hsguz0nUPB2aDB46!Ky^ zV)PvUWY*z2jaBKu2 aaLdkL zX)?QRc6dYOwGw&HKyj3Sa2ixa7t3|7o9Yl+d-i0(#84
U?yZrYb^wF}{C2wpv zRiiX}a8z@7#k3)tqhkZ %YJ zm);ul%ZFD^6zdYwIlv=7zUYHp@Z(FiubrGkZFkL}m@oz)JsnonZX5>?D?IK#+}9}- zzfrGENjbExxgfU+8`!OFcf#BjgSM=+83BMwnCt5r4H>~aKA#EP7M;4_Y6SvF8t!bY zDm7#$3j{m{CuG )d50EBAf*Sy$f2qm%elfZ0^#$JTGo P^j7=R>j z)MM8x6;;(uBp^I4Q(gHDZDu-~&Eatv6q3MEkHe}_%cW{N03_^ENgE87L^g{f3{M~X z+N+leJ~$#PIk@Cq3h~v)PiF@4c>K=S7c{@H*uG;Pi_R?!&wk?Rp115rte~)zAtSQ( z>(MSlMrU;}E7AOudOtb)-Acux1LY>=m0v%-zUM2ENX(`nE?kf>c-p&*XOE7g0|G}q zPKQRRtd?sr0Gu|ftfA4E62;+i*mMfwa@mYVldA0ZuO{vLibAKexqKeG c^t9^S$&-v1Ek$(kkvI>QsQ`A z7EKTsoDi1~#5gZ0_+#4&QqzTF$M`8Dr%x0ELSJ `rdiOfFP1-1LR>0lbgxce%$L(}T>YJom69Q1Jyt!H0Yz_?JvY8B8 z#E|jNH`J^2yDv!eH} nT-VSfQ6#d(JPw;q zqd)*h+%9KBy c~{`Sp{jzbpVW4u{9$vw3VLo5|;kL*n|5oILrd$1-BY?7PI=;)v63 zH I9+;wHua(R?EO2p$Z=`D`N+-`&W#vZScS#?*we*ewl!CitOhg>ThJg7@- zK!eV%ys&AdU!gc)@SHcENDoWs)icJwpvh3X=bIG*Nl(7EM&JF751;Pd^NodX)~mlg zR3bH%A6z)&;Bvp<05))W5Wwp?_1hm_9T452t|%@ouaqHV= +E($!^smxR-6EY>fDmgwmd!fqb~Oj5V2j)v5xxsJK^%>D=vi3%?e~O zs1Qk@uuE5Z?7QV(pF@UzbKv`#ncl2-xW_EZ+xf}oCnM({-t=@F6Ofq0 YDZVm|& z$5eTTOUwvKAj)g2KH4~L_|l&DSk+LO-1O&HexCQzl0Q~1AN=*)o ZYwaKxr+EdFEVw#^Ss7Q@Sdvf-~XqCFYM(G43%jMM9H<)_Fa@Z_7gt`oBb*)5+0)n&{ z8w$$|-Mer(90mn-YSk*KR7C>9 xX6pru3We-g}kWWkVR6s9 aQL> zaJVM?saYSs`&>qASMW>89QV@e^$p8+mFZ;Hb}!65uuLEp@+hzi5hM RM?6l2OeK{YF@Tu#DoLHx91+UpFesSE zsgbMXYCQmiS*2{Ms4*ucaye`Ul^xaZiOH|aB&JOl>dZAKe|-1kPYec=L8no9VZEPw z>w_83bQ1wUV2H= Vl$uHDOl|LS>F;t(}IQR56 zzMmhLLBJj&u=_K!=gpcnB$5dT40b!4B?_h5j04zYP|6i*Td+TeNx?nN#;QhHvknKC zNh?z{H`~I)xhw{i8yOoH6BEQ}u0FSS3)|5c1&nZ1-?8Jz1^V4or3iyu-lQ=x(x1rg z)-|TpyF_1ESzgnmLvW(@*l&Bg_ef2e+?h<%%Cts k&maB^BgXy~0o)Tz^ItyUL+ zX#9lql(4XXJI0+LJ~cTuI;hN~VbWoXyy%akyEdOUCC&JBT;Hw%6aau