Source code for tensorflow.python.keras.losses

# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in loss functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import abc

import six

from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops.losses import losses_impl
from tensorflow.python.ops.losses import util as tf_losses_util
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls


@keras_export('keras.losses.Loss')
class Loss(object):
  """Loss base class.

  To be implemented by subclasses:
  * `call()`: Contains the logic for loss calculation using `y_true`, `y_pred`.

  Example subclass implementation:
  ```
  class MeanSquaredError(Loss):
    def call(self, y_true, y_pred):
      y_pred = ops.convert_to_tensor(y_pred)
      y_true = math_ops.cast(y_true, y_pred.dtype)
      return K.mean(math_ops.square(y_pred - y_true), axis=-1)
  ```

  When used with `tf.distribute.Strategy`, outside of built-in training loops
  such as `tf.keras` `compile` and `fit`, please use 'SUM' or 'NONE' reduction
  types, and reduce losses explicitly in your training loop. Using 'AUTO' or
  'SUM_OVER_BATCH_SIZE' will raise an error. Please see
  https://www.tensorflow.org/alpha/tutorials/distribute/training_loops for more
  details on this.

  You can implement 'SUM_OVER_BATCH_SIZE' using global batch size like:
  ```
  with strategy.scope():
    loss_obj = tf.keras.losses.CategoricalCrossentropy(
        reduction=tf.keras.losses.Reduction.NONE)
    ....
    loss = (tf.reduce_sum(loss_obj(labels, predictions)) *
            (1. / global_batch_size))
  ```

  Args:
    reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
      loss. Default value is `AUTO`. `AUTO` indicates that the reduction
      option will be determined by the usage context. For almost all cases
      this defaults to `SUM_OVER_BATCH_SIZE`.
      When used with `tf.distribute.Strategy`, outside of built-in training
      loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
      `SUM_OVER_BATCH_SIZE` will raise an error. Please see
      https://www.tensorflow.org/alpha/tutorials/distribute/training_loops
      for more details on this.
    name: Optional name for the op.
  """

  def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name=None):
    losses_utils.ReductionV2.validate(reduction)
    self.reduction = reduction
    self.name = name

  def __call__(self, y_true, y_pred, sample_weight=None):
    """Invokes the `Loss` instance.

    Args:
      y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`
      y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`
      sample_weight: Optional `sample_weight` acts as a coefficient for the
        loss. If a scalar is provided, then the loss is simply scaled by the
        given value. If `sample_weight` is a tensor of size `[batch_size]`,
        then the total loss for each sample of the batch is rescaled by the
        corresponding element in the `sample_weight` vector. If the shape of
        `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be broadcasted
        to this shape), then each loss element of `y_pred` is scaled by the
        corresponding value of `sample_weight`. (Note on `dN-1`: all loss
        functions reduce by 1 dimension, usually axis=-1.)

    Returns:
      Weighted loss float `Tensor`. If `reduction` is `NONE`, this has
        shape `[batch_size, d0, .. dN-1]`; otherwise, it is scalar. (Note
        `dN-1` because all loss functions reduce by 1 dimension, usually
        axis=-1.)

    Raises:
      ValueError: If the shape of `sample_weight` is invalid.
    """
    # If we are wrapping a lambda function strip '<>' from the name as it is
    # not accepted in scope name.
    scope_name = 'lambda' if self.name == '<lambda>' else self.name
    graph_ctx = tf_utils.graph_context_for_symbolic_tensors(
        y_true, y_pred, sample_weight)
    with K.name_scope(scope_name or self.__class__.__name__), graph_ctx:
      losses = self.call(y_true, y_pred)
      return losses_utils.compute_weighted_loss(
          losses, sample_weight, reduction=self._get_reduction())

  @classmethod
  def from_config(cls, config):
    """Instantiates a `Loss` from its config (output of `get_config()`).

    Args:
      config: Output of `get_config()`.

    Returns:
      A `Loss` instance.
    """
    return cls(**config)

  def get_config(self):
    return {'reduction': self.reduction, 'name': self.name}

  @abc.abstractmethod
  @doc_controls.for_subclass_implementers
  def call(self, y_true, y_pred):
    """Invokes the `Loss` instance.

    Args:
      y_true: Ground truth values, with the same shape as 'y_pred'.
      y_pred: The predicted values.
    """
    raise NotImplementedError('Must be implemented in subclasses.')

  def _get_reduction(self):
    """Handles `AUTO` reduction cases and returns the reduction value."""
    if distribution_strategy_context.has_strategy() and (
        self.reduction == losses_utils.ReductionV2.AUTO or
        self.reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE):
      raise ValueError(
          'Please use `tf.keras.losses.Reduction.SUM` or '
          '`tf.keras.losses.Reduction.NONE` for loss reduction when losses '
          'are used with `tf.distribute.Strategy` outside of the built-in '
          'training loops. You can implement '
          '`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` using global batch '
          'size like:\n```\nwith strategy.scope():\n'
          '    loss_obj = tf.keras.losses.CategoricalCrossentropy('
          'reduction=tf.keras.losses.Reduction.NONE)\n....\n'
          '    loss = tf.reduce_sum(loss_obj(labels, predictions)) * '
          '(1. / global_batch_size)\n```\nPlease see '
          'https://www.tensorflow.org/alpha/tutorials/distribute/training_loops'
          ' for more details.')
    if self.reduction == losses_utils.ReductionV2.AUTO:
      return losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE
    return self.reduction
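

# For illustration: a subclass only needs to implement `call()`; reduction and
# sample weighting are handled by `Loss.__call__`. The class name below is
# hypothetical, not part of this module:
#
#   class MeanAbsoluteDifference(Loss):
#
#     def call(self, y_true, y_pred):
#       y_pred = ops.convert_to_tensor(y_pred)
#       y_true = math_ops.cast(y_true, y_pred.dtype)
#       return K.mean(math_ops.abs(y_pred - y_true), axis=-1)
#
#   # MeanAbsoluteDifference()([0., 1.], [1., 1.]) reduces the per-sample
#   # values with SUM_OVER_BATCH_SIZE by default and returns a scalar (0.5).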


class LossFunctionWrapper(Loss):
  """Wraps a loss function in the `Loss` class.

  Args:
    fn: The loss function to wrap, with signature `fn(y_true, y_pred,
      **kwargs)`.
    reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
      loss. Default value is `AUTO`. `AUTO` indicates that the reduction
      option will be determined by the usage context. For almost all cases
      this defaults to `SUM_OVER_BATCH_SIZE`.
      When used with `tf.distribute.Strategy`, outside of built-in training
      loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
      `SUM_OVER_BATCH_SIZE` will raise an error. Please see
      https://www.tensorflow.org/alpha/tutorials/distribute/training_loops
      for more details on this.
    name: (Optional) name for the loss.
    **kwargs: The keyword arguments that are passed on to `fn`.
  """

  def __init__(self,
               fn,
               reduction=losses_utils.ReductionV2.AUTO,
               name=None,
               **kwargs):
    super(LossFunctionWrapper, self).__init__(reduction=reduction, name=name)
    self.fn = fn
    self._fn_kwargs = kwargs

  def call(self, y_true, y_pred):
    """Invokes the `LossFunctionWrapper` instance.

    Args:
      y_true: Ground truth values.
      y_pred: The predicted values.

    Returns:
      Loss values per sample.
    """
    if tensor_util.is_tensor(y_pred) and tensor_util.is_tensor(y_true):
      y_pred, y_true = tf_losses_util.squeeze_or_expand_dimensions(
          y_pred, y_true)
    return self.fn(y_true, y_pred, **self._fn_kwargs)

  def get_config(self):
    config = {}
    for k, v in six.iteritems(self._fn_kwargs):
      config[k] = K.eval(v) if tf_utils.is_tensor_or_variable(v) else v
    base_config = super(LossFunctionWrapper, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
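

# For illustration: any function with signature `fn(y_true, y_pred, **kwargs)`
# can be wrapped; the name `scaled_mse` and its `scale` kwarg below are
# hypothetical:
#
#   def scaled_mse(y_true, y_pred, scale=1.0):
#     return scale * mean_squared_error(y_true, y_pred)
#
#   loss_obj = LossFunctionWrapper(scaled_mse, name='scaled_mse', scale=0.5)
#   # `scale` is stored in `_fn_kwargs`, forwarded to `fn` on every call, and
#   # serialized by `get_config()`.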


@keras_export('keras.losses.MeanSquaredError')
class MeanSquaredError(LossFunctionWrapper):
  """Computes the mean of squares of errors between labels and predictions.

  `loss = square(y_true - y_pred)`

  Usage:

  ```python
  mse = tf.keras.losses.MeanSquaredError()
  loss = mse([0., 0., 1., 1.], [1., 1., 1., 0.])
  print('Loss: ', loss.numpy())  # Loss: 0.75
  ```

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.MeanSquaredError())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='mean_squared_error'):
    super(MeanSquaredError, self).__init__(
        mean_squared_error, name=name, reduction=reduction)


@keras_export('keras.losses.MeanAbsoluteError')
class MeanAbsoluteError(LossFunctionWrapper):
  """Computes the mean of absolute difference between labels and predictions.

  `loss = abs(y_true - y_pred)`

  Usage:

  ```python
  mae = tf.keras.losses.MeanAbsoluteError()
  loss = mae([0., 0., 1., 1.], [1., 1., 1., 0.])
  print('Loss: ', loss.numpy())  # Loss: 0.75
  ```

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.MeanAbsoluteError())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='mean_absolute_error'):
    super(MeanAbsoluteError, self).__init__(
        mean_absolute_error, name=name, reduction=reduction)


@keras_export('keras.losses.MeanAbsolutePercentageError')
class MeanAbsolutePercentageError(LossFunctionWrapper):
  """Computes the mean absolute percentage error between `y_true` and `y_pred`.

  `loss = 100 * abs(y_true - y_pred) / y_true`

  Usage:

  ```python
  mape = tf.keras.losses.MeanAbsolutePercentageError()
  loss = mape([0., 0., 1., 1.], [1., 1., 1., 0.])
  print('Loss: ', loss.numpy())  # Loss: 5e+08
  ```

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.MeanAbsolutePercentageError())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='mean_absolute_percentage_error'):
    super(MeanAbsolutePercentageError, self).__init__(
        mean_absolute_percentage_error, name=name, reduction=reduction)


@keras_export('keras.losses.MeanSquaredLogarithmicError')
class MeanSquaredLogarithmicError(LossFunctionWrapper):
  """Computes the mean squared logarithmic error between `y_true` and `y_pred`.

  `loss = square(log(y_true) - log(y_pred))`

  Usage:

  ```python
  msle = tf.keras.losses.MeanSquaredLogarithmicError()
  loss = msle([0., 0., 1., 1.], [1., 1., 1., 0.])
  print('Loss: ', loss.numpy())  # Loss: 0.36034
  ```

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.MeanSquaredLogarithmicError())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='mean_squared_logarithmic_error'):
    super(MeanSquaredLogarithmicError, self).__init__(
        mean_squared_logarithmic_error, name=name, reduction=reduction)


@keras_export('keras.losses.BinaryCrossentropy')
class BinaryCrossentropy(LossFunctionWrapper):
  """Computes the cross-entropy loss between true labels and predicted labels.

  Use this cross-entropy loss when there are only two label classes (assumed
  to be 0 and 1). For each example, there should be a single floating-point
  value per prediction.

  In the snippet below, each of the four examples has only a single
  floating-point value, and both `y_pred` and `y_true` have the shape
  `[batch_size]`.

  Usage:

  ```python
  bce = tf.keras.losses.BinaryCrossentropy()
  loss = bce([0., 0., 1., 1.], [1., 1., 1., 0.])
  print('Loss: ', loss.numpy())  # Loss: 11.522857
  ```

  Usage with the `tf.keras` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.BinaryCrossentropy())
  ```

  Args:
    from_logits: Whether to interpret `y_pred` as a tensor of
      [logit](https://en.wikipedia.org/wiki/Logit) values. By default, we
      assume that `y_pred` contains probabilities (i.e., values in [0, 1]).
      Note: Using from_logits=True may be more numerically stable.
    label_smoothing: Float in [0, 1]. When 0, no smoothing occurs. When > 0,
      we compute the loss between the predicted labels and a smoothed version
      of the true labels, where the smoothing squeezes the labels towards 0.5.
      Larger values of `label_smoothing` correspond to heavier smoothing.
    reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
      loss. Default value is `AUTO`. `AUTO` indicates that the reduction
      option will be determined by the usage context. For almost all cases
      this defaults to `SUM_OVER_BATCH_SIZE`.
      When used with `tf.distribute.Strategy`, outside of built-in training
      loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
      `SUM_OVER_BATCH_SIZE` will raise an error. Please see
      https://www.tensorflow.org/alpha/tutorials/distribute/training_loops
      for more details on this.
    name: (Optional) Name for the op.
  """

  def __init__(self,
               from_logits=False,
               label_smoothing=0,
               reduction=losses_utils.ReductionV2.AUTO,
               name='binary_crossentropy'):
    super(BinaryCrossentropy, self).__init__(
        binary_crossentropy,
        name=name,
        reduction=reduction,
        from_logits=from_logits,
        label_smoothing=label_smoothing)
    self.from_logits = from_logits
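

# For illustration (arbitrary values): with `from_logits=True` the same loss
# accepts raw scores instead of probabilities:
#
#   bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
#   loss = bce([0., 1.], [-2.0, 2.0])  # sigmoid is applied internally
#
# Passing logits directly is numerically more stable than computing
# sigmoid(logits) first and passing the result as probabilities.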


@keras_export('keras.losses.CategoricalCrossentropy')
class CategoricalCrossentropy(LossFunctionWrapper):
  """Computes the crossentropy loss between the labels and predictions.

  Use this crossentropy loss function when there are two or more label
  classes. We expect labels to be provided in a `one_hot` representation. If
  you want to provide labels as integers, please use
  `SparseCategoricalCrossentropy` loss. There should be `# classes` floating
  point values per feature.

  In the snippet below, there are `# classes` floating point values per
  example. The shape of both `y_pred` and `y_true` is
  `[batch_size, num_classes]`.

  Usage:

  ```python
  cce = tf.keras.losses.CategoricalCrossentropy()
  loss = cce(
      [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]],
      [[.9, .05, .05], [.05, .89, .06], [.05, .01, .94]])
  print('Loss: ', loss.numpy())  # Loss: 0.0945
  ```

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.CategoricalCrossentropy())
  ```

  Args:
    from_logits: Whether `y_pred` is expected to be a logits tensor. By
      default, we assume that `y_pred` encodes a probability distribution.
      Note: Using from_logits=True may be more numerically stable.
    label_smoothing: Float in [0, 1]. When > 0, label values are smoothed,
      meaning the confidence on label values is relaxed. e.g.
      `label_smoothing=0.2` means that we will use a value of `0.1` for label
      `0` and `0.9` for label `1`.
    reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
      loss. Default value is `AUTO`. `AUTO` indicates that the reduction
      option will be determined by the usage context. For almost all cases
      this defaults to `SUM_OVER_BATCH_SIZE`.
      When used with `tf.distribute.Strategy`, outside of built-in training
      loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
      `SUM_OVER_BATCH_SIZE` will raise an error. Please see
      https://www.tensorflow.org/alpha/tutorials/distribute/training_loops
      for more details on this.
    name: Optional name for the op.
  """

  def __init__(self,
               from_logits=False,
               label_smoothing=0,
               reduction=losses_utils.ReductionV2.AUTO,
               name='categorical_crossentropy'):
    super(CategoricalCrossentropy, self).__init__(
        categorical_crossentropy,
        name=name,
        reduction=reduction,
        from_logits=from_logits,
        label_smoothing=label_smoothing)


@keras_export('keras.losses.SparseCategoricalCrossentropy')
class SparseCategoricalCrossentropy(LossFunctionWrapper):
  """Computes the crossentropy loss between the labels and predictions.

  Use this crossentropy loss function when there are two or more label
  classes. We expect labels to be provided as integers. If you want to provide
  labels using `one-hot` representation, please use `CategoricalCrossentropy`
  loss. There should be `# classes` floating point values per feature for
  `y_pred` and a single floating point value per feature for `y_true`.

  In the snippet below, there is a single floating point value per example for
  `y_true` and `# classes` floating point values per example for `y_pred`.
  The shape of `y_true` is `[batch_size]` and the shape of `y_pred` is
  `[batch_size, num_classes]`.

  Usage:

  ```python
  cce = tf.keras.losses.SparseCategoricalCrossentropy()
  loss = cce(
      [0, 1, 2],
      [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]])
  print('Loss: ', loss.numpy())  # Loss: 0.3239
  ```

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.SparseCategoricalCrossentropy())
  ```

  Args:
    from_logits: Whether `y_pred` is expected to be a logits tensor. By
      default, we assume that `y_pred` encodes a probability distribution.
      Note: Using from_logits=True may be more numerically stable.
    reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
      loss. Default value is `AUTO`. `AUTO` indicates that the reduction
      option will be determined by the usage context. For almost all cases
      this defaults to `SUM_OVER_BATCH_SIZE`.
      When used with `tf.distribute.Strategy`, outside of built-in training
      loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
      `SUM_OVER_BATCH_SIZE` will raise an error. Please see
      https://www.tensorflow.org/alpha/tutorials/distribute/training_loops
      for more details on this.
    name: Optional name for the op.
  """

  def __init__(self,
               from_logits=False,
               reduction=losses_utils.ReductionV2.AUTO,
               name='sparse_categorical_crossentropy'):
    super(SparseCategoricalCrossentropy, self).__init__(
        sparse_categorical_crossentropy,
        name=name,
        reduction=reduction,
        from_logits=from_logits)
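

# For illustration (arbitrary values): the sparse variant takes integer class
# indices where `CategoricalCrossentropy` takes one-hot rows, so these two
# calls compute the same loss:
#
#   scce = tf.keras.losses.SparseCategoricalCrossentropy()
#   cce = tf.keras.losses.CategoricalCrossentropy()
#   scce([1], [[.05, .9, .05]])  # == cce([[0., 1., 0.]], [[.05, .9, .05]])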


@keras_export('keras.losses.Hinge')
class Hinge(LossFunctionWrapper):
  """Computes the hinge loss between `y_true` and `y_pred`.

  `loss = maximum(1 - y_true * y_pred, 0)`

  `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
  provided we will convert them to -1 or 1.

  Usage:

  ```python
  h = tf.keras.losses.Hinge()
  loss = h([-1., 1., 1.], [0.6, -0.7, -0.5])
  # loss = max(0, 1 - y_true * y_pred) = [1.6 + 1.7 + 1.5] / 3
  print('Loss: ', loss.numpy())  # Loss: 1.6
  ```

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.Hinge())
  ```
  """

  def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='hinge'):
    super(Hinge, self).__init__(hinge, name=name, reduction=reduction)


@keras_export('keras.losses.SquaredHinge')
class SquaredHinge(LossFunctionWrapper):
  """Computes the squared hinge loss between `y_true` and `y_pred`.

  `loss = square(maximum(1 - y_true * y_pred, 0))`

  `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
  provided we will convert them to -1 or 1.

  Usage:

  ```python
  sh = tf.keras.losses.SquaredHinge()
  loss = sh([-1., 1., 1.], [0.6, -0.7, -0.5])
  # loss = (max(0, 1 - y_true * y_pred))^2 = [1.6^2 + 1.7^2 + 1.5^2] / 3
  print('Loss: ', loss.numpy())  # Loss: 2.566666
  ```

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.SquaredHinge())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='squared_hinge'):
    super(SquaredHinge, self).__init__(
        squared_hinge, name=name, reduction=reduction)


@keras_export('keras.losses.CategoricalHinge')
class CategoricalHinge(LossFunctionWrapper):
  """Computes the categorical hinge loss between `y_true` and `y_pred`.

  `loss = maximum(neg - pos + 1, 0)`
  where `pos = sum(y_true * y_pred)` and `neg = maximum((1 - y_true) * y_pred)`

  Usage:

  ```python
  ch = tf.keras.losses.CategoricalHinge()
  loss = ch([0., 1., 1.], [1., 0., 1.])
  print('Loss: ', loss.numpy())  # Loss: 1.0
  ```

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.CategoricalHinge())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='categorical_hinge'):
    super(CategoricalHinge, self).__init__(
        categorical_hinge, name=name, reduction=reduction)


@keras_export('keras.losses.Poisson')
class Poisson(LossFunctionWrapper):
  """Computes the Poisson loss between `y_true` and `y_pred`.

  `loss = y_pred - y_true * log(y_pred)`

  Usage:

  ```python
  p = tf.keras.losses.Poisson()
  loss = p([1., 9., 2.], [4., 8., 12.])
  print('Loss: ', loss.numpy())  # Loss: -0.35702705
  ```

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.Poisson())
  ```
  """

  def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='poisson'):
    super(Poisson, self).__init__(poisson, name=name, reduction=reduction)


@keras_export('keras.losses.LogCosh')
class LogCosh(LossFunctionWrapper):
  """Computes the logarithm of the hyperbolic cosine of the prediction error.

  `logcosh = log((exp(x) + exp(-x))/2)`,
  where x is the error `y_pred - y_true`.

  Usage:

  ```python
  l = tf.keras.losses.LogCosh()
  loss = l([0., 1., 1.], [1., 0., 1.])
  print('Loss: ', loss.numpy())  # Loss: 0.289
  ```

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.LogCosh())
  ```
  """

  def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='logcosh'):
    super(LogCosh, self).__init__(logcosh, name=name, reduction=reduction)


@keras_export('keras.losses.KLDivergence')
class KLDivergence(LossFunctionWrapper):
  """Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`.

  `loss = y_true * log(y_true / y_pred)`

  See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence

  Usage:

  ```python
  k = tf.keras.losses.KLDivergence()
  loss = k([.4, .9, .2], [.5, .8, .12])
  print('Loss: ', loss.numpy())  # Loss: 0.11891246
  ```

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.KLDivergence())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='kullback_leibler_divergence'):
    super(KLDivergence, self).__init__(
        kullback_leibler_divergence, name=name, reduction=reduction)


@keras_export('keras.losses.Huber')
class Huber(LossFunctionWrapper):
  """Computes the Huber loss between `y_true` and `y_pred`.

  For each value x in `error = y_true - y_pred`:

  ```
  loss = 0.5 * x^2                  if |x| <= d
  loss = 0.5 * d^2 + d * (|x| - d)  if |x| > d
  ```

  where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss

  Usage:

  ```python
  l = tf.keras.losses.Huber()
  loss = l([0., 1., 1.], [1., 0., 1.])
  print('Loss: ', loss.numpy())  # Loss: 0.333
  ```

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.Huber())
  ```

  Args:
    delta: A float, the point where the Huber loss function changes from a
      quadratic to linear.
    reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
      loss. Default value is `AUTO`. `AUTO` indicates that the reduction
      option will be determined by the usage context. For almost all cases
      this defaults to `SUM_OVER_BATCH_SIZE`.
      When used with `tf.distribute.Strategy`, outside of built-in training
      loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
      `SUM_OVER_BATCH_SIZE` will raise an error. Please see
      https://www.tensorflow.org/alpha/tutorials/distribute/training_loops
      for more details on this.
    name: Optional name for the op.
  """

  def __init__(self,
               delta=1.0,
               reduction=losses_utils.ReductionV2.AUTO,
               name='huber_loss'):
    super(Huber, self).__init__(
        huber_loss, name=name, reduction=reduction, delta=delta)


@keras_export('keras.metrics.mean_squared_error',
              'keras.metrics.mse',
              'keras.metrics.MSE',
              'keras.losses.mean_squared_error',
              'keras.losses.mse',
              'keras.losses.MSE')
def mean_squared_error(y_true, y_pred):
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  return K.mean(math_ops.squared_difference(y_pred, y_true), axis=-1)


@keras_export('keras.metrics.mean_absolute_error',
              'keras.metrics.mae',
              'keras.metrics.MAE',
              'keras.losses.mean_absolute_error',
              'keras.losses.mae',
              'keras.losses.MAE')
def mean_absolute_error(y_true, y_pred):
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  return K.mean(math_ops.abs(y_pred - y_true), axis=-1)


@keras_export('keras.metrics.mean_absolute_percentage_error',
              'keras.metrics.mape',
              'keras.metrics.MAPE',
              'keras.losses.mean_absolute_percentage_error',
              'keras.losses.mape',
              'keras.losses.MAPE')
def mean_absolute_percentage_error(y_true, y_pred):  # pylint: disable=missing-docstring
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  diff = math_ops.abs(
      (y_true - y_pred) / K.clip(math_ops.abs(y_true), K.epsilon(), None))
  return 100. * K.mean(diff, axis=-1)


@keras_export('keras.metrics.mean_squared_logarithmic_error',
              'keras.metrics.msle',
              'keras.metrics.MSLE',
              'keras.losses.mean_squared_logarithmic_error',
              'keras.losses.msle',
              'keras.losses.MSLE')
def mean_squared_logarithmic_error(y_true, y_pred):  # pylint: disable=missing-docstring
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  first_log = math_ops.log(K.clip(y_pred, K.epsilon(), None) + 1.)
  second_log = math_ops.log(K.clip(y_true, K.epsilon(), None) + 1.)
  return K.mean(math_ops.squared_difference(first_log, second_log), axis=-1)


def _maybe_convert_labels(y_true):
  """Converts binary labels into -1/1."""
  are_zeros = math_ops.equal(y_true, 0)
  are_ones = math_ops.equal(y_true, 1)
  is_binary = math_ops.reduce_all(math_ops.logical_or(are_zeros, are_ones))

  def _convert_binary_labels():
    # Convert the binary labels to -1 or 1.
    return 2. * y_true - 1.

  updated_y_true = smart_cond.smart_cond(is_binary,
                                         _convert_binary_labels,
                                         lambda: y_true)
  return updated_y_true
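

# For illustration (arbitrary values): 0/1 labels are remapped via 2*y - 1,
# while labels containing values outside {0, 1} pass through unchanged:
#
#   _maybe_convert_labels([0., 1., 1.])   # -> [-1., 1., 1.]
#   _maybe_convert_labels([-1., 1., 1.])  # -> [-1., 1., 1.] (unchanged)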


@keras_export('keras.metrics.squared_hinge', 'keras.losses.squared_hinge')
def squared_hinge(y_true, y_pred):
  """Computes the squared hinge loss between `y_true` and `y_pred`.

  Args:
    y_true: The ground truth values. `y_true` values are expected to be -1 or
      1. If binary (0 or 1) labels are provided we will convert them to -1 or
      1.
    y_pred: The predicted values.

  Returns:
    Tensor with one scalar loss entry per sample.
  """
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  y_true = _maybe_convert_labels(y_true)
  return K.mean(
      math_ops.square(math_ops.maximum(1. - y_true * y_pred, 0.)), axis=-1)


@keras_export('keras.metrics.hinge', 'keras.losses.hinge')
def hinge(y_true, y_pred):
  """Computes the hinge loss between `y_true` and `y_pred`.

  Args:
    y_true: The ground truth values. `y_true` values are expected to be -1 or
      1. If binary (0 or 1) labels are provided they will be converted to -1
      or 1.
    y_pred: The predicted values.

  Returns:
    Tensor with one scalar loss entry per sample.
  """
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  y_true = _maybe_convert_labels(y_true)
  return K.mean(math_ops.maximum(1. - y_true * y_pred, 0.), axis=-1)


@keras_export('keras.losses.categorical_hinge')
def categorical_hinge(y_true, y_pred):
  """Computes the categorical hinge loss between `y_true` and `y_pred`.

  Args:
    y_true: The ground truth values. `y_true` values are expected to be 0 or 1
      (e.g. a one-hot-encoded tensor).
    y_pred: The predicted values.

  Returns:
    A tensor.
  """
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  pos = math_ops.reduce_sum(y_true * y_pred, axis=-1)
  neg = math_ops.reduce_max((1. - y_true) * y_pred, axis=-1)
  return math_ops.maximum(0., neg - pos + 1.)


def huber_loss(y_true, y_pred, delta=1.0):
  """Computes Huber loss value.

  For each value x in `error = y_true - y_pred`:

  ```
  loss = 0.5 * x^2                  if |x| <= d
  loss = 0.5 * d^2 + d * (|x| - d)  if |x| > d
  ```

  where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss

  Args:
    y_true: tensor of true targets.
    y_pred: tensor of predicted targets.
    delta: A float, the point where the Huber loss function changes from a
      quadratic to linear.

  Returns:
    Tensor with one scalar loss entry per sample.
  """
  y_pred = math_ops.cast(y_pred, dtype=K.floatx())
  y_true = math_ops.cast(y_true, dtype=K.floatx())
  error = math_ops.subtract(y_pred, y_true)
  abs_error = math_ops.abs(error)
  quadratic = math_ops.minimum(abs_error, delta)
  linear = math_ops.subtract(abs_error, quadratic)
  return math_ops.add(
      math_ops.multiply(
          ops.convert_to_tensor(0.5, dtype=quadratic.dtype),
          math_ops.multiply(quadratic, quadratic)),
      math_ops.multiply(delta, linear))
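

# Worked example (arbitrary values) with delta = 1.0: for error = 2.5,
# |error| > delta, so quadratic = min(2.5, 1.0) = 1.0 and
# linear = 2.5 - 1.0 = 1.5, giving
#   loss = 0.5 * 1.0^2 + 1.0 * 1.5 = 2.0,
# which matches 0.5 * d^2 + d * (|x| - d).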


@keras_export('keras.losses.logcosh')
def logcosh(y_true, y_pred):
  """Logarithm of the hyperbolic cosine of the prediction error.

  `log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and
  to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works mostly
  like the mean squared error, but will not be so strongly affected by the
  occasional wildly incorrect prediction.

  Arguments:
    y_true: tensor of true targets.
    y_pred: tensor of predicted targets.

  Returns:
    Tensor with one scalar loss entry per sample.
  """
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)

  def _logcosh(x):
    return x + nn.softplus(-2. * x) - math_ops.log(2.)

  return K.mean(_logcosh(y_pred - y_true), axis=-1)
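

# The softplus form above is an overflow-safe rewrite of log(cosh(x)):
#   log(cosh(x)) = log((e^x + e^-x) / 2)
#                = x + log(1 + e^(-2x)) - log(2)
# and softplus(-2x) = log(1 + e^(-2x)).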


@keras_export('keras.metrics.categorical_crossentropy',
              'keras.losses.categorical_crossentropy')
def categorical_crossentropy(y_true,
                             y_pred,
                             from_logits=False,
                             label_smoothing=0):
  """Computes the categorical crossentropy loss.

  Args:
    y_true: tensor of true targets.
    y_pred: tensor of predicted targets.
    from_logits: Whether `y_pred` is expected to be a logits tensor. By
      default, we assume that `y_pred` encodes a probability distribution.
    label_smoothing: Float in [0, 1]. If > `0` then smooth the labels.

  Returns:
    Categorical crossentropy loss value.
  """
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  label_smoothing = ops.convert_to_tensor(label_smoothing, dtype=K.floatx())

  def _smooth_labels():
    num_classes = math_ops.cast(array_ops.shape(y_true)[1], y_pred.dtype)
    return y_true * (1.0 - label_smoothing) + (label_smoothing / num_classes)

  y_true = smart_cond.smart_cond(label_smoothing,
                                 _smooth_labels, lambda: y_true)
  return K.categorical_crossentropy(y_true, y_pred, from_logits=from_logits)
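

# For illustration (arbitrary values): with `label_smoothing=0.2` and two
# classes, a one-hot row [0., 1.] becomes
#   [0., 1.] * (1.0 - 0.2) + 0.2 / 2 = [0.1, 0.9]
# before the crossentropy is computed.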


@keras_export('keras.metrics.sparse_categorical_crossentropy',
              'keras.losses.sparse_categorical_crossentropy')
def sparse_categorical_crossentropy(y_true, y_pred, from_logits=False,
                                    axis=-1):
  return K.sparse_categorical_crossentropy(
      y_true, y_pred, from_logits=from_logits, axis=axis)


@keras_export('keras.metrics.binary_crossentropy',
              'keras.losses.binary_crossentropy')
def binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0):  # pylint: disable=missing-docstring
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  label_smoothing = ops.convert_to_tensor(label_smoothing, dtype=K.floatx())

  def _smooth_labels():
    return y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing

  y_true = smart_cond.smart_cond(label_smoothing,
                                 _smooth_labels, lambda: y_true)
  return K.mean(
      K.binary_crossentropy(y_true, y_pred, from_logits=from_logits), axis=-1)


@keras_export('keras.metrics.kullback_leibler_divergence',
              'keras.metrics.kld',
              'keras.metrics.KLD',
              'keras.losses.kullback_leibler_divergence',
              'keras.losses.kld',
              'keras.losses.KLD')
def kullback_leibler_divergence(y_true, y_pred):
  """Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`.

  `loss = y_true * log(y_true / y_pred)`

  See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence

  Usage:

  ```python
  loss = tf.keras.losses.KLD([.4, .9, .2], [.5, .8, .12])
  print('Loss: ', loss.numpy())  # Loss: 0.11891246
  ```

  Args:
    y_true: Tensor of true targets.
    y_pred: Tensor of predicted targets.

  Returns:
    A `Tensor` with loss.

  Raises:
    TypeError: If `y_true` cannot be cast to the `y_pred.dtype`.
  """
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  y_true = K.clip(y_true, K.epsilon(), 1)
  y_pred = K.clip(y_pred, K.epsilon(), 1)
  return math_ops.reduce_sum(y_true * math_ops.log(y_true / y_pred), axis=-1)
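

# Worked example (arbitrary values): for y_true = [.5] and y_pred = [.25],
#   loss = .5 * log(.5 / .25) = .5 * log(2) ~= 0.347.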


@keras_export('keras.metrics.poisson', 'keras.losses.poisson')
def poisson(y_true, y_pred):
  """Computes the Poisson loss between y_true and y_pred.

  The Poisson loss is the mean of the elements of the `Tensor`
  `y_pred - y_true * log(y_pred)`.

  Usage:

  ```python
  loss = tf.keras.losses.poisson([1.4, 9.3, 2.2], [4.3, 8.2, 12.2])
  print('Loss: ', loss.numpy())  # Loss: -0.8045559
  ```

  Args:
    y_true: Tensor of true targets.
    y_pred: Tensor of predicted targets.

  Returns:
    A `Tensor` with the mean Poisson loss.

  Raises:
    InvalidArgumentError: If `y_true` and `y_pred` have incompatible shapes.
  """
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  return K.mean(y_pred - y_true * math_ops.log(y_pred + K.epsilon()), axis=-1)


@keras_export(
    'keras.losses.cosine_similarity',
    v1=[
        'keras.metrics.cosine_proximity',
        'keras.metrics.cosine',
        'keras.losses.cosine_proximity',
        'keras.losses.cosine',
        'keras.losses.cosine_similarity',
    ])
def cosine_similarity(y_true, y_pred, axis=-1):
  """Computes the cosine similarity between labels and predictions.

  Note that it is a negative quantity between -1 and 0, where 0 indicates
  orthogonality and values closer to -1 indicate greater similarity. This
  makes it usable as a loss function in a setting where you try to maximize
  the proximity between predictions and targets.

  `loss = -sum(y_true * y_pred)`

  Args:
    y_true: Tensor of true targets.
    y_pred: Tensor of predicted targets.
    axis: Axis along which to determine similarity.

  Returns:
    Cosine similarity tensor.
  """
  y_true = nn.l2_normalize(y_true, axis=axis)
  y_pred = nn.l2_normalize(y_pred, axis=axis)
  return -math_ops.reduce_sum(y_true * y_pred, axis=axis)


@keras_export('keras.losses.CosineSimilarity')
class CosineSimilarity(LossFunctionWrapper):
  """Computes the cosine similarity between `y_true` and `y_pred`.

  Usage:

  ```python
  cosine_loss = tf.keras.losses.CosineSimilarity(axis=1)
  loss = cosine_loss([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]])
  # l2_norm(y_true) = [[0., 1.], [1./1.414, 1./1.414]]
  # l2_norm(y_pred) = [[1., 0.], [1./1.414, 1./1.414]]
  # l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]]
  # loss = -mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1))
  #      = -((0. + 0.) + (0.5 + 0.5)) / 2
  print('Loss: ', loss.numpy())  # Loss: -0.5
  ```

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.CosineSimilarity(axis=1))
  ```

  Args:
    axis: (Optional) Defaults to -1. The dimension along which the cosine
      similarity is computed.
    reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
      loss. Default value is `AUTO`. `AUTO` indicates that the reduction
      option will be determined by the usage context. For almost all cases
      this defaults to `SUM_OVER_BATCH_SIZE`.
      When used with `tf.distribute.Strategy`, outside of built-in training
      loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
      `SUM_OVER_BATCH_SIZE` will raise an error. Please see
      https://www.tensorflow.org/alpha/tutorials/distribute/training_loops
      for more details on this.
    name: Optional name for the op.
  """

  def __init__(self,
               axis=-1,
               reduction=losses_utils.ReductionV2.AUTO,
               name='cosine_similarity'):
    super(CosineSimilarity, self).__init__(
        cosine_similarity, reduction=reduction, name=name, axis=axis)


# Aliases.

mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
mape = MAPE = mean_absolute_percentage_error
msle = MSLE = mean_squared_logarithmic_error
kld = KLD = kullback_leibler_divergence


def is_categorical_crossentropy(loss):
  result = ((isinstance(loss, CategoricalCrossentropy) or
             (isinstance(loss, LossFunctionWrapper) and
              loss.fn == categorical_crossentropy) or
             (hasattr(loss, '__name__') and
              loss.__name__ == 'categorical_crossentropy') or
             (loss == 'categorical_crossentropy')))
  return result
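

# For illustration, each branch above matches a different way the loss can be
# specified:
#
#   is_categorical_crossentropy(CategoricalCrossentropy())   # True
#   is_categorical_crossentropy(categorical_crossentropy)    # True
#   is_categorical_crossentropy('categorical_crossentropy')  # True
#   is_categorical_crossentropy(mean_squared_error)          # False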


@keras_export('keras.losses.serialize')
def serialize(loss):
  return serialize_keras_object(loss)


@keras_export('keras.losses.deserialize')
def deserialize(name, custom_objects=None):
  return deserialize_keras_object(
      name,
      module_objects=globals(),
      custom_objects=custom_objects,
      printable_module_name='loss function')


@keras_export('keras.losses.get')
def get(identifier):
  if identifier is None:
    return None
  if isinstance(identifier, six.string_types):
    identifier = str(identifier)
    return deserialize(identifier)
  if isinstance(identifier, dict):
    return deserialize(identifier)
  elif callable(identifier):
    return identifier
  else:
    raise ValueError('Could not interpret '
                     'loss function identifier:', identifier)
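

# For illustration (the dict layout assumes the standard Keras serialization
# format): `get` accepts a string, a config dict, or a callable:
#
#   get('mse')                                             # alias lookup
#   get({'class_name': 'MeanSquaredError', 'config': {}})  # Loss instance
#   get(mean_squared_error)                                # returned as-is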


LABEL_DTYPES_FOR_LOSSES = {
    losses_impl.sparse_softmax_cross_entropy: 'int32',
    sparse_categorical_crossentropy: 'int32'
}