Deep Learning Algorithm Implementations 1.0.0
C++ implementations of fundamental deep learning algorithms
Loading...
Searching...
No Matches
losses.hpp
Go to the documentation of this file.
#pragma once

#include <memory>
#include <string>
#include <vector>

#include "utils/autograd.hpp"
#include "utils/matrix.hpp"
15namespace dl::loss {
16 using utils::Variable;
17 using utils::VariableD;
18 using utils::VariableF;
19 using utils::Matrix;
20 using utils::MatrixD;
21 using utils::MatrixF;
22
26 template<typename T>
28 public:
29 virtual ~AutogradLoss() = default;
30
37 virtual Variable<T> forward(const Variable<T>& predictions, const Variable<T>& targets) = 0;
38
42 Variable<T> operator()(const Variable<T>& predictions, const Variable<T>& targets) {
43 return forward(predictions, targets);
44 }
45 };
46
54 template<typename T>
55 class MSELoss : public AutogradLoss<T> {
56 public:
61 explicit MSELoss(const std::string& reduction = "mean") : reduction_(reduction) {}
62
69 Variable<T> forward(const Variable<T>& predictions, const Variable<T>& targets) override;
70
71 private:
72 std::string reduction_;
73 };
74
82 template<typename T>
83 class CrossEntropyLoss : public AutogradLoss<T> {
84 public:
89 explicit CrossEntropyLoss(const std::string& reduction = "mean") : reduction_(reduction) {}
90
97 Variable<T> forward(const Variable<T>& predictions, const Variable<T>& targets) override;
98
99 private:
100 std::string reduction_;
101
105 Variable<T> softmax(const Variable<T>& logits);
106
110 Variable<T> log_softmax(const Variable<T>& logits);
111 };
112
120 template<typename T>
121 class BCELoss : public AutogradLoss<T> {
122 public:
127 explicit BCELoss(const std::string& reduction = "mean") : reduction_(reduction) {}
128
135 Variable<T> forward(const Variable<T>& predictions, const Variable<T>& targets) override;
136
137 private:
138 std::string reduction_;
139 };
140
147 template<typename T>
148 class BCEWithLogitsLoss : public AutogradLoss<T> {
149 public:
154 explicit BCEWithLogitsLoss(const std::string& reduction = "mean") : reduction_(reduction) {}
155
162 Variable<T> forward(const Variable<T>& predictions, const Variable<T>& targets) override;
163
164 private:
165 std::string reduction_;
166 };
167
175 template<typename T>
176 class HingeLoss : public AutogradLoss<T> {
177 public:
182 explicit HingeLoss(const std::string& reduction = "mean") : reduction_(reduction) {}
183
190 Variable<T> forward(const Variable<T>& predictions, const Variable<T>& targets) override;
191
192 private:
193 std::string reduction_;
194 };
195
206 template<typename T>
207 class HuberLoss : public AutogradLoss<T> {
208 public:
214 explicit HuberLoss(T delta = 1.0, const std::string& reduction = "mean")
215 : delta_(delta), reduction_(reduction) {}
216
223 Variable<T> forward(const Variable<T>& predictions, const Variable<T>& targets) override;
224
225 private:
226 T delta_;
227 std::string reduction_;
228 };
229
230 // Type aliases for convenience
243
244} // namespace dl::loss
PyTorch-like automatic differentiation engine.
Base class for autograd-compatible loss functions.
Definition losses.hpp:27
Variable< T > operator()(const Variable< T > &predictions, const Variable< T > &targets)
Convenience operator for computing loss.
Definition losses.hpp:42
virtual ~AutogradLoss()=default
virtual Variable< T > forward(const Variable< T > &predictions, const Variable< T > &targets)=0
Compute loss between predictions and targets.
Binary Cross Entropy Loss with autograd support.
Definition losses.hpp:121
Variable< T > forward(const Variable< T > &predictions, const Variable< T > &targets) override
Forward pass: compute binary cross entropy loss.
Definition losses.cpp:86
BCELoss(const std::string &reduction="mean")
Constructor.
Definition losses.hpp:127
Binary Cross Entropy with Logits Loss.
Definition losses.hpp:148
BCEWithLogitsLoss(const std::string &reduction="mean")
Constructor.
Definition losses.hpp:154
Variable< T > forward(const Variable< T > &predictions, const Variable< T > &targets) override
Forward pass: compute BCE loss from logits.
Definition losses.cpp:121
Cross Entropy Loss with autograd support.
Definition losses.hpp:83
CrossEntropyLoss(const std::string &reduction="mean")
Constructor.
Definition losses.hpp:89
Variable< T > forward(const Variable< T > &predictions, const Variable< T > &targets) override
Forward pass: compute cross entropy loss.
Definition losses.cpp:61
Hinge Loss with autograd support.
Definition losses.hpp:176
HingeLoss(const std::string &reduction="mean")
Constructor.
Definition losses.hpp:182
Variable< T > forward(const Variable< T > &predictions, const Variable< T > &targets) override
Forward pass: compute hinge loss.
Definition losses.cpp:137
Huber Loss with autograd support.
Definition losses.hpp:207
Variable< T > forward(const Variable< T > &predictions, const Variable< T > &targets) override
Forward pass: compute Huber loss.
Definition losses.cpp:166
HuberLoss(T delta=1.0, const std::string &reduction="mean")
Constructor.
Definition losses.hpp:214
Mean Squared Error Loss with autograd support.
Definition losses.hpp:55
MSELoss(const std::string &reduction="mean")
Constructor.
Definition losses.hpp:61
Variable< T > forward(const Variable< T > &predictions, const Variable< T > &targets) override
Forward pass: compute MSE loss.
Definition losses.cpp:12
Variable class that supports automatic differentiation.
Definition autograd.hpp:58
Matrix utility class for deep learning operations.